From e0d03a6fc6274164c5f96131f5c2ed240fbdaa47 Mon Sep 17 00:00:00 2001 From: csric <59389055+CsRic@users.noreply.github.com> Date: Thu, 20 Apr 2023 19:26:44 +0800 Subject: [PATCH 01/26] Detached ppo (#9) * run the base * working on dist ppo * sync * detached trainer * update detached trainer. no maker update function * facing init problem * 1 maker 1 trainer detached run. but no model update * facing cuda problem * fix save functions * verified maker update * nothing * add ignore * analyize loss issue * remove some debug codes * facing 2m1t stuck issue * 2m1t verified * do not use torchrun * working on 2m2t * working on 2m2t * initialize strategy in ray actor env * facing actor's init order issue * facing ddp model update issue (need unwarp ddp) * unwrap ddp actor * checking 1m2t stuck problem * nothing * set timeout for trainer choosing. It solves the stuck problem! * delete some debug output * rename to sync with upstream * rename to sync with upstream * coati rename * nothing * I am going to detach the replaybuffer from trainer and make it a Ray Actor. Two benefits: 1. support TP trainer. 2. asynchronized buffer operations * experience_maker_holder performs target-revolving _send_experience() instead of length comparison. * move code to ray subfolder * working on pipeline inference * apply comments * working on pipeline strategy. in progress. * remove pipeline code. clean this branch * update remote parameters by state_dict. 
no test * nothing * state_dict sharding transfer * merge debug branch * gemini _unwrap_model fix * simplify code * simplify code & fix LoRALinear AttributeError * critic unwrapped state_dict --------- Co-authored-by: csric --- applications/Chat/coati/models/lora.py | 8 +- .../Chat/coati/ray/example/1m1t_quantize.py | 147 +++++++++++ .../coati/ray/src/detached_trainer_base.py | 17 +- .../coati/ray/src/detached_trainer_ppo.py | 110 ++++++-- .../coati/ray/src/experience_maker_holder.py | 240 ++++++++++++------ applications/Chat/coati/ray/src/utils.py | 89 ++++++- .../Chat/coati/trainer/strategies/base.py | 5 + .../coati/trainer/strategies/colossalai.py | 23 +- .../Chat/coati/trainer/strategies/ddp.py | 8 +- .../Chat/coati/trainer/strategies/naive.py | 12 + 10 files changed, 537 insertions(+), 122 deletions(-) create mode 100644 applications/Chat/coati/ray/example/1m1t_quantize.py diff --git a/applications/Chat/coati/models/lora.py b/applications/Chat/coati/models/lora.py index 7f6eb73262fa..3d30208c05f4 100644 --- a/applications/Chat/coati/models/lora.py +++ b/applications/Chat/coati/models/lora.py @@ -61,7 +61,13 @@ def T(w): if self.merge_weights and self.merged: # Make sure that the weights are not merged if self.r > 0: - self.weight.data -= T(self.lora_B @ self.lora_A) * self.scaling + if not hasattr(self, "lora_A") or not hasattr(self, "lora_B"): + # csric: temporary fix + self.lora_A = nn.Parameter(self.weight.new_empty((self.r, self.in_features))) + self.lora_B = nn.Parameter(self.weight.new_empty((self.out_features, self.r))) + self.reset_parameters() + else: + self.weight.data -= T(self.lora_B @ self.lora_A) * self.scaling self.merged = False def eval(self): diff --git a/applications/Chat/coati/ray/example/1m1t_quantize.py b/applications/Chat/coati/ray/example/1m1t_quantize.py new file mode 100644 index 000000000000..12a60fd65d8b --- /dev/null +++ b/applications/Chat/coati/ray/example/1m1t_quantize.py @@ -0,0 +1,147 @@ +import argparse +import pandas as pd 
+import torch +import ray +import os +import socket + +from coati.ray.src.experience_maker_holder import ExperienceMakerHolder +from coati.ray.src.detached_trainer_ppo import DetachedPPOTrainer + +from transformers import AutoTokenizer, BloomTokenizerFast +from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer + +def get_free_port(): + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind(('', 0)) + return s.getsockname()[1] + + +def get_local_ip(): + with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: + s.connect(('8.8.8.8', 80)) + return s.getsockname()[0] + + +def main(args): + master_addr = str(get_local_ip()) + # trainer_env_info + trainer_port = str(get_free_port()) + env_info_trainer = {'local_rank' : '0', + 'rank' : '0', + 'world_size' : '1', + 'master_port' : trainer_port, + 'master_addr' : master_addr} + + # maker_env_info + maker_port = str(get_free_port()) + env_info_maker = {'local_rank' : '0', + 'rank' : '0', + 'world_size' : '1', + 'master_port' : maker_port, + 'master_addr' : master_addr} + + # configure tokenizer + if args.model == 'gpt2': + tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + tokenizer.pad_token = tokenizer.eos_token + elif args.model == 'bloom': + tokenizer = BloomTokenizerFast.from_pretrained(args.pretrain) + tokenizer.pad_token = tokenizer.eos_token + elif args.model == 'opt': + tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") + else: + raise ValueError(f'Unsupported model "{args.model}"') + + # configure Trainer + trainer_ref = DetachedPPOTrainer.options(name="trainer1", num_gpus=1, max_concurrency=2).remote( + experience_maker_holder_name_list=["maker1"], + strategy=args.trainer_strategy, + model=args.model, + env_info = env_info_trainer, + pretrained=args.pretrain, + lora_rank=args.lora_rank, + train_batch_size=args.train_batch_size, + buffer_limit=16, + experience_batch_size=args.experience_batch_size, + max_epochs=args.max_epochs, + #kwargs: + max_length=128, + 
do_sample=True, + temperature=1.0, + top_k=50, + pad_token_id=tokenizer.pad_token_id, + eos_token_id=tokenizer.eos_token_id, + debug=args.debug, + ) + + # configure Experience Maker + experience_holder_ref = ExperienceMakerHolder.options(name="maker1", num_gpus=1, max_concurrency=2).remote( + detached_trainer_name_list=["trainer1"], + strategy=args.maker_strategy, + env_info = env_info_maker, + experience_batch_size=args.experience_batch_size, + kl_coef=0.1, + #kwargs: + max_length=128, + do_sample=True, + temperature=1.0, + top_k=50, + pad_token_id=tokenizer.pad_token_id, + eos_token_id=tokenizer.eos_token_id, + debug=args.debug, + ) + + # a 'jump wire' to set quantized initial_model and reward_model + + + # trainer send its actor and critic to experience holders. + # ray.get(trainer_ref.initialize_remote_makers.remote()) + + # configure sampler + dataset = pd.read_csv(args.prompt_path)['prompt'] + + def tokenize_fn(texts): + # MUST padding to max length to ensure inputs of all ranks have the same length + # Different length may lead to hang when using gemini, as different generation steps + batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True) + return {k: v.cuda() for k, v in batch.items()} + + trainer_done_ref = trainer_ref.fit.remote(num_episodes=args.num_episodes, max_timesteps=args.max_timesteps, update_timesteps=args.update_timesteps) + num_exp_per_maker = args.num_episodes * args.max_timesteps // args.update_timesteps * args.max_epochs + 3 # +3 for fault tolerance + maker_done_ref = experience_holder_ref.workingloop.remote(dataset, tokenize_fn, times=num_exp_per_maker) + + ray.get([trainer_done_ref, maker_done_ref]) + + # save model checkpoint after fitting + trainer_ref.strategy_save_actor.remote(args.save_path, only_rank0=True) + # save optimizer checkpoint on all ranks + if args.need_optim_ckpt: + trainer_ref.strategy_save_actor_optim.remote('actor_optim_checkpoint_prompts_%d.pt' % 
(torch.cuda.current_device()), + only_rank0=False) + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('prompt_path') + parser.add_argument('--trainer_strategy', + choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'], + default='naive') + parser.add_argument('--maker_strategy', + choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'], + default='naive') + parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama', 'roberta']) + parser.add_argument('--pretrain', type=str, default=None) + parser.add_argument('--save_path', type=str, default='actor_checkpoint_prompts.pt') + parser.add_argument('--need_optim_ckpt', type=bool, default=False) + parser.add_argument('--num_episodes', type=int, default=10) + parser.add_argument('--max_timesteps', type=int, default=10) + parser.add_argument('--update_timesteps', type=int, default=10) + parser.add_argument('--max_epochs', type=int, default=5) + parser.add_argument('--train_batch_size', type=int, default=8) + parser.add_argument('--experience_batch_size', type=int, default=8) + parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank") + + parser.add_argument('--debug', action='store_true') + args = parser.parse_args() + ray.init(namespace=os.environ["RAY_NAMESPACE"]) + main(args) diff --git a/applications/Chat/coati/ray/src/detached_trainer_base.py b/applications/Chat/coati/ray/src/detached_trainer_base.py index f1ed1ec71499..f5e52e8a3b3a 100644 --- a/applications/Chat/coati/ray/src/detached_trainer_base.py +++ b/applications/Chat/coati/ray/src/detached_trainer_base.py @@ -24,6 +24,7 @@ class DetachedTrainer(ABC): data_loader_pin_memory (bool, defaults to True): whether to pin memory for data loader callbacks (List[Callback], defaults to []): the callbacks to call during training process generate_kwargs (dict, optional): the kwargs to use while model generating + ''' def __init__(self, @@ -45,6 
+46,11 @@ def __init__(self, self.generate_kwargs = generate_kwargs self.target_holder_name_list = experience_maker_holder_name_list self.target_holder_list = [] + + if 'debug' in self.generate_kwargs and self.generate_kwargs['debug'] == True: + self._debug = True + else: + self._debug = False def update_target_holder_list(self, experience_maker_holder_name_list): self.target_holder_name_list = experience_maker_holder_name_list @@ -63,13 +69,13 @@ def training_step(self, experience: Experience) -> Dict[str, Any]: def _learn(self): pbar = tqdm(range(self.max_epochs), desc='Train epoch', disable=not is_rank_0()) for _ in pbar: - if 'debug' in self.generate_kwargs and self.generate_kwargs['debug'] == True: + if self._debug: print("[trainer] sampling exp") experience = self._buffer_sample() - if 'debug' in self.generate_kwargs and self.generate_kwargs['debug'] == True: + if self._debug: print("[trainer] training step") metrics = self.training_step(experience) - if 'debug' in self.generate_kwargs and self.generate_kwargs['debug'] == True: + if self._debug: print("[trainer] step over") pbar.set_postfix(metrics) @@ -88,15 +94,14 @@ def fit(self, num_episodes: int = 50000, max_timesteps: int = 500, update_timest @ray.method(concurrency_group="buffer_length") def buffer_get_length(self): # called by ExperienceMakerHolder - if 'debug' in self.generate_kwargs and self.generate_kwargs['debug'] == True: + if self._debug: print("[trainer] telling length") return self.detached_replay_buffer.get_length() @ray.method(concurrency_group="buffer_append") def buffer_append(self, experience: Experience): # called by ExperienceMakerHolder - if 'debug' in self.generate_kwargs and self.generate_kwargs['debug'] == True: - # print(f"[trainer] receiving exp. 
Current buffer length: {self.detached_replay_buffer.get_length()}") + if self._debug: print(f"[trainer] receiving exp.") self.detached_replay_buffer.append(experience) diff --git a/applications/Chat/coati/ray/src/detached_trainer_ppo.py b/applications/Chat/coati/ray/src/detached_trainer_ppo.py index 838e82d07f4a..071f0ddab2b9 100644 --- a/applications/Chat/coati/ray/src/detached_trainer_ppo.py +++ b/applications/Chat/coati/ray/src/detached_trainer_ppo.py @@ -14,11 +14,13 @@ import ray -from .utils import is_rank_0, get_cuda_actor_critic_from_args, get_strategy_from_args, set_dist_env +from .utils import is_rank_0, get_actor_from_args, get_critic_from_args, get_strategy_from_args, set_dist_env, \ + state_dict_to + from .detached_trainer_base import DetachedTrainer -@ray.remote(concurrency_groups={"buffer_length": 1, "buffer_append":1, "buffer_sample":1,"model_io": 1, "compute": 1}) +@ray.remote(concurrency_groups={"buffer_length": 1, "buffer_append": 1, "buffer_sample": 1, "model_io": 1, "compute": 1}) class DetachedPPOTrainer(DetachedTrainer): ''' Detached Trainer for PPO algorithm @@ -44,9 +46,12 @@ def __init__(self, experience_maker_holder_name_list: List[str], strategy: str, model: str, - env_info: Dict[str, str] = None, pretrained: str = None, lora_rank: int = 0, + cr_model: str = None, # if not None, use below cr settings for critic + cr_pretrained: str = None, + cr_lora_rank: int = 0, + env_info: Dict[str, str] = None, train_batch_size: int = 8, buffer_limit: int = 0, buffer_cpu_offload: bool = True, @@ -63,24 +68,32 @@ def __init__(self, # configure strategy self.strategy = get_strategy_from_args(strategy) # configure models, loss and optimizers + if cr_model is None: + cr_model = model + cr_pretrained = pretrained + cr_lora_rank = lora_rank + with self.strategy.model_init_context(): - self.actor, self.critic = get_cuda_actor_critic_from_args(model, pretrained, lora_rank) + self.actor = get_actor_from_args(model, pretrained, lora_rank) + self.critic = 
get_critic_from_args(cr_model, cr_pretrained, cr_lora_rank) if strategy != 'colossalai_gemini': - self.actor.to(torch.float16).to(torch.cuda.current_device()) - self.critic.to(torch.float16).to(torch.cuda.current_device()) + self.actor.to(torch.cuda.current_device()) #.to(torch.float16) + self.critic.to(torch.cuda.current_device()) #.to(torch.float16) + if strategy.startswith('colossalai'): - self.actor_optim = HybridAdam(self.actor.parameters(), lr=5e-6) - self.critic_optim = HybridAdam(self.critic.parameters(), lr=5e-6) + self.actor_optim = HybridAdam(self.actor.parameters(), lr=1e-7) + self.critic_optim = HybridAdam(self.critic.parameters(), lr=1e-7) else: - self.actor_optim = Adam(self.actor.parameters(), lr=5e-6) - self.critic_optim = Adam(self.critic.parameters(), lr=5e-6) + self.actor_optim = Adam(self.actor.parameters(), lr=1e-7) + self.critic_optim = Adam(self.critic.parameters(), lr=1e-7) (self.actor, self.actor_optim), (self.critic, self.critic_optim) = \ self.strategy.prepare((self.actor, self.actor_optim), (self.critic, self.critic_optim)) - generate_kwargs = _set_default_generate_kwargs(self.strategy, generate_kwargs, self.actor) + # configure trainer + generate_kwargs = _set_default_generate_kwargs(self.strategy, generate_kwargs, self.actor) self.actor_loss_fn = PolicyLoss(eps_clip) self.critic_loss_fn = ValueLoss(value_clip) @@ -94,25 +107,69 @@ def __init__(self, callbacks=callbacks, **generate_kwargs) + # for remote maker initialization + self._model_str = model + self._cr_model_str = cr_model + self._pretrained = pretrained + self._cr_pretrained = cr_pretrained + @ray.method(concurrency_group="model_io") - def _update_remote_makers(self): + def _update_remote_makers(self, **config): # TODO: balance duties if is_rank_0(): self.update_target_holder_list(self.target_holder_name_list) - for target_holder in self.target_holder_list: - # TODO: reduce malloc - with torch.no_grad(): - 
ray.get(target_holder.update_experience_maker.remote(self._get_unwrapped_actor(), self._get_unwrapped_critic())) - + with torch.no_grad(): + # actor: + # mark start + for target_holder in self.target_holder_list: + target_holder.update_experience_maker.remote(chunk_start=True) + # sending loop + for state_dict_shard in self._get_model_state_dict_shard(self.strategy._unwrap_actor(self.actor), **config): + for target_holder in self.target_holder_list: + target_holder.update_experience_maker.remote(new_actor_state_dict = state_dict_shard) + # mark end + for target_holder in self.target_holder_list: + target_holder.update_experience_maker.remote(chunk_end=True) + # critic + # mark start + for target_holder in self.target_holder_list: + target_holder.update_experience_maker.remote(chunk_start=True) + # sending loop + for state_dict_shard in self._get_model_state_dict_shard(self.strategy._unwrap_critic(self.critic), **config): + for target_holder in self.target_holder_list: + target_holder.update_experience_maker.remote(new_critic_state_dict = state_dict_shard) + # mark end + for target_holder in self.target_holder_list: + target_holder.update_experience_maker.remote(chunk_end=True) + @ray.method(concurrency_group="model_io") - def initialize_remote_makers(self): + def initialize_remote_makers(self, **config): # TODO: balance duties if is_rank_0(): self.update_target_holder_list(self.target_holder_name_list) - for target_holder in self.target_holder_list: - # TODO: reduce malloc - with torch.no_grad(): - ray.get(target_holder.initialize_experience_maker.remote(self._get_unwrapped_actor(), self._get_unwrapped_critic())) + with torch.no_grad(): + # actor / initial_model: + # mark start + for target_holder in self.target_holder_list: + target_holder.initialize_experience_maker.remote(actor_model=self._model_str,actor_pretrained=self._pretrained,chunk_start=True) + # sending loop + for state_dict_shard in 
self._get_model_state_dict_shard(self.strategy._unwrap_actor(self.actor), **config): + for target_holder in self.target_holder_list: + target_holder.initialize_experience_maker.remote(actor_state_dict=state_dict_shard) + # mark end + for target_holder in self.target_holder_list: + target_holder.initialize_experience_maker.remote(actor_model=self._model_str, chunk_end=True) + # critic / reward_model: + # mark start + for target_holder in self.target_holder_list: + target_holder.initialize_experience_maker.remote(critic_model=self._cr_model_str,critic_pretrained=self._cr_pretrained,chunk_start=True) + # sending loop + for state_dict_shard in self._get_model_state_dict_shard(self.strategy._unwrap_critic(self.critic), **config): + for target_holder in self.target_holder_list: + target_holder.initialize_experience_maker.remote(critic_state_dict=state_dict_shard) + # mark end + for target_holder in self.target_holder_list: + target_holder.initialize_experience_maker.remote(critic_model=self._cr_model_str, chunk_end=True) @ray.method(concurrency_group="compute") def training_step(self, experience: Experience) -> Dict[str, float]: @@ -177,6 +234,14 @@ def _get_unwrapped_critic(self): elif isinstance(self.strategy, NaiveStrategy): return self.critic + def _get_model_state_dict_shard(self, model: torch.nn.Module, **config): + try: + self.strategy.merge_lora_weight(model) + except AttributeError: + pass + for state_dict in self.strategy.get_model_state_dict_shard(model, **config): + yield state_dict_to(state_dict) + def _set_default_generate_kwargs(strategy: Strategy, generate_kwargs: dict, actor: Actor) -> None: origin_model = strategy._unwrap_actor(actor) @@ -189,4 +254,3 @@ def _set_default_generate_kwargs(strategy: Strategy, generate_kwargs: dict, acto new_kwargs['update_model_kwargs_fn'] = update_model_kwargs_fn return new_kwargs - \ No newline at end of file diff --git a/applications/Chat/coati/ray/src/experience_maker_holder.py 
b/applications/Chat/coati/ray/src/experience_maker_holder.py index 94e4a3d537a5..67b89a68119a 100644 --- a/applications/Chat/coati/ray/src/experience_maker_holder.py +++ b/applications/Chat/coati/ray/src/experience_maker_holder.py @@ -13,16 +13,17 @@ from threading import Lock import time import os +import tracemalloc - -from .utils import is_rank_0, get_strategy_from_args, set_dist_env +from .utils import is_rank_0, get_strategy_from_args, set_dist_env, get_actor_from_args, get_critic_from_args, \ + get_reward_model_from_args @ray.remote(concurrency_groups={"experience_io": 1, "model_io": 1, "compute": 1}) class ExperienceMakerHolder: ''' Args: - detached_trainer_name_list: str list to get ray actor handleskkk + detached_trainer_name_list: str list to get ray actor handles strategy: experience_batch_size: batch size of generated experience kl_coef: the coefficient of kl divergence loss @@ -46,18 +47,39 @@ def __init__(self, self.experience_batch_size = experience_batch_size self.kl_coef = kl_coef self.generate_kwargs = generate_kwargs - # Need a trainer to give an actor and a critic via initialize_experience_maker(...) 
actor, critic, reward_model, initial_model = None, None, None, None self.experience_maker = NaiveExperienceMaker(actor, critic, reward_model, initial_model, self.kl_coef) + self._model_visit_lock = Lock() - self.fully_initialized = False + self._initial_model_initialized = False + self._reward_model_initialized = False + self._actor_initialized = False + self._critic_initialized = False + if 'debug' in self.generate_kwargs and self.generate_kwargs['debug'] == True: + self._debug = True + else: + self._debug = False + self.target_auto_balance = False + + if self._debug: print('[maker] Waiting for INIT') def _get_ready(self): - while not self.fully_initialized: + while not self._fully_initialized(): time.sleep(1.0) + def _fully_initialized(self): + if not self._initial_model_initialized: + return False + if not self._reward_model_initialized: + return False + if not self._actor_initialized: + return False + if not self._critic_initialized: + return False + return True + def update_target_trainer_list(self, detached_trainer_name_list): self.target_trainer_list = [] for name in detached_trainer_name_list: @@ -66,7 +88,6 @@ def update_target_trainer_list(self, detached_trainer_name_list): # copy from ../trainer/base.py @ray.method(concurrency_group="compute") def _make_experience(self, inputs: Union[Tensor, Dict[str, Tensor]]) -> Experience: - self._get_ready() if isinstance(inputs, Tensor): return self.experience_maker.make_experience(inputs, **self.generate_kwargs) elif isinstance(inputs, dict): @@ -76,40 +97,37 @@ def _make_experience(self, inputs: Union[Tensor, Dict[str, Tensor]]) -> Experien @ray.method(concurrency_group="experience_io") def _send_experience(self, experience): - ''' - ignore it - - # choose a trainer that has the least experience batch in its detached_replay_buffer - chosen_trainer = None - min_length = None - if 'debug' in self.generate_kwargs and self.generate_kwargs['debug'] == True: - print("[maker] choosing target trainer") - while 
chosen_trainer is None: - for target_trainer in self.target_trainer_list: - try: - temp_length = ray.get(target_trainer.buffer_get_length.remote(), timeout=0.1) - if min_length is None: - min_length = temp_length - chosen_trainer = target_trainer - else: - if temp_length < min_length: + if not self.target_auto_balance: + # choose the trainer in polling mannar + if not hasattr(self, "_target_idx"): + self._target_idx = 0 + chosen_trainer = self.target_trainer_list[self._target_idx] + if self._debug: + print(f"[maker] sending exp to {chosen_trainer}") + chosen_trainer.buffer_append.remote(experience) + self._target_idx = (self._target_idx + 1) % len(self.target_trainer_list) + else: + # choose a trainer that has the least experience batch in its detached_replay_buffer + chosen_trainer = None + min_length = None + if self._debug: + print("[maker] choosing tartget trainer") + while chosen_trainer is None: + for target_trainer in self.target_trainer_list: + try: + temp_length = ray.get(target_trainer.buffer_get_length.remote(), timeout=0.1) + if min_length is None: min_length = temp_length chosen_trainer = target_trainer - except GetTimeoutError: - pass - - if 'debug' in self.generate_kwargs and self.generate_kwargs['debug'] == True: - print(f"[maker] sending exp to {chosen_trainer}") - chosen_trainer.buffer_append.remote(experience) - ''' - # - if not hasattr(self, "_target_idx"): - self._target_idx = 0 - chosen_trainer = self.target_trainer_list[self._target_idx] - if 'debug' in self.generate_kwargs and self.generate_kwargs['debug'] == True: - print(f"[maker] sending exp to {chosen_trainer}") - chosen_trainer.buffer_append.remote(experience) - self._target_idx = (self._target_idx + 1) % len(self.target_trainer_list) + else: + if temp_length < min_length: + min_length = temp_length + chosen_trainer = target_trainer + except GetTimeoutError: + pass + if self._debug: + print(f"[maker] sending exp to {chosen_trainer}") + chosen_trainer.buffer_append.remote(experience) def 
workingloop(self, dataset, tokenizer: Optional[Callable[[Any], dict]] = None, times=5000 * 50000): self._get_ready() @@ -126,47 +144,123 @@ def workingloop(self, dataset, tokenizer: Optional[Callable[[Any], dict]] = None self._send_experience(experience=experience) @ray.method(concurrency_group="model_io") - def initialize_experience_maker(self, init_actor: Actor, init_critic: Critic): + def initialize_experience_maker(self, + actor_model: str = None, + actor_pretrained: str = None, + actor_state_dict: Dict[str, Any] = None, + critic_model: str = None, + critic_pretrained: str = None, + critic_state_dict: Dict[str, Any] = None, + chunk_start: bool = None, + chunk_end: bool = None): ''' - called by trainer. Only once. + called by trainer + chunk_start: Set True at the first call. Before sending state_dict calls + chunk_end: Set True at the last call. After sending state_dict calls. + + TODO: load_state_dict integrate with model-sharding strategy ''' - # TODO: reduce malloc - if self.fully_initialized: + if self._fully_initialized(): return - if 'debug' in self.generate_kwargs and self.generate_kwargs['debug'] == True: - print('[maker] INIT') + + if chunk_start: + if self._debug: + print('[maker] INIT') + with torch.no_grad(): + # (csric) any better way to get model structure? 
+ with self.strategy.model_init_context(): + if not self._actor_initialized and actor_model is not None: + self.experience_maker.actor = get_actor_from_args(actor_model, actor_pretrained).half().requires_grad_(False) + if not self._critic_initialized and critic_model is not None: + self.experience_maker.critic = get_critic_from_args(critic_model, critic_pretrained).half().requires_grad_(False) + if not self._initial_model_initialized and actor_model is not None: + self.experience_maker.initial_model = get_actor_from_args(actor_model, actor_pretrained).half().requires_grad_(False) + if not self._reward_model_initialized and critic_model is not None: + self.experience_maker.reward_model = get_reward_model_from_args(critic_model, critic_pretrained).half().requires_grad_(False) + with torch.no_grad(): - with self.strategy.model_init_context(): - actor = init_actor - critic = init_critic - initial_model = deepcopy(actor) - reward_model = RewardModel(deepcopy(critic.model), - deepcopy(critic.value_head)).to(torch.cuda.current_device()) - if self.strategy_str != 'colossalai_gemini': - actor.to(torch.float16).to(torch.cuda.current_device()) - critic.to(torch.float16).to(torch.cuda.current_device()) - initial_model.to(torch.float16).to(torch.cuda.current_device()) - reward_model.to(torch.float16).to(torch.cuda.current_device()) - - self.experience_maker.actor = self.strategy.prepare(actor) - self.experience_maker.critic = self.strategy.prepare(critic) - self.experience_maker.initial_model = self.strategy.prepare(initial_model) - self.experience_maker.reward_model = self.strategy.prepare(reward_model) - self.fully_initialized = True + if not self._actor_initialized and actor_state_dict is not None: + self.experience_maker.actor.model.load_state_dict(actor_state_dict, strict=False) + if not self._critic_initialized and critic_state_dict is not None: + self.experience_maker.critic.load_state_dict(critic_state_dict, strict=False) + if not self._initial_model_initialized and 
actor_state_dict is not None: + self.experience_maker.initial_model.model.load_state_dict(actor_state_dict, strict=False) + if not self._reward_model_initialized and critic_state_dict is not None: + self.experience_maker.reward_model.load_state_dict(critic_state_dict, strict=False) + + + if chunk_end: + with torch.no_grad(): + if actor_model is not None: + if not self._actor_initialized: + self.experience_maker.actor = self.strategy.prepare(self.experience_maker.actor.to(torch.cuda.current_device())) + if not self._initial_model_initialized: + self.experience_maker.initial_model = self.strategy.prepare(self.experience_maker.initial_model.to(torch.cuda.current_device())) + self._actor_initialized = True + self._initial_model_initialized = True + if critic_model is not None: + if not self._critic_initialized: + self.experience_maker.critic = self.strategy.prepare(self.experience_maker.critic.to(torch.cuda.current_device())) + if not self._reward_model_initialized: + self.experience_maker.reward_model = self.strategy.prepare(self.experience_maker.reward_model.to(torch.cuda.current_device())) + self._critic_initialized = True + self._reward_model_initialized = True + + + def initialize_experience_maker_local(self, + initial_model_func=None, + reward_model_func=None, + actor_func=None, + critic_func=None): + ''' + Use function call to construct the model here, because some strategy requieres env_info + The model initialized here will be IGNORED in initialize_experience_maker. + initial_model and reward_model can have their own strategy rather than self.strategy. For example, Quantization. 
+ ''' + + if actor_func is not None: + self.experience_maker.actor = actor_func() + self._actor_initialized = True + if critic_func is not None: + self.experience_maker.critic = critic_func() + self._critic_initialized = True + if initial_model_func is not None: + self.experience_maker.initial_model = initial_model_func() + self._initial_model_initialized = True + if reward_model_func is not None: + self.experience_maker.reward_model = reward_model_func() + self._reward_model_initialized = True @ray.method(concurrency_group="model_io") - def update_experience_maker(self, new_actor: Actor, new_critic: Critic): + def update_experience_maker(self, + new_actor_state_dict: Dict[str, Any] = None, + new_critic_state_dict: Dict[str, Any] = None, + chunk_start: bool = None, + chunk_end: bool = None): ''' called by trainer + chunk_start: Set True at the first call. Before sending state_dict calls + chunk_end: Set True at the last call. After sending state_dict calls. + + TODO: load_state_dict integrate with model-sharding strategy ''' - # TODO: reduce malloc - self._model_visit_lock.acquire() - with torch.no_grad(): - if 'debug' in self.generate_kwargs and self.generate_kwargs['debug'] == True: + _watch_memory = True + if chunk_start: + if self._debug: print("[maker] UPDATE ") - if self.strategy_str != 'colossalai_gemini': - new_actor.to(torch.float16).to(torch.cuda.current_device()) - new_critic.to(torch.float16).to(torch.cuda.current_device()) - self.experience_maker.actor = self.strategy.prepare(new_actor) - self.experience_maker.critic = self.strategy.prepare(new_critic) - self._model_visit_lock.release() + if _watch_memory: + tracemalloc.start() + self._model_visit_lock.acquire() + + with torch.no_grad(): + if new_actor_state_dict is not None: + self.experience_maker.actor.model.load_state_dict(new_actor_state_dict, strict=False) + if new_critic_state_dict is not None: + self.experience_maker.critic.load_state_dict(new_critic_state_dict, strict=False) + + if chunk_end: 
+ self._model_visit_lock.release() + if _watch_memory: + current, peak = tracemalloc.get_traced_memory() + print(f"Current memory usage is {current / 10**6}MB; Peak was {peak / 10**6}MB") + tracemalloc.stop() diff --git a/applications/Chat/coati/ray/src/utils.py b/applications/Chat/coati/ray/src/utils.py index c750879b6d18..827c2b8c6dc9 100644 --- a/applications/Chat/coati/ray/src/utils.py +++ b/applications/Chat/coati/ray/src/utils.py @@ -1,30 +1,64 @@ import torch.distributed as dist from typing import Any, Callable, Dict, List, Optional -from coati.models.bloom import BLOOMActor, BLOOMCritic -from coati.models.gpt import GPTActor, GPTCritic -from coati.models.opt import OPTActor, OPTCritic +from coati.models.bloom import BLOOMActor, BLOOMCritic, BLOOMRM +from coati.models.gpt import GPTActor, GPTCritic, GPTRM +from coati.models.opt import OPTActor, OPTCritic, OPTRM +from coati.models.roberta import RoBERTaRM, RoBERTaActor, RoBERTaCritic +from coati.models.llama import LlamaActor, LlamaCritic, LlamaRM + from coati.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy import torch import os + def is_rank_0() -> bool: return not dist.is_initialized() or dist.get_rank() == 0 -def get_cuda_actor_critic_from_args(model: str, pretrained: str = None, lora_rank=0): +def get_actor_from_args(model: str, pretrained: str = None, lora_rank = 0): if model == 'gpt2': - actor = GPTActor(pretrained=pretrained, lora_rank=lora_rank).to(torch.cuda.current_device()) - critic = GPTCritic(pretrained=pretrained, lora_rank=lora_rank).to(torch.cuda.current_device()) + actor = GPTActor(pretrained=pretrained, lora_rank=lora_rank) elif model == 'bloom': - actor = BLOOMActor(pretrained=pretrained, lora_rank=lora_rank).to(torch.cuda.current_device()) - critic = BLOOMCritic(pretrained=pretrained, lora_rank=lora_rank).to(torch.cuda.current_device()) + actor = BLOOMActor(pretrained=pretrained, lora_rank=lora_rank) elif model == 'opt': - actor = OPTActor(pretrained=pretrained, 
lora_rank=lora_rank).to(torch.cuda.current_device()) - critic = OPTCritic(pretrained=pretrained, lora_rank=lora_rank).to(torch.cuda.current_device()) + actor = OPTActor(pretrained=pretrained, lora_rank=lora_rank) + elif model == 'llama': + actor = LlamaActor(pretrained=pretrained, lora_rank=lora_rank) + elif model == 'roberta': + actor = RoBERTaActor(pretrained=pretrained, lora_rank=lora_rank) else: - raise ValueError(f'Unsupported model "{model}"') - return actor, critic + raise ValueError(f'Unsupported actor model "{model}"') + return actor +def get_critic_from_args(model: str, pretrained: str = None, lora_rank = 0): + if model == 'gpt2': + critic = GPTCritic(pretrained=pretrained, lora_rank=lora_rank, use_action_mask=True) + elif model == 'bloom': + critic = BLOOMCritic(pretrained=pretrained, lora_rank=lora_rank, use_action_mask=True) + elif model == 'opt': + critic = OPTCritic(pretrained=pretrained, lora_rank=lora_rank, use_action_mask=True) + elif model == 'llama': + critic = LlamaCritic(pretrained=pretrained, lora_rank=lora_rank, use_action_mask=True) + elif model == 'roberta': + critic = RoBERTaCritic(pretrained=pretrained, lora_rank=lora_rank, use_action_mask=True) + else: + raise ValueError(f'Unsupported reward model "{model}"') + return critic + +def get_reward_model_from_args(model: str, pretrained: str = None): + if model == 'gpt2': + reward_model = GPTRM(pretrained=pretrained) + elif model == 'bloom': + reward_model = BLOOMRM(pretrained=pretrained) + elif model == 'opt': + reward_model = OPTRM(pretrained=pretrained) + elif model == 'llama': + reward_model = LlamaRM(pretrained=pretrained) + elif model == 'roberta': + reward_model = RoBERTaRM(pretrained=pretrained) + else: + raise ValueError(f'Unsupported reward model "{model}"') + return reward_model def get_strategy_from_args(strategy: str): if strategy == 'naive': @@ -40,9 +74,40 @@ def get_strategy_from_args(strategy: str): return strategy_ +from transformers import AutoTokenizer, BloomTokenizerFast, 
GPT2Tokenizer, LlamaTokenizer, RobertaTokenizer +from coati.utils import prepare_llama_tokenizer_and_embedding + +def get_tokenizer_from_args(model: str, **kwargs): + if model == 'gpt2': + tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + elif model == 'bloom': + tokenizer = BloomTokenizerFast.from_pretrained('bigscience/bloom-560m') + elif model == 'opt': + tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") + elif model == 'llama': + pretrain_path = kwargs["pretrain"] + tokenizer = AutoTokenizer.from_pretrained(pretrain_path) + elif model == 'roberta': + tokenizer = RobertaTokenizer.from_pretrained("roberta-base") + else: + raise ValueError(f'Unsupported model "{model}"') + + tokenizer.pad_token = tokenizer.eos_token + return tokenizer + def set_dist_env(env_info: Dict[str, str]): os.environ["RANK"] = env_info['rank'] os.environ["LOCAL_RANK"] = env_info['local_rank'] os.environ["WORLD_SIZE"] = env_info['world_size'] os.environ['MASTER_PORT'] = env_info['master_port'] os.environ['MASTER_ADDR'] = env_info['master_addr'] + + +def state_dict_to(state_dict: Dict[str, Any], dtype: torch.dtype = torch.float16, device: torch.device = torch.device('cpu')): + ''' + keep state_dict intact + ''' + new_state_dict = {} + for k, v in state_dict.items(): + new_state_dict[k] = v.to(dtype = dtype, device = device) + return new_state_dict \ No newline at end of file diff --git a/applications/Chat/coati/trainer/strategies/base.py b/applications/Chat/coati/trainer/strategies/base.py index 7d25138561ea..e0232fbc64ad 100644 --- a/applications/Chat/coati/trainer/strategies/base.py +++ b/applications/Chat/coati/trainer/strategies/base.py @@ -112,6 +112,11 @@ def _unwrap_actor(actor: Actor) -> nn.Module: """ return Strategy._unwrap_model(actor) + @staticmethod + def _unwrap_critic(critic: Critic) -> nn.Module: + return Strategy._unwrap_model(critic) + + @abstractmethod def save_model(self, model: nn.Module, diff --git a/applications/Chat/coati/trainer/strategies/colossalai.py 
b/applications/Chat/coati/trainer/strategies/colossalai.py index ba85ba76d4b1..5a6021c5013f 100644 --- a/applications/Chat/coati/trainer/strategies/colossalai.py +++ b/applications/Chat/coati/trainer/strategies/colossalai.py @@ -5,7 +5,7 @@ import torch.distributed as dist import torch.nn as nn import torch.optim as optim -from coati.models.base import LM, Actor, RewardModel +from coati.models.base import LM, Actor, RewardModel, Critic from coati.models.lora import LoraLinear from torch.optim import Optimizer from transformers.modeling_utils import PreTrainedModel @@ -159,12 +159,12 @@ def _unwrap_actor(actor: Actor) -> nn.Module: return model.module return model - def _unwrap_model(self, model: Union[nn.Module, ZeroDDP]) -> nn.Module: - if isinstance(model, ZeroDDP) and self.stage == 3: - logger.info(f"model type: {type(model)}, get static torch model") - model = get_static_torch_model(model) - logger.info(f"unwrapped_model type: {type(model)}") + @staticmethod + def _unwrap_critic(critic: Critic) -> nn.Module: + return Strategy._unwrap_critic(critic) + + def _unwrap_model(self, model: Union[nn.Module, ZeroDDP]) -> nn.Module: return super()._unwrap_model(model) def save_model(self, @@ -210,3 +210,14 @@ def save_optimizer(self, optimizer: Optimizer, path: str, only_rank0: bool = Fal raise RuntimeError( f'Optimizer states are sharded when using ColossalAIStrategy. 
Only rank0 is not supported.') torch.save(optimizer.state_dict(), path) + + def get_model_state_dict_shard(self, model: nn.Module, **config): + if self.stage != 3: + yield from super().get_model_state_dict_shard(model, **config) + else: + unwrapped_model = self._unwrap_model(model) + for module in unwrapped_model.modules(): + if isinstance(module, LoraLinear): + module.merge_weights = True + module.eval() + yield from model.state_dict_shard(max_shard_size=1024) \ No newline at end of file diff --git a/applications/Chat/coati/trainer/strategies/ddp.py b/applications/Chat/coati/trainer/strategies/ddp.py index 8a8c4b3c2f4e..a0fd3fa27a58 100644 --- a/applications/Chat/coati/trainer/strategies/ddp.py +++ b/applications/Chat/coati/trainer/strategies/ddp.py @@ -7,7 +7,7 @@ import torch import torch.distributed as dist import torch.nn as nn -from coati.models.base import LM, Actor, RewardModel +from coati.models.base import LM, Actor, RewardModel, Critic from coati.models.lora import LoraLinear from coati.replay_buffer import ReplayBuffer from torch.nn.parallel import DistributedDataParallel as DDP @@ -74,6 +74,11 @@ def setup_dataloader(self, replay_buffer: ReplayBuffer, pin_memory: bool = False def _unwrap_actor(actor: Actor) -> nn.Module: model: DDP = Strategy._unwrap_actor(actor) return model.module + + @staticmethod + def _unwrap_critic(critic: Critic) -> nn.Module: + model: DDP = Strategy._unwrap_critic(critic) + return model.module def save_model(self, model: nn.Module, path: str, only_rank0: bool = False, tokenizer: Optional[PreTrainedTokenizerBase] = None) -> None: if only_rank0 and dist.get_rank() != 0: @@ -109,3 +114,4 @@ def save_optimizer(self, optimizer: Optimizer, path: str, only_rank0: bool = Fal def setup_sampler(self, dataset) -> DistributedSampler: return DistributedSampler(dataset, dist.get_world_size(), dist.get_rank()) + diff --git a/applications/Chat/coati/trainer/strategies/naive.py b/applications/Chat/coati/trainer/strategies/naive.py index 
bb47e5ab2688..a22be1181fb8 100644 --- a/applications/Chat/coati/trainer/strategies/naive.py +++ b/applications/Chat/coati/trainer/strategies/naive.py @@ -72,3 +72,15 @@ def save_optimizer(self, optimizer: Optimizer, path: str, only_rank0: bool = Fal def load_optimizer(self, optimizer: Optimizer, path: str, map_location: Any = None) -> None: state_dict = torch.load(path, map_location=map_location) optimizer.load_state_dict(state_dict) + + def get_model_state_dict_shard(self, model: nn.Module, **config): + # TODO: implement sharding on naive strategy + state_dict = model.state_dict() + yield state_dict + + def merge_lora_weight(self, model: nn.Module): + unwrapped_model = self._unwrap_model(model) + for module in unwrapped_model.modules(): + if isinstance(module, LoraLinear): + module.merge_weights = True + module.eval() \ No newline at end of file From be4c3f5a2780dee2de974a39d5a1cd9c71f60c16 Mon Sep 17 00:00:00 2001 From: Hongxin Liu Date: Fri, 21 Apr 2023 16:40:05 +0800 Subject: [PATCH 02/26] [chat] add perfomance evaluator and fix bugs (#10) * [chat] add performance evaluator for ray * [chat] refactor debug arg * [chat] support hf config * [chat] fix generation * [chat] add 1mmt dummy example * [chat] fix gemini ckpt --- applications/Chat/coati/models/generation.py | 3 +- applications/Chat/coati/ray/example/1m1t.py | 71 ++++--- .../Chat/coati/ray/example/1m1t_quantize.py | 63 +++--- .../Chat/coati/ray/example/1mmt_dummy.py | 186 ++++++++++++++++++ .../coati/ray/src/detached_replay_buffer.py | 41 ++-- .../coati/ray/src/detached_trainer_base.py | 58 ++++-- .../coati/ray/src/detached_trainer_ppo.py | 154 +++++++++------ .../coati/ray/src/experience_maker_holder.py | 119 ++++++++--- applications/Chat/coati/ray/src/utils.py | 75 ++++--- .../callbacks/performance_evaluator.py | 89 +++++++++ applications/Chat/coati/trainer/ppo.py | 37 ++-- 11 files changed, 661 insertions(+), 235 deletions(-) create mode 100644 applications/Chat/coati/ray/example/1mmt_dummy.py diff 
--git a/applications/Chat/coati/models/generation.py b/applications/Chat/coati/models/generation.py index eb30c36d0f84..961f2aec677d 100644 --- a/applications/Chat/coati/models/generation.py +++ b/applications/Chat/coati/models/generation.py @@ -76,8 +76,7 @@ def sample(model: nn.Module, # update generated ids, model inputs for next step input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) if update_model_kwargs_fn is not None: - model_kwargs = update_model_kwargs_fn(outputs, **model_kwargs) - + model_kwargs = update_model_kwargs_fn(outputs, model_kwargs) # if eos_token was found in one sentence, set sentence to finished if eos_token_id is not None: unfinished_sequences = unfinished_sequences.mul((next_tokens != eos_token_id).long()) diff --git a/applications/Chat/coati/ray/example/1m1t.py b/applications/Chat/coati/ray/example/1m1t.py index a6527370505b..4ad724c1e354 100644 --- a/applications/Chat/coati/ray/example/1m1t.py +++ b/applications/Chat/coati/ray/example/1m1t.py @@ -1,25 +1,26 @@ import argparse +import os +import socket from copy import deepcopy import pandas as pd +import ray import torch -from coati.trainer import PPOTrainer - - -from coati.ray.src.experience_maker_holder import ExperienceMakerHolder +from coati.experience_maker import NaiveExperienceMaker from coati.ray.src.detached_trainer_ppo import DetachedPPOTrainer - +from coati.ray.src.experience_maker_holder import ExperienceMakerHolder +from coati.trainer import PPOTrainer +from coati.trainer.callbacks.performance_evaluator import ( + ExperienceMakerPerformanceEvaluator, + TrainerPerformaceEvaluator, +) from coati.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy -from coati.experience_maker import NaiveExperienceMaker from torch.optim import Adam from transformers import AutoTokenizer, BloomTokenizerFast from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer from colossalai.nn.optimizer import HybridAdam -import ray -import os -import socket 
def get_free_port(): with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: @@ -31,24 +32,29 @@ def get_local_ip(): with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: s.connect(('8.8.8.8', 80)) return s.getsockname()[0] - + + def main(args): master_addr = str(get_local_ip()) # trainer_env_info trainer_port = str(get_free_port()) - env_info_trainer = {'local_rank' : '0', - 'rank' : '0', - 'world_size' : '1', - 'master_port' : trainer_port, - 'master_addr' : master_addr} - + env_info_trainer = { + 'local_rank': '0', + 'rank': '0', + 'world_size': '1', + 'master_port': trainer_port, + 'master_addr': master_addr + } + # maker_env_info maker_port = str(get_free_port()) - env_info_maker = {'local_rank' : '0', - 'rank' : '0', - 'world_size' : '1', - 'master_port' : maker_port, - 'master_addr' : master_addr} + env_info_maker = { + 'local_rank': '0', + 'rank': '0', + 'world_size': '1', + 'master_port': maker_port, + 'master_addr': master_addr + } # configure tokenizer if args.model == 'gpt2': @@ -67,20 +73,21 @@ def main(args): experience_maker_holder_name_list=["maker1"], strategy=args.trainer_strategy, model=args.model, - env_info = env_info_trainer, + env_info=env_info_trainer, pretrained=args.pretrain, lora_rank=args.lora_rank, train_batch_size=args.train_batch_size, buffer_limit=16, experience_batch_size=args.experience_batch_size, max_epochs=args.max_epochs, - #kwargs: + # kwargs: max_length=128, do_sample=True, temperature=1.0, top_k=50, pad_token_id=tokenizer.pad_token_id, eos_token_id=tokenizer.eos_token_id, + eval_performance=True, debug=args.debug, ) @@ -88,16 +95,17 @@ def main(args): experience_holder_ref = ExperienceMakerHolder.options(name="maker1", num_gpus=1, max_concurrency=2).remote( detached_trainer_name_list=["trainer1"], strategy=args.maker_strategy, - env_info = env_info_maker, + env_info=env_info_maker, experience_batch_size=args.experience_batch_size, kl_coef=0.1, - #kwargs: + # kwargs: max_length=128, do_sample=True, temperature=1.0, 
top_k=50, pad_token_id=tokenizer.pad_token_id, eos_token_id=tokenizer.eos_token_id, + eval_performance=True, debug=args.debug, ) @@ -113,19 +121,24 @@ def tokenize_fn(texts): batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True) return {k: v.cuda() for k, v in batch.items()} - trainer_done_ref = trainer_ref.fit.remote(num_episodes=args.num_episodes, max_timesteps=args.max_timesteps, update_timesteps=args.update_timesteps) - num_exp_per_maker = args.num_episodes * args.max_timesteps // args.update_timesteps * args.max_epochs + 3 # +3 for fault tolerance + trainer_done_ref = trainer_ref.fit.remote(num_episodes=args.num_episodes, + max_timesteps=args.max_timesteps, + update_timesteps=args.update_timesteps) + num_exp_per_maker = args.num_episodes * args.max_timesteps // args.update_timesteps * \ + args.max_epochs + 3 # +3 for fault tolerance maker_done_ref = experience_holder_ref.workingloop.remote(dataset, tokenize_fn, times=num_exp_per_maker) - + ray.get([trainer_done_ref, maker_done_ref]) # save model checkpoint after fitting trainer_ref.strategy_save_actor.remote(args.save_path, only_rank0=True) # save optimizer checkpoint on all ranks if args.need_optim_ckpt: - trainer_ref.strategy_save_actor_optim.remote('actor_optim_checkpoint_prompts_%d.pt' % (torch.cuda.current_device()), + trainer_ref.strategy_save_actor_optim.remote('actor_optim_checkpoint_prompts_%d.pt' % + (torch.cuda.current_device()), only_rank0=False) + if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('prompt_path') diff --git a/applications/Chat/coati/ray/example/1m1t_quantize.py b/applications/Chat/coati/ray/example/1m1t_quantize.py index 12a60fd65d8b..dc9c9bf9a1f3 100644 --- a/applications/Chat/coati/ray/example/1m1t_quantize.py +++ b/applications/Chat/coati/ray/example/1m1t_quantize.py @@ -1,16 +1,16 @@ import argparse -import pandas as pd -import torch -import ray import os import socket -from 
coati.ray.src.experience_maker_holder import ExperienceMakerHolder +import pandas as pd +import ray +import torch from coati.ray.src.detached_trainer_ppo import DetachedPPOTrainer - +from coati.ray.src.experience_maker_holder import ExperienceMakerHolder from transformers import AutoTokenizer, BloomTokenizerFast from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer + def get_free_port(): with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind(('', 0)) @@ -21,25 +21,29 @@ def get_local_ip(): with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: s.connect(('8.8.8.8', 80)) return s.getsockname()[0] - + def main(args): master_addr = str(get_local_ip()) # trainer_env_info trainer_port = str(get_free_port()) - env_info_trainer = {'local_rank' : '0', - 'rank' : '0', - 'world_size' : '1', - 'master_port' : trainer_port, - 'master_addr' : master_addr} - + env_info_trainer = { + 'local_rank': '0', + 'rank': '0', + 'world_size': '1', + 'master_port': trainer_port, + 'master_addr': master_addr + } + # maker_env_info maker_port = str(get_free_port()) - env_info_maker = {'local_rank' : '0', - 'rank' : '0', - 'world_size' : '1', - 'master_port' : maker_port, - 'master_addr' : master_addr} + env_info_maker = { + 'local_rank': '0', + 'rank': '0', + 'world_size': '1', + 'master_port': maker_port, + 'master_addr': master_addr + } # configure tokenizer if args.model == 'gpt2': @@ -58,14 +62,14 @@ def main(args): experience_maker_holder_name_list=["maker1"], strategy=args.trainer_strategy, model=args.model, - env_info = env_info_trainer, + env_info=env_info_trainer, pretrained=args.pretrain, lora_rank=args.lora_rank, train_batch_size=args.train_batch_size, buffer_limit=16, experience_batch_size=args.experience_batch_size, max_epochs=args.max_epochs, - #kwargs: + # kwargs: max_length=128, do_sample=True, temperature=1.0, @@ -73,16 +77,17 @@ def main(args): pad_token_id=tokenizer.pad_token_id, eos_token_id=tokenizer.eos_token_id, debug=args.debug, + 
eval_performance=True, ) # configure Experience Maker experience_holder_ref = ExperienceMakerHolder.options(name="maker1", num_gpus=1, max_concurrency=2).remote( detached_trainer_name_list=["trainer1"], strategy=args.maker_strategy, - env_info = env_info_maker, + env_info=env_info_maker, experience_batch_size=args.experience_batch_size, kl_coef=0.1, - #kwargs: + # kwargs: max_length=128, do_sample=True, temperature=1.0, @@ -90,14 +95,13 @@ def main(args): pad_token_id=tokenizer.pad_token_id, eos_token_id=tokenizer.eos_token_id, debug=args.debug, + eval_performance=True, ) # a 'jump wire' to set quantized initial_model and reward_model - # trainer send its actor and critic to experience holders. # ray.get(trainer_ref.initialize_remote_makers.remote()) - # configure sampler dataset = pd.read_csv(args.prompt_path)['prompt'] @@ -107,19 +111,24 @@ def tokenize_fn(texts): batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True) return {k: v.cuda() for k, v in batch.items()} - trainer_done_ref = trainer_ref.fit.remote(num_episodes=args.num_episodes, max_timesteps=args.max_timesteps, update_timesteps=args.update_timesteps) - num_exp_per_maker = args.num_episodes * args.max_timesteps // args.update_timesteps * args.max_epochs + 3 # +3 for fault tolerance + trainer_done_ref = trainer_ref.fit.remote(num_episodes=args.num_episodes, + max_timesteps=args.max_timesteps, + update_timesteps=args.update_timesteps) + num_exp_per_maker = args.num_episodes * args.max_timesteps // args.update_timesteps * \ + args.max_epochs + 3 # +3 for fault tolerance maker_done_ref = experience_holder_ref.workingloop.remote(dataset, tokenize_fn, times=num_exp_per_maker) - + ray.get([trainer_done_ref, maker_done_ref]) # save model checkpoint after fitting trainer_ref.strategy_save_actor.remote(args.save_path, only_rank0=True) # save optimizer checkpoint on all ranks if args.need_optim_ckpt: - 
trainer_ref.strategy_save_actor_optim.remote('actor_optim_checkpoint_prompts_%d.pt' % (torch.cuda.current_device()), + trainer_ref.strategy_save_actor_optim.remote('actor_optim_checkpoint_prompts_%d.pt' % + (torch.cuda.current_device()), only_rank0=False) + if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('prompt_path') diff --git a/applications/Chat/coati/ray/example/1mmt_dummy.py b/applications/Chat/coati/ray/example/1mmt_dummy.py new file mode 100644 index 000000000000..68b666663b12 --- /dev/null +++ b/applications/Chat/coati/ray/example/1mmt_dummy.py @@ -0,0 +1,186 @@ +import argparse +import os +import socket +from copy import deepcopy +from functools import partial + +import ray +import torch +from coati.models.base import RewardModel +from coati.ray.src.detached_trainer_ppo import DetachedPPOTrainer +from coati.ray.src.experience_maker_holder import ExperienceMakerHolder +from coati.ray.src.utils import get_actor_from_args, get_critic_from_args, get_reward_model_from_args +from transformers import AutoTokenizer, BloomTokenizerFast +from transformers.models.gpt2.configuration_gpt2 import GPT2Config +from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer + + +def get_gpt_config(model_name: str) -> GPT2Config: + model_map = { + 's': GPT2Config(), + 'm': GPT2Config(n_embd=1024, n_layer=24, n_head=16), + 'l': GPT2Config(n_embd=1280, n_layer=36, n_head=20), + 'xl': GPT2Config(n_embd=1600, n_layer=48, n_head=25), + '2b': GPT2Config(n_embd=2048, n_layer=40, n_head=16), + '4b': GPT2Config(n_embd=2304, n_layer=64, n_head=16), + '6b': GPT2Config(n_embd=4096, n_layer=30, n_head=16), + '8b': GPT2Config(n_embd=4096, n_layer=40, n_head=16), + '10b': GPT2Config(n_embd=4096, n_layer=50, n_head=16), + '12b': GPT2Config(n_embd=4096, n_layer=60, n_head=16), + '15b': GPT2Config(n_embd=4096, n_layer=78, n_head=16), + '18b': GPT2Config(n_embd=4096, n_layer=90, n_head=16), + '20b': GPT2Config(n_embd=8192, n_layer=25, n_head=16), + 
'24b': GPT2Config(n_embd=8192, n_layer=30, n_head=16), + '28b': GPT2Config(n_embd=8192, n_layer=35, n_head=16), + '32b': GPT2Config(n_embd=8192, n_layer=40, n_head=16), + '36b': GPT2Config(n_embd=8192, n_layer=45, n_head=16), + '40b': GPT2Config(n_embd=8192, n_layer=50, n_head=16), + '175b': GPT2Config(n_positions=2048, n_embd=12288, n_layer=96, n_head=96), + } + try: + return model_map[model_name] + except KeyError: + raise ValueError(f'Unknown model "{model_name}"') + + +def get_free_port(): + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind(('', 0)) + return s.getsockname()[1] + + +def get_local_ip(): + with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: + s.connect(('8.8.8.8', 80)) + return s.getsockname()[0] + + +def main(args): + master_addr = str(get_local_ip()) + # trainer_env_info + trainer_port = str(get_free_port()) + env_info_trainers = [{ + 'local_rank': '0', + 'rank': str(rank), + 'world_size': str(args.num_trainers), + 'master_port': trainer_port, + 'master_addr': master_addr + } for rank in range(args.num_trainers)] + + # maker_env_info + maker_port = str(get_free_port()) + env_info_maker = { + 'local_rank': '0', + 'rank': '0', + 'world_size': '1', + 'master_port': maker_port, + 'master_addr': master_addr + } + + # configure tokenizer + tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + tokenizer.pad_token = tokenizer.eos_token + + # configure Trainer + trainer_refs = [ + DetachedPPOTrainer.options(name=f"trainer{i}", num_gpus=1, max_concurrency=2).remote( + experience_maker_holder_name_list=["maker1"], + strategy=args.trainer_strategy, + model=args.model, + env_info=env_info_trainer, + pretrained=args.pretrain, + lora_rank=args.lora_rank, + train_batch_size=args.train_batch_size, + buffer_limit=16, + experience_batch_size=args.experience_batch_size, + max_epochs=args.max_epochs, + # kwargs: + max_length=512, + do_sample=True, + temperature=1.0, + top_k=50, + pad_token_id=tokenizer.pad_token_id, + 
eos_token_id=tokenizer.eos_token_id, + eval_performance=True, + debug=args.debug, + ) for i, env_info_trainer in enumerate(env_info_trainers) + ] + + # configure Experience Maker + experience_holder_ref = ExperienceMakerHolder.options(name="maker1", num_gpus=1, max_concurrency=2).remote( + detached_trainer_name_list=[f'trainer{i}' for i in range(args.num_trainers)], + strategy=args.maker_strategy, + env_info=env_info_maker, + experience_batch_size=args.experience_batch_size, + kl_coef=0.1, + # kwargs: + max_length=512, + do_sample=True, + temperature=1.0, + top_k=50, + pad_token_id=tokenizer.pad_token_id, + eos_token_id=tokenizer.eos_token_id, + eval_performance=True, + use_cache=True, + debug=args.debug, + ) + + def init_inference_model(fn, model_name, pretrained): + model = fn(model_name, pretrained) + return model.half().cuda() + + # init maker locally + ray.get( + experience_holder_ref.initialize_experience_maker_local.remote( + initial_model_func=partial(init_inference_model, get_actor_from_args, args.model, args.pretrain), + reward_model_func=partial(init_inference_model, get_reward_model_from_args, args.model, args.pretrain), + actor_func=partial(init_inference_model, get_actor_from_args, args.model, args.pretrain), + critic_func=partial(init_inference_model, get_critic_from_args, args.model, args.pretrain), + )) + + # configure sampler + random_prompts = torch.randint(tokenizer.vocab_size, (1000, 400)) + + def tokenize_fn(texts): + # print(texts) + input_ids = torch.stack(texts).cuda() + # print(input_ids.shape) + attn_mask = torch.ones_like(input_ids) + return {'input_ids': input_ids, 'attention_mask': attn_mask} + + wait_tasks = [] + + for trainer_ref in trainer_refs: + wait_tasks.append( + trainer_ref.fit.remote(num_episodes=args.num_episodes, + max_timesteps=args.max_timesteps, + update_timesteps=args.update_timesteps)) + + num_exp_per_maker = args.num_episodes * args.max_timesteps // args.update_timesteps * \ + args.max_epochs * args.num_trainers + 3 # 
+3 for fault tolerance + wait_tasks.append(experience_holder_ref.workingloop.remote(random_prompts, tokenize_fn, times=num_exp_per_maker)) + + ray.get(wait_tasks) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--num_trainers', type=int, default=1) + parser.add_argument('--trainer_strategy', + choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'], + default='naive') + parser.add_argument('--maker_strategy', choices=['naive'], default='naive') + parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt']) + parser.add_argument('--pretrain', type=str, default=None) + parser.add_argument('--num_episodes', type=int, default=10) + parser.add_argument('--max_timesteps', type=int, default=10) + parser.add_argument('--update_timesteps', type=int, default=10) + parser.add_argument('--max_epochs', type=int, default=5) + parser.add_argument('--train_batch_size', type=int, default=8) + parser.add_argument('--experience_batch_size', type=int, default=8) + parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank") + + parser.add_argument('--debug', action='store_true') + args = parser.parse_args() + ray.init(namespace=os.environ["RAY_NAMESPACE"]) + main(args) diff --git a/applications/Chat/coati/ray/src/detached_replay_buffer.py b/applications/Chat/coati/ray/src/detached_replay_buffer.py index 855eee48c5a5..4bc74bb878fd 100644 --- a/applications/Chat/coati/ray/src/detached_replay_buffer.py +++ b/applications/Chat/coati/ray/src/detached_replay_buffer.py @@ -1,22 +1,24 @@ -import torch +import asyncio +import copy import random -from typing import List, Any -# from torch.multiprocessing import Queue -from ray.util.queue import Queue +from threading import Lock +from typing import Any, List + import ray -import asyncio +import torch from coati.experience_maker.base import Experience -from coati.replay_buffer.utils import BufferItem, make_experience_batch, 
split_experience_batch from coati.replay_buffer import ReplayBuffer -from threading import Lock -import copy +from coati.replay_buffer.utils import BufferItem, make_experience_batch, split_experience_batch +# from torch.multiprocessing import Queue +from ray.util.queue import Queue + class DetachedReplayBuffer: ''' - Detached replay buffer. Share Experience across workers on the same node. - Therefore a trainer node is expected to have only one instance. + Detached replay buffer. Share Experience across workers on the same node. + Therefore a trainer node is expected to have only one instance. It is ExperienceMakerHolder's duty to call append(exp) method, remotely. - + Args: sample_batch_size: Batch size when sampling. Exp won't enqueue until they formed a batch. tp_world_size: Number of workers in the same tp group @@ -24,13 +26,16 @@ class DetachedReplayBuffer: cpu_offload: Whether to offload experience to cpu when sampling. Defaults to True. ''' - def __init__(self, sample_batch_size: int, tp_world_size: int = 1, limit : int = 0, cpu_offload: bool = True) -> None: + def __init__(self, + sample_batch_size: int, + tp_world_size: int = 1, + limit: int = 0, + cpu_offload: bool = True) -> None: self.cpu_offload = cpu_offload self.sample_batch_size = sample_batch_size self.limit = limit - self.items = Queue(self.limit, actor_options={"num_cpus":1}) - self.batch_collector : List[BufferItem] = [] - + self.items = Queue(self.limit, actor_options={"num_cpus": 1}) + self.batch_collector: List[BufferItem] = [] ''' Workers in the same tp group share this buffer and need same sample for one step. Therefore a held_sample should be returned tp_world_size times before it could be dropped. 
@@ -62,9 +67,9 @@ def clear(self) -> None: self.items = Queue(self.limit) self.worker_state = [False] * self.tp_world_size self.batch_collector = [] - + @torch.no_grad() - def sample(self, worker_rank = 0, to_device = "cpu") -> Experience: + def sample(self, worker_rank=0, to_device="cpu") -> Experience: self._worker_state_lock.acquire() if not any(self.worker_state): self.held_sample = self._sample_and_erase() @@ -85,4 +90,4 @@ def _sample_and_erase(self) -> Experience: def get_length(self) -> int: ret = self.items.qsize() - return ret \ No newline at end of file + return ret diff --git a/applications/Chat/coati/ray/src/detached_trainer_base.py b/applications/Chat/coati/ray/src/detached_trainer_base.py index f5e52e8a3b3a..3558f58017a6 100644 --- a/applications/Chat/coati/ray/src/detached_trainer_base.py +++ b/applications/Chat/coati/ray/src/detached_trainer_base.py @@ -1,17 +1,19 @@ +import os from abc import ABC, abstractmethod from typing import Any, Callable, Dict, List, Optional, Union -from tqdm import tqdm -from coati.trainer.callbacks import Callback -from coati.experience_maker import Experience + import ray -import os +from coati.experience_maker import Experience +from coati.trainer.callbacks import Callback +from tqdm import tqdm from .detached_replay_buffer import DetachedReplayBuffer from .utils import is_rank_0 + class DetachedTrainer(ABC): ''' - Base class for detached rlhf trainers. + Base class for detached rlhf trainers. 'detach' means that the experience maker is detached compared to a normal Trainer. 
Please set name attribute during init: >>> trainer = DetachedTrainer.options(..., name = "xxx", ...).remote() @@ -36,9 +38,12 @@ def __init__(self, max_epochs: int = 1, dataloader_pin_memory: bool = True, callbacks: List[Callback] = [], + debug: bool = False, **generate_kwargs) -> None: super().__init__() - self.detached_replay_buffer = DetachedReplayBuffer(train_batch_size, limit=buffer_limit, cpu_offload=buffer_cpu_offload) + self.detached_replay_buffer = DetachedReplayBuffer(train_batch_size, + limit=buffer_limit, + cpu_offload=buffer_cpu_offload) self.experience_batch_size = experience_batch_size self.max_epochs = max_epochs self.dataloader_pin_memory = dataloader_pin_memory @@ -46,11 +51,8 @@ def __init__(self, self.generate_kwargs = generate_kwargs self.target_holder_name_list = experience_maker_holder_name_list self.target_holder_list = [] - - if 'debug' in self.generate_kwargs and self.generate_kwargs['debug'] == True: - self._debug = True - else: - self._debug = False + + self._debug = debug def update_target_holder_list(self, experience_maker_holder_name_list): self.target_holder_name_list = experience_maker_holder_name_list @@ -69,13 +71,15 @@ def training_step(self, experience: Experience) -> Dict[str, Any]: def _learn(self): pbar = tqdm(range(self.max_epochs), desc='Train epoch', disable=not is_rank_0()) for _ in pbar: - if self._debug: + if self._debug: print("[trainer] sampling exp") experience = self._buffer_sample() - if self._debug: + if self._debug: print("[trainer] training step") + self._on_learn_batch_start() metrics = self.training_step(experience) - if self._debug: + self._on_learn_batch_end(metrics, experience) + if self._debug: print("[trainer] step over") pbar.set_postfix(metrics) @@ -90,18 +94,19 @@ def fit(self, num_episodes: int = 50000, max_timesteps: int = 500, update_timest self._update_remote_makers() self._on_episode_end(episode) self._on_fit_end() + self._on_finish() @ray.method(concurrency_group="buffer_length") def 
buffer_get_length(self): # called by ExperienceMakerHolder - if self._debug: + if self._debug: print("[trainer] telling length") return self.detached_replay_buffer.get_length() @ray.method(concurrency_group="buffer_append") def buffer_append(self, experience: Experience): # called by ExperienceMakerHolder - if self._debug: + if self._debug: print(f"[trainer] receiving exp.") self.detached_replay_buffer.append(experience) @@ -124,3 +129,24 @@ def _on_episode_start(self, episode: int) -> None: def _on_episode_end(self, episode: int) -> None: for callback in self.callbacks: callback.on_episode_end(episode) + + def _on_learn_epoch_start(self, epoch: int) -> None: + for callback in self.callbacks: + callback.on_learn_epoch_start(epoch) + + def _on_learn_epoch_end(self, epoch: int) -> None: + for callback in self.callbacks: + callback.on_learn_epoch_end(epoch) + + def _on_learn_batch_start(self) -> None: + for callback in self.callbacks: + callback.on_learn_batch_start() + + def _on_learn_batch_end(self, metrics: dict, experience: Experience) -> None: + for callback in self.callbacks: + callback.on_learn_batch_end(metrics, experience) + + def _on_finish(self) -> None: + for callback in self.callbacks: + if hasattr(callback, 'on_finish'): + callback.on_finish() diff --git a/applications/Chat/coati/ray/src/detached_trainer_ppo.py b/applications/Chat/coati/ray/src/detached_trainer_ppo.py index 071f0ddab2b9..2850f1cf1d37 100644 --- a/applications/Chat/coati/ray/src/detached_trainer_ppo.py +++ b/applications/Chat/coati/ray/src/detached_trainer_ppo.py @@ -1,26 +1,37 @@ from typing import Any, Callable, Dict, List, Optional -import torch -from torch.optim import Adam +import ray +import torch from coati.experience_maker import Experience, NaiveExperienceMaker from coati.models.base import Actor, Critic from coati.models.generation_utils import update_model_kwargs_fn from coati.models.loss import PolicyLoss, ValueLoss -from coati.trainer.strategies import ColossalAIStrategy, 
DDPStrategy, NaiveStrategy, Strategy from coati.trainer.callbacks import Callback +from coati.trainer.callbacks.performance_evaluator import TrainerPerformaceEvaluator +from coati.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy, Strategy +from torch.optim import Adam from colossalai.nn.optimizer import HybridAdam -import ray - - -from .utils import is_rank_0, get_actor_from_args, get_critic_from_args, get_strategy_from_args, set_dist_env, \ - state_dict_to - from .detached_trainer_base import DetachedTrainer - - -@ray.remote(concurrency_groups={"buffer_length": 1, "buffer_append": 1, "buffer_sample": 1, "model_io": 1, "compute": 1}) +from .utils import ( + get_actor_from_args, + get_critic_from_args, + get_model_numel, + get_strategy_from_args, + is_rank_0, + set_dist_env, + state_dict_to, +) + + +@ray.remote(concurrency_groups={ + "buffer_length": 1, + "buffer_append": 1, + "buffer_sample": 1, + "model_io": 1, + "compute": 1 +}) class DetachedPPOTrainer(DetachedTrainer): ''' Detached Trainer for PPO algorithm @@ -42,26 +53,29 @@ class DetachedPPOTrainer(DetachedTrainer): generate_kwargs (dict, optional): the kwargs to use while model generating ''' - def __init__(self, - experience_maker_holder_name_list: List[str], - strategy: str, - model: str, - pretrained: str = None, - lora_rank: int = 0, - cr_model: str = None, # if not None, use below cr settings for critic - cr_pretrained: str = None, - cr_lora_rank: int = 0, - env_info: Dict[str, str] = None, - train_batch_size: int = 8, - buffer_limit: int = 0, - buffer_cpu_offload: bool = True, - eps_clip: float = 0.2, - value_clip: float = 0.4, - experience_batch_size: int = 8, - max_epochs: int = 10, - dataloader_pin_memory: bool = True, - callbacks: List[Callback] = [], - **generate_kwargs) -> None: + def __init__( + self, + experience_maker_holder_name_list: List[str], + strategy: str, + model: str, + pretrained: str = None, + lora_rank: int = 0, + cr_model: str = None, # if not None, use 
below cr settings for critic + cr_pretrained: str = None, + cr_lora_rank: int = 0, + env_info: Dict[str, str] = None, + train_batch_size: int = 8, + buffer_limit: int = 0, + buffer_cpu_offload: bool = True, + eps_clip: float = 0.2, + value_clip: float = 0.4, + experience_batch_size: int = 8, + max_epochs: int = 10, + dataloader_pin_memory: bool = True, + callbacks: List[Callback] = [], + eval_performance: bool = False, + debug: bool = False, + **generate_kwargs) -> None: # set environment variables if env_info: set_dist_env(env_info=env_info) @@ -77,10 +91,15 @@ def __init__(self, self.actor = get_actor_from_args(model, pretrained, lora_rank) self.critic = get_critic_from_args(cr_model, cr_pretrained, cr_lora_rank) - if strategy != 'colossalai_gemini': - self.actor.to(torch.cuda.current_device()) #.to(torch.float16) - self.critic.to(torch.cuda.current_device()) #.to(torch.float16) + if eval_performance: + actor_numel = get_model_numel(self.actor) + critic_numel = get_model_numel(self.critic) + evaluator = TrainerPerformaceEvaluator(actor_numel, critic_numel) + callbacks = callbacks + [evaluator] + if strategy != 'colossalai_gemini': + self.actor.to(torch.cuda.current_device()) # .to(torch.float16) + self.critic.to(torch.cuda.current_device()) # .to(torch.float16) if strategy.startswith('colossalai'): self.actor_optim = HybridAdam(self.actor.parameters(), lr=1e-7) @@ -105,6 +124,7 @@ def __init__(self, max_epochs=max_epochs, dataloader_pin_memory=dataloader_pin_memory, callbacks=callbacks, + debug=debug, **generate_kwargs) # for remote maker initialization @@ -114,33 +134,39 @@ def __init__(self, self._cr_pretrained = cr_pretrained @ray.method(concurrency_group="model_io") + @torch.no_grad() def _update_remote_makers(self, **config): # TODO: balance duties if is_rank_0(): self.update_target_holder_list(self.target_holder_name_list) - with torch.no_grad(): # actor: - # mark start - for target_holder in self.target_holder_list: - 
target_holder.update_experience_maker.remote(chunk_start=True) - # sending loop - for state_dict_shard in self._get_model_state_dict_shard(self.strategy._unwrap_actor(self.actor), **config): - for target_holder in self.target_holder_list: - target_holder.update_experience_maker.remote(new_actor_state_dict = state_dict_shard) - # mark end - for target_holder in self.target_holder_list: - target_holder.update_experience_maker.remote(chunk_end=True) - # critic - # mark start + if is_rank_0(): + # mark start + for target_holder in self.target_holder_list: + target_holder.update_experience_maker.remote(chunk_start=True) + # sending loop + for state_dict_shard in self._get_model_state_dict_shard(self.strategy._unwrap_model(self.actor), **config): + if is_rank_0(): for target_holder in self.target_holder_list: - target_holder.update_experience_maker.remote(chunk_start=True) - # sending loop - for state_dict_shard in self._get_model_state_dict_shard(self.strategy._unwrap_critic(self.critic), **config): - for target_holder in self.target_holder_list: - target_holder.update_experience_maker.remote(new_critic_state_dict = state_dict_shard) - # mark end + target_holder.update_experience_maker.remote(new_actor_state_dict=state_dict_shard) + if is_rank_0(): + # mark end + for target_holder in self.target_holder_list: + target_holder.update_experience_maker.remote(chunk_end=True) + # critic + if is_rank_0(): + # mark start + for target_holder in self.target_holder_list: + target_holder.update_experience_maker.remote(chunk_start=True) + # sending loop + for state_dict_shard in self._get_model_state_dict_shard(self.strategy._unwrap_critic(self.critic), **config): + if is_rank_0(): for target_holder in self.target_holder_list: - target_holder.update_experience_maker.remote(chunk_end=True) + target_holder.update_experience_maker.remote(new_critic_state_dict=state_dict_shard) + if is_rank_0(): + # mark end + for target_holder in self.target_holder_list: + 
target_holder.update_experience_maker.remote(chunk_end=True) @ray.method(concurrency_group="model_io") def initialize_remote_makers(self, **config): @@ -148,23 +174,29 @@ def initialize_remote_makers(self, **config): if is_rank_0(): self.update_target_holder_list(self.target_holder_name_list) with torch.no_grad(): - # actor / initial_model: + # actor / initial_model: # mark start for target_holder in self.target_holder_list: - target_holder.initialize_experience_maker.remote(actor_model=self._model_str,actor_pretrained=self._pretrained,chunk_start=True) + target_holder.initialize_experience_maker.remote(actor_model=self._model_str, + actor_pretrained=self._pretrained, + chunk_start=True) # sending loop - for state_dict_shard in self._get_model_state_dict_shard(self.strategy._unwrap_actor(self.actor), **config): + for state_dict_shard in self._get_model_state_dict_shard(self.strategy._unwrap_actor(self.actor), + **config): for target_holder in self.target_holder_list: target_holder.initialize_experience_maker.remote(actor_state_dict=state_dict_shard) # mark end for target_holder in self.target_holder_list: target_holder.initialize_experience_maker.remote(actor_model=self._model_str, chunk_end=True) # critic / reward_model: - # mark start + # mark start for target_holder in self.target_holder_list: - target_holder.initialize_experience_maker.remote(critic_model=self._cr_model_str,critic_pretrained=self._cr_pretrained,chunk_start=True) + target_holder.initialize_experience_maker.remote(critic_model=self._cr_model_str, + critic_pretrained=self._cr_pretrained, + chunk_start=True) # sending loop - for state_dict_shard in self._get_model_state_dict_shard(self.strategy._unwrap_critic(self.critic), **config): + for state_dict_shard in self._get_model_state_dict_shard(self.strategy._unwrap_critic(self.critic), + **config): for target_holder in self.target_holder_list: target_holder.initialize_experience_maker.remote(critic_state_dict=state_dict_shard) # mark end diff --git 
a/applications/Chat/coati/ray/src/experience_maker_holder.py b/applications/Chat/coati/ray/src/experience_maker_holder.py index 67b89a68119a..b1acdbb5494d 100644 --- a/applications/Chat/coati/ray/src/experience_maker_holder.py +++ b/applications/Chat/coati/ray/src/experience_maker_holder.py @@ -1,22 +1,31 @@ -import torch +import os +import time +import tracemalloc +from copy import deepcopy +from threading import Lock from typing import Any, Callable, Dict, List, Optional, Union + import ray -from ray.exceptions import GetTimeoutError -from torch import Tensor +import torch import torch.nn as nn +from coati.experience_maker import Experience, ExperienceMaker, NaiveExperienceMaker from coati.models.base import Actor, Critic, RewardModel -from coati.trainer.strategies.sampler import DistributedSampler +from coati.trainer.callbacks import Callback +from coati.trainer.callbacks.performance_evaluator import ExperienceMakerPerformanceEvaluator from coati.trainer.strategies import Strategy -from coati.experience_maker import NaiveExperienceMaker, Experience, ExperienceMaker - -from copy import deepcopy -from threading import Lock -import time -import os -import tracemalloc +from coati.trainer.strategies.sampler import DistributedSampler +from ray.exceptions import GetTimeoutError +from torch import Tensor -from .utils import is_rank_0, get_strategy_from_args, set_dist_env, get_actor_from_args, get_critic_from_args, \ - get_reward_model_from_args +from .utils import ( + get_actor_from_args, + get_critic_from_args, + get_model_numel, + get_reward_model_from_args, + get_strategy_from_args, + is_rank_0, + set_dist_env, +) @ray.remote(concurrency_groups={"experience_io": 1, "model_io": 1, "compute": 1}) @@ -24,7 +33,7 @@ class ExperienceMakerHolder: ''' Args: detached_trainer_name_list: str list to get ray actor handles - strategy: + strategy: experience_batch_size: batch size of generated experience kl_coef: the coefficient of kl divergence loss ''' @@ -35,6 +44,9 @@ def 
__init__(self, env_info: Dict[str, str] = None, experience_batch_size: int = 8, kl_coef: float = 0.1, + callbacks: List[Callback] = [], + eval_performance: bool = False, + debug: bool = False, **generate_kwargs): # set environment variables if env_info: @@ -49,6 +61,8 @@ def __init__(self, self.generate_kwargs = generate_kwargs actor, critic, reward_model, initial_model = None, None, None, None self.experience_maker = NaiveExperienceMaker(actor, critic, reward_model, initial_model, self.kl_coef) + self.callbacks = callbacks + self.eval_performance = eval_performance self._model_visit_lock = Lock() self._initial_model_initialized = False @@ -56,10 +70,7 @@ def __init__(self, self._actor_initialized = False self._critic_initialized = False - if 'debug' in self.generate_kwargs and self.generate_kwargs['debug'] == True: - self._debug = True - else: - self._debug = False + self._debug = debug self.target_auto_balance = False if self._debug: @@ -68,6 +79,17 @@ def __init__(self, def _get_ready(self): while not self._fully_initialized(): time.sleep(1.0) + # setup performance evaluator + if self.eval_performance: + actor_numel = get_model_numel(self.experience_maker.actor) + critic_numel = get_model_numel(self.experience_maker.critic) + initial_model_numel = get_model_numel(self.experience_maker.initial_model) + reward_model_numel = get_model_numel(self.experience_maker.reward_model) + evaluator = ExperienceMakerPerformanceEvaluator(actor_numel, critic_numel, initial_model_numel, + reward_model_numel) + self.callbacks.append(evaluator) + + self.generate_kwargs = _set_default_generate_kwargs(self.generate_kwargs, self.experience_maker.actor) def _fully_initialized(self): if not self._initial_model_initialized: @@ -139,9 +161,12 @@ def workingloop(self, dataset, tokenizer: Optional[Callable[[Any], dict]] = None else: inputs = rand_prompts self._model_visit_lock.acquire() + self._on_make_experience_start() experience = self._make_experience(inputs=inputs) + 
self._on_make_experience_end(experience) self._model_visit_lock.release() self._send_experience(experience=experience) + self._on_finish() @ray.method(concurrency_group="model_io") def initialize_experience_maker(self, @@ -158,7 +183,7 @@ def initialize_experience_maker(self, chunk_start: Set True at the first call. Before sending state_dict calls chunk_end: Set True at the last call. After sending state_dict calls. - TODO: load_state_dict integrate with model-sharding strategy + TODO: load_state_dict integrate with model-sharding strategy ''' if self._fully_initialized(): return @@ -170,13 +195,17 @@ def initialize_experience_maker(self, # (csric) any better way to get model structure? with self.strategy.model_init_context(): if not self._actor_initialized and actor_model is not None: - self.experience_maker.actor = get_actor_from_args(actor_model, actor_pretrained).half().requires_grad_(False) + self.experience_maker.actor = get_actor_from_args(actor_model, + actor_pretrained).half().requires_grad_(False) if not self._critic_initialized and critic_model is not None: - self.experience_maker.critic = get_critic_from_args(critic_model, critic_pretrained).half().requires_grad_(False) + self.experience_maker.critic = get_critic_from_args( + critic_model, critic_pretrained).half().requires_grad_(False) if not self._initial_model_initialized and actor_model is not None: - self.experience_maker.initial_model = get_actor_from_args(actor_model, actor_pretrained).half().requires_grad_(False) + self.experience_maker.initial_model = get_actor_from_args( + actor_model, actor_pretrained).half().requires_grad_(False) if not self._reward_model_initialized and critic_model is not None: - self.experience_maker.reward_model = get_reward_model_from_args(critic_model, critic_pretrained).half().requires_grad_(False) + self.experience_maker.reward_model = get_reward_model_from_args( + critic_model, critic_pretrained).half().requires_grad_(False) with torch.no_grad(): if not 
self._actor_initialized and actor_state_dict is not None: @@ -188,25 +217,27 @@ def initialize_experience_maker(self, if not self._reward_model_initialized and critic_state_dict is not None: self.experience_maker.reward_model.load_state_dict(critic_state_dict, strict=False) - if chunk_end: with torch.no_grad(): if actor_model is not None: if not self._actor_initialized: - self.experience_maker.actor = self.strategy.prepare(self.experience_maker.actor.to(torch.cuda.current_device())) + self.experience_maker.actor = self.strategy.prepare( + self.experience_maker.actor.to(torch.cuda.current_device())) if not self._initial_model_initialized: - self.experience_maker.initial_model = self.strategy.prepare(self.experience_maker.initial_model.to(torch.cuda.current_device())) + self.experience_maker.initial_model = self.strategy.prepare( + self.experience_maker.initial_model.to(torch.cuda.current_device())) self._actor_initialized = True self._initial_model_initialized = True if critic_model is not None: if not self._critic_initialized: - self.experience_maker.critic = self.strategy.prepare(self.experience_maker.critic.to(torch.cuda.current_device())) + self.experience_maker.critic = self.strategy.prepare( + self.experience_maker.critic.to(torch.cuda.current_device())) if not self._reward_model_initialized: - self.experience_maker.reward_model = self.strategy.prepare(self.experience_maker.reward_model.to(torch.cuda.current_device())) + self.experience_maker.reward_model = self.strategy.prepare( + self.experience_maker.reward_model.to(torch.cuda.current_device())) self._critic_initialized = True self._reward_model_initialized = True - def initialize_experience_maker_local(self, initial_model_func=None, reward_model_func=None, @@ -241,10 +272,10 @@ def update_experience_maker(self, called by trainer chunk_start: Set True at the first call. Before sending state_dict calls chunk_end: Set True at the last call. After sending state_dict calls. 
- + TODO: load_state_dict integrate with model-sharding strategy ''' - _watch_memory = True + _watch_memory = self._debug if chunk_start: if self._debug: print("[maker] UPDATE ") @@ -264,3 +295,29 @@ def update_experience_maker(self, current, peak = tracemalloc.get_traced_memory() print(f"Current memory usage is {current / 10**6}MB; Peak was {peak / 10**6}MB") tracemalloc.stop() + + def _on_make_experience_start(self) -> None: + for callback in self.callbacks: + callback.on_make_experience_start() + + def _on_make_experience_end(self, experience: Experience) -> None: + for callback in self.callbacks: + callback.on_make_experience_end(experience) + + def _on_finish(self) -> None: + for callback in self.callbacks: + if hasattr(callback, 'on_finish'): + callback.on_finish() + + +def _set_default_generate_kwargs(generate_kwargs: dict, actor: Actor) -> None: + origin_model = actor.model + new_kwargs = {**generate_kwargs} + # use huggingface models method directly + if 'prepare_inputs_fn' not in generate_kwargs and hasattr(origin_model, 'prepare_inputs_for_generation'): + new_kwargs['prepare_inputs_fn'] = origin_model.prepare_inputs_for_generation + + if 'update_model_kwargs_fn' not in generate_kwargs and hasattr(origin_model, '_update_model_kwargs_for_generation'): + new_kwargs['update_model_kwargs_fn'] = origin_model._update_model_kwargs_for_generation + + return new_kwargs diff --git a/applications/Chat/coati/ray/src/utils.py b/applications/Chat/coati/ray/src/utils.py index 827c2b8c6dc9..1b14e1c3f1cb 100644 --- a/applications/Chat/coati/ray/src/utils.py +++ b/applications/Chat/coati/ray/src/utils.py @@ -1,65 +1,71 @@ -import torch.distributed as dist +import os from typing import Any, Callable, Dict, List, Optional -from coati.models.bloom import BLOOMActor, BLOOMCritic, BLOOMRM -from coati.models.gpt import GPTActor, GPTCritic, GPTRM -from coati.models.opt import OPTActor, OPTCritic, OPTRM -from coati.models.roberta import RoBERTaRM, RoBERTaActor, RoBERTaCritic -from 
coati.models.llama import LlamaActor, LlamaCritic, LlamaRM -from coati.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy import torch -import os +import torch.distributed as dist +import torch.nn as nn +from coati.models.bloom import BLOOMRM, BLOOMActor, BLOOMCritic +from coati.models.gpt import GPTRM, GPTActor, GPTCritic +from coati.models.llama import LlamaActor, LlamaCritic, LlamaRM +from coati.models.opt import OPTRM, OPTActor, OPTCritic +from coati.models.roberta import RoBERTaActor, RoBERTaCritic, RoBERTaRM +from coati.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy +from coati.utils import prepare_llama_tokenizer_and_embedding +from transformers import AutoTokenizer, BloomTokenizerFast, GPT2Tokenizer, LlamaTokenizer, RobertaTokenizer def is_rank_0() -> bool: return not dist.is_initialized() or dist.get_rank() == 0 -def get_actor_from_args(model: str, pretrained: str = None, lora_rank = 0): +def get_actor_from_args(model: str, pretrained: str = None, config=None, lora_rank=0): if model == 'gpt2': - actor = GPTActor(pretrained=pretrained, lora_rank=lora_rank) + actor = GPTActor(pretrained=pretrained, config=config, lora_rank=lora_rank) elif model == 'bloom': - actor = BLOOMActor(pretrained=pretrained, lora_rank=lora_rank) + actor = BLOOMActor(pretrained=pretrained, config=config, lora_rank=lora_rank) elif model == 'opt': - actor = OPTActor(pretrained=pretrained, lora_rank=lora_rank) + actor = OPTActor(pretrained=pretrained, config=config, lora_rank=lora_rank) elif model == 'llama': - actor = LlamaActor(pretrained=pretrained, lora_rank=lora_rank) + actor = LlamaActor(pretrained=pretrained, config=config, lora_rank=lora_rank) elif model == 'roberta': - actor = RoBERTaActor(pretrained=pretrained, lora_rank=lora_rank) + actor = RoBERTaActor(pretrained=pretrained, config=config, lora_rank=lora_rank) else: raise ValueError(f'Unsupported actor model "{model}"') return actor -def get_critic_from_args(model: str, 
pretrained: str = None, lora_rank = 0): + +def get_critic_from_args(model: str, pretrained: str = None, config=None, lora_rank=0): if model == 'gpt2': - critic = GPTCritic(pretrained=pretrained, lora_rank=lora_rank, use_action_mask=True) + critic = GPTCritic(pretrained=pretrained, lora_rank=lora_rank, config=config, use_action_mask=True) elif model == 'bloom': - critic = BLOOMCritic(pretrained=pretrained, lora_rank=lora_rank, use_action_mask=True) + critic = BLOOMCritic(pretrained=pretrained, lora_rank=lora_rank, config=config, use_action_mask=True) elif model == 'opt': - critic = OPTCritic(pretrained=pretrained, lora_rank=lora_rank, use_action_mask=True) + critic = OPTCritic(pretrained=pretrained, lora_rank=lora_rank, config=config, use_action_mask=True) elif model == 'llama': - critic = LlamaCritic(pretrained=pretrained, lora_rank=lora_rank, use_action_mask=True) + critic = LlamaCritic(pretrained=pretrained, lora_rank=lora_rank, config=config, use_action_mask=True) elif model == 'roberta': - critic = RoBERTaCritic(pretrained=pretrained, lora_rank=lora_rank, use_action_mask=True) + critic = RoBERTaCritic(pretrained=pretrained, lora_rank=lora_rank, config=config, use_action_mask=True) else: raise ValueError(f'Unsupported reward model "{model}"') return critic -def get_reward_model_from_args(model: str, pretrained: str = None): + +def get_reward_model_from_args(model: str, pretrained: str = None, config=None): if model == 'gpt2': - reward_model = GPTRM(pretrained=pretrained) + reward_model = GPTRM(pretrained=pretrained, config=config) elif model == 'bloom': - reward_model = BLOOMRM(pretrained=pretrained) + reward_model = BLOOMRM(pretrained=pretrained, config=config) elif model == 'opt': - reward_model = OPTRM(pretrained=pretrained) + reward_model = OPTRM(pretrained=pretrained, config=config) elif model == 'llama': - reward_model = LlamaRM(pretrained=pretrained) + reward_model = LlamaRM(pretrained=pretrained, config=config) elif model == 'roberta': - reward_model = 
RoBERTaRM(pretrained=pretrained) + reward_model = RoBERTaRM(pretrained=pretrained, config=config) else: raise ValueError(f'Unsupported reward model "{model}"') return reward_model + def get_strategy_from_args(strategy: str): if strategy == 'naive': strategy_ = NaiveStrategy() @@ -74,9 +80,6 @@ def get_strategy_from_args(strategy: str): return strategy_ -from transformers import AutoTokenizer, BloomTokenizerFast, GPT2Tokenizer, LlamaTokenizer, RobertaTokenizer -from coati.utils import prepare_llama_tokenizer_and_embedding - def get_tokenizer_from_args(model: str, **kwargs): if model == 'gpt2': tokenizer = GPT2Tokenizer.from_pretrained('gpt2') @@ -95,6 +98,7 @@ def get_tokenizer_from_args(model: str, **kwargs): tokenizer.pad_token = tokenizer.eos_token return tokenizer + def set_dist_env(env_info: Dict[str, str]): os.environ["RANK"] = env_info['rank'] os.environ["LOCAL_RANK"] = env_info['local_rank'] @@ -103,11 +107,18 @@ def set_dist_env(env_info: Dict[str, str]): os.environ['MASTER_ADDR'] = env_info['master_addr'] -def state_dict_to(state_dict: Dict[str, Any], dtype: torch.dtype = torch.float16, device: torch.device = torch.device('cpu')): +def state_dict_to(state_dict: Dict[str, Any], + dtype: torch.dtype = torch.float16, + device: torch.device = torch.device('cpu')): ''' keep state_dict intact ''' new_state_dict = {} for k, v in state_dict.items(): - new_state_dict[k] = v.to(dtype = dtype, device = device) - return new_state_dict \ No newline at end of file + new_state_dict[k] = v.to(dtype=dtype, device=device) + return new_state_dict + + +def get_model_numel(model: nn.Module) -> int: + numel = sum(p.numel() for p in model.parameters()) + return numel diff --git a/applications/Chat/coati/trainer/callbacks/performance_evaluator.py b/applications/Chat/coati/trainer/callbacks/performance_evaluator.py index 5ca44a52d6e7..0aebd2bf6280 100644 --- a/applications/Chat/coati/trainer/callbacks/performance_evaluator.py +++ 
b/applications/Chat/coati/trainer/callbacks/performance_evaluator.py @@ -29,6 +29,95 @@ def all_reduce_mean(x: float, world_size: int) -> float: return tensor.item() +class ExperienceMakerPerformanceEvaluator(Callback): + + def __init__(self, actor_num_params: int, critic_num_params: int, initial_model_num_params: int, + reward_model_num_params: int) -> None: + super().__init__() + self.world_size = get_world_size() + self.actor_num_params = actor_num_params + self.critic_num_params = critic_num_params + self.initial_model_num_params = initial_model_num_params + self.reward_model_num_params = reward_model_num_params + + self.make_experience_duration: float = 0. + self.make_experience_start_time: Optional[float] = None + self.make_experience_num_samples: int = 0 + self.make_experience_flop: int = 0 + + def on_make_experience_start(self) -> None: + self.make_experience_start_time = time() + + def on_make_experience_end(self, experience: Experience) -> None: + self.make_experience_duration += time() - self.make_experience_start_time + + batch_size, seq_len = experience.sequences.shape + + self.make_experience_num_samples += batch_size + + # actor generate + num_actions = experience.action_mask.size(1) + input_len = seq_len - num_actions + total_seq_len = (input_len + seq_len - 1) * num_actions / 2 + self.make_experience_flop += self.actor_num_params * batch_size * total_seq_len * 2 + # actor forward + self.make_experience_flop += self.actor_num_params * batch_size * seq_len * 2 + # critic forward + self.make_experience_flop += self.critic_num_params * batch_size * seq_len * 2 + # initial model forward + self.make_experience_flop += self.initial_model_num_params * batch_size * seq_len * 2 + # reward model forward + self.make_experience_flop += self.reward_model_num_params * batch_size * seq_len * 2 + + def on_finish(self) -> None: + avg_make_experience_duration = all_reduce_mean(self.make_experience_duration, self.world_size) + + avg_make_experience_throughput = 
self.make_experience_num_samples / (avg_make_experience_duration + 1e-12) + avg_make_experience_tflops = self.make_experience_flop / 1e12 / (avg_make_experience_duration + 1e-12) + + print_rank_0( + f'Making experience throughput: {avg_make_experience_throughput:.3f} samples/sec, TFLOPS: {avg_make_experience_tflops:.3f}' + ) + + +class TrainerPerformaceEvaluator(Callback): + + def __init__(self, actor_num_params: int, critic_num_params: int, enable_grad_checkpoint: bool = False) -> None: + super().__init__() + self.world_size = get_world_size() + self.actor_num_params = actor_num_params + self.critic_num_params = critic_num_params + self.enable_grad_checkpoint = enable_grad_checkpoint + + self.learn_duration: float = 0. + self.learn_start_time: Optional[float] = None + self.learn_num_samples: int = 0 + self.learn_flop: int = 0 + + def on_learn_batch_start(self) -> None: + self.learn_start_time = time() + + def on_learn_batch_end(self, metrics: dict, experience: Experience) -> None: + self.learn_duration += time() - self.learn_start_time + + batch_size, seq_len = experience.sequences.shape + + self.learn_num_samples += batch_size + + # actor forward-backward, 3 means forward(1) + backward(2) + self.learn_flop += self.actor_num_params * batch_size * seq_len * 2 * (3 + int(self.enable_grad_checkpoint)) + # critic forward-backward + self.learn_flop += self.critic_num_params * batch_size * seq_len * 2 * (3 + int(self.enable_grad_checkpoint)) + + def on_finish(self) -> None: + avg_learn_duration = all_reduce_mean(self.learn_duration, self.world_size) + + avg_learn_throughput = self.learn_num_samples / (avg_learn_duration + 1e-12) + avg_learn_tflops = self.learn_flop / 1e12 / (avg_learn_duration + 1e-12) + + print_rank_0(f'Learning throughput: {avg_learn_throughput:.3f} samples/sec, TFLOPS: {avg_learn_tflops:.3f}') + + class PerformanceEvaluator(Callback): """ Callback for valuate the performance of the model. 
diff --git a/applications/Chat/coati/trainer/ppo.py b/applications/Chat/coati/trainer/ppo.py index 2db604fc9b74..89d708456c61 100644 --- a/applications/Chat/coati/trainer/ppo.py +++ b/applications/Chat/coati/trainer/ppo.py @@ -4,14 +4,13 @@ import torch.nn as nn from coati.experience_maker import Experience, NaiveExperienceMaker from coati.models.base import Actor, Critic -from coati.models.generation_utils import update_model_kwargs_fn from coati.models.loss import PolicyLoss, ValueLoss from coati.replay_buffer import NaiveReplayBuffer from torch import Tensor from torch.optim import Optimizer from torch.utils.data import DistributedSampler -from transformers.tokenization_utils_base import PreTrainedTokenizerBase from tqdm import tqdm +from transformers.tokenization_utils_base import PreTrainedTokenizerBase from .base import Trainer from .callbacks import Callback @@ -102,19 +101,16 @@ def _make_experience(self, inputs: Union[Tensor, Dict[str, Tensor]]) -> Experien def _sample_prompts(self, prompts) -> list: indices = list(range(len(prompts))) - sampled_indices = self.strategy.experience_sampler.choice( - indices, self.experience_batch_size, replace=False) + sampled_indices = self.strategy.experience_sampler.choice(indices, self.experience_batch_size, replace=False) return [prompts[i] for i in sampled_indices] def _learn(self): # replay buffer may be empty at first, we should rebuild at each training if not self.sample_replay_buffer: - dataloader = self.strategy.setup_dataloader( - self.replay_buffer, self.dataloader_pin_memory) + dataloader = self.strategy.setup_dataloader(self.replay_buffer, self.dataloader_pin_memory) device = torch.cuda.current_device() if self.sample_replay_buffer: - pbar = tqdm(range(self.max_epochs), desc='Train epoch', - disable=not is_rank_0()) + pbar = tqdm(range(self.max_epochs), desc='Train epoch', disable=not is_rank_0()) for _ in pbar: experience = self.replay_buffer.sample() metrics = self.training_step(experience) @@ -124,8 +120,7 
@@ def _learn(self): self._on_learn_epoch_start(epoch) if isinstance(dataloader.sampler, DistributedSampler): dataloader.sampler.set_epoch(epoch) - pbar = tqdm( - dataloader, desc=f'Train epoch [{epoch+1}/{self.max_epochs}]', disable=not is_rank_0()) + pbar = tqdm(dataloader, desc=f'Train epoch [{epoch+1}/{self.max_epochs}]', disable=not is_rank_0()) for experience in pbar: self._on_learn_batch_start() experience.to_device(device) @@ -152,10 +147,8 @@ def fit(self, time += 1 prompts = next(iter(self.prompt_dataloader)) self._on_make_experience_start() - self.experience_maker.initial_model.to( - torch.cuda.current_device()) - self.experience_maker.reward_model.to( - torch.cuda.current_device()) + self.experience_maker.initial_model.to(torch.cuda.current_device()) + self.experience_maker.reward_model.to(torch.cuda.current_device()) experience = self._make_experience(prompts) self._on_make_experience_end(experience) self.replay_buffer.append(experience) @@ -206,11 +199,17 @@ def training_step(self, experience: Experience) -> Dict[str, float]: self.critic_optim.zero_grad() return {'reward': experience.reward.mean().item()} - - def save_model(self, path: str, only_rank0: bool = False, tokenizer: Optional[PreTrainedTokenizerBase] = None) -> None: + + def save_model(self, + path: str, + only_rank0: bool = False, + tokenizer: Optional[PreTrainedTokenizerBase] = None) -> None: self.strategy.save_model(model=self.actor, path=path, only_rank0=only_rank0, tokenizer=tokenizer) - def save_model(self, path: str, only_rank0: bool = False, tokenizer: Optional[PreTrainedTokenizerBase] = None) -> None: + def save_model(self, + path: str, + only_rank0: bool = False, + tokenizer: Optional[PreTrainedTokenizerBase] = None) -> None: self.strategy.save_model(model=self.actor, path=path, only_rank0=only_rank0, tokenizer=tokenizer) @@ -221,7 +220,7 @@ def _set_default_generate_kwargs(strategy: Strategy, generate_kwargs: dict, acto if 'prepare_inputs_fn' not in generate_kwargs and 
hasattr(origin_model, 'prepare_inputs_for_generation'): new_kwargs['prepare_inputs_fn'] = origin_model.prepare_inputs_for_generation - if 'update_model_kwargs_fn' not in generate_kwargs: - new_kwargs['update_model_kwargs_fn'] = update_model_kwargs_fn + if 'update_model_kwargs_fn' not in generate_kwargs and hasattr(origin_model, '_update_model_kwargs_for_generation'): + new_kwargs['update_model_kwargs_fn'] = origin_model._update_model_kwargs_for_generation return new_kwargs From bf11014f967aa93804f61d4db5e3a409a05fa770 Mon Sep 17 00:00:00 2001 From: csric <59389055+CsRic@users.noreply.github.com> Date: Fri, 21 Apr 2023 18:24:59 +0800 Subject: [PATCH 03/26] split experience to send (#11) Co-authored-by: csric --- .../coati/ray/src/experience_maker_holder.py | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/applications/Chat/coati/ray/src/experience_maker_holder.py b/applications/Chat/coati/ray/src/experience_maker_holder.py index b1acdbb5494d..93624c2be921 100644 --- a/applications/Chat/coati/ray/src/experience_maker_holder.py +++ b/applications/Chat/coati/ray/src/experience_maker_holder.py @@ -10,6 +10,7 @@ import torch.nn as nn from coati.experience_maker import Experience, ExperienceMaker, NaiveExperienceMaker from coati.models.base import Actor, Critic, RewardModel +from coati.replay_buffer.utils import split_experience_batch, make_experience_batch, BufferItem from coati.trainer.callbacks import Callback from coati.trainer.callbacks.performance_evaluator import ExperienceMakerPerformanceEvaluator from coati.trainer.strategies import Strategy @@ -47,6 +48,7 @@ def __init__(self, callbacks: List[Callback] = [], eval_performance: bool = False, debug: bool = False, + send_grain_size: int = 4, **generate_kwargs): # set environment variables if env_info: @@ -63,6 +65,7 @@ def __init__(self, self.experience_maker = NaiveExperienceMaker(actor, critic, reward_model, initial_model, self.kl_coef) self.callbacks = callbacks 
self.eval_performance = eval_performance + self.send_grain_size = send_grain_size self._model_visit_lock = Lock() self._initial_model_initialized = False @@ -165,7 +168,19 @@ def workingloop(self, dataset, tokenizer: Optional[Callable[[Any], dict]] = None experience = self._make_experience(inputs=inputs) self._on_make_experience_end(experience) self._model_visit_lock.release() - self._send_experience(experience=experience) + # split experience for smoother handover + items = split_experience_batch(experience) + temp_buffer = [] + for item in items: + temp_buffer.append(item) + if len(temp_buffer) >= self.send_grain_size: + experience_fragment = make_experience_batch(temp_buffer) + self._send_experience(experience=experience_fragment) + temp_buffer = [] + # remain + if len(temp_buffer) > 0: + experience_fragment = make_experience_batch(temp_buffer) + self._send_experience(experience=experience_fragment) self._on_finish() @ray.method(concurrency_group="model_io") From d2428f88c0a84abb8e1ae054e875b6aa46003492 Mon Sep 17 00:00:00 2001 From: Hongxin Liu Date: Sun, 23 Apr 2023 11:01:21 +0800 Subject: [PATCH 04/26] [chat] refactor trainer and maker (#12) * [chat] refactor experience maker holder * [chat] refactor model init * [chat] refactor trainer args * [chat] refactor model init * [chat] refactor trainer --- .../Chat/coati/ray/example/1mmt_dummy.py | 64 +++---- .../coati/ray/src/detached_trainer_base.py | 12 +- .../coati/ray/src/detached_trainer_ppo.py | 149 ++++----------- .../coati/ray/src/experience_maker_holder.py | 179 ++++-------------- .../coati/trainer/strategies/colossalai.py | 7 +- 5 files changed, 117 insertions(+), 294 deletions(-) diff --git a/applications/Chat/coati/ray/example/1mmt_dummy.py b/applications/Chat/coati/ray/example/1mmt_dummy.py index 68b666663b12..fdb742406b26 100644 --- a/applications/Chat/coati/ray/example/1mmt_dummy.py +++ b/applications/Chat/coati/ray/example/1mmt_dummy.py @@ -1,15 +1,18 @@ import argparse import os import socket -from 
copy import deepcopy from functools import partial import ray import torch -from coati.models.base import RewardModel from coati.ray.src.detached_trainer_ppo import DetachedPPOTrainer from coati.ray.src.experience_maker_holder import ExperienceMakerHolder -from coati.ray.src.utils import get_actor_from_args, get_critic_from_args, get_reward_model_from_args +from coati.ray.src.utils import ( + get_actor_from_args, + get_critic_from_args, + get_reward_model_from_args, + get_strategy_from_args, +) from transformers import AutoTokenizer, BloomTokenizerFast from transformers.models.gpt2.configuration_gpt2 import GPT2Config from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer @@ -81,39 +84,44 @@ def main(args): tokenizer = GPT2Tokenizer.from_pretrained('gpt2') tokenizer.pad_token = tokenizer.eos_token + def trainer_model_fn(): + actor = get_actor_from_args(args.model, args.pretrain).half().cuda() + critic = get_critic_from_args(args.model, args.pretrain).half().cuda() + return actor, critic + # configure Trainer trainer_refs = [ DetachedPPOTrainer.options(name=f"trainer{i}", num_gpus=1, max_concurrency=2).remote( experience_maker_holder_name_list=["maker1"], - strategy=args.trainer_strategy, - model=args.model, + strategy_fn=partial(get_strategy_from_args, args.trainer_strategy), + model_fn=trainer_model_fn, env_info=env_info_trainer, - pretrained=args.pretrain, - lora_rank=args.lora_rank, train_batch_size=args.train_batch_size, buffer_limit=16, - experience_batch_size=args.experience_batch_size, max_epochs=args.max_epochs, - # kwargs: - max_length=512, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, eval_performance=True, debug=args.debug, ) for i, env_info_trainer in enumerate(env_info_trainers) ] + def model_fn(): + actor = get_actor_from_args(args.model, args.pretrain).half().cuda() + critic = get_critic_from_args(args.model, args.pretrain).half().cuda() + reward_model = 
get_reward_model_from_args(args.model, args.pretrain).half().cuda() + initial_model = get_actor_from_args(args.model, args.pretrain).half().cuda() + return actor, critic, reward_model, initial_model + # configure Experience Maker experience_holder_ref = ExperienceMakerHolder.options(name="maker1", num_gpus=1, max_concurrency=2).remote( detached_trainer_name_list=[f'trainer{i}' for i in range(args.num_trainers)], - strategy=args.maker_strategy, + strategy_fn=partial(get_strategy_from_args, args.maker_strategy), + model_fn=model_fn, env_info=env_info_maker, experience_batch_size=args.experience_batch_size, kl_coef=0.1, - # kwargs: + debug=args.debug, + # sync_models_from_trainers=True, + # generation kwargs: max_length=512, do_sample=True, temperature=1.0, @@ -122,32 +130,22 @@ def main(args): eos_token_id=tokenizer.eos_token_id, eval_performance=True, use_cache=True, - debug=args.debug, ) - def init_inference_model(fn, model_name, pretrained): - model = fn(model_name, pretrained) - return model.half().cuda() - - # init maker locally - ray.get( - experience_holder_ref.initialize_experience_maker_local.remote( - initial_model_func=partial(init_inference_model, get_actor_from_args, args.model, args.pretrain), - reward_model_func=partial(init_inference_model, get_reward_model_from_args, args.model, args.pretrain), - actor_func=partial(init_inference_model, get_actor_from_args, args.model, args.pretrain), - critic_func=partial(init_inference_model, get_critic_from_args, args.model, args.pretrain), - )) - # configure sampler random_prompts = torch.randint(tokenizer.vocab_size, (1000, 400)) def tokenize_fn(texts): - # print(texts) input_ids = torch.stack(texts).cuda() - # print(input_ids.shape) attn_mask = torch.ones_like(input_ids) return {'input_ids': input_ids, 'attention_mask': attn_mask} + # uncomment this function if sync_models_from_trainers is True + # ray.get([ + # trainer_ref.sync_models_to_remote_makers.remote() + # for trainer_ref in trainer_refs + # ]) + 
wait_tasks = [] for trainer_ref in trainer_refs: diff --git a/applications/Chat/coati/ray/src/detached_trainer_base.py b/applications/Chat/coati/ray/src/detached_trainer_base.py index 3558f58017a6..86b60582a614 100644 --- a/applications/Chat/coati/ray/src/detached_trainer_base.py +++ b/applications/Chat/coati/ray/src/detached_trainer_base.py @@ -21,7 +21,6 @@ class DetachedTrainer(ABC): Args: detached_strategy (DetachedStrategy): the strategy to use for training detached_replay_buffer_ref (ObjectRef[DetachedReplayBuffer]): the replay buffer to use for training - experience_batch_size (int, defaults to 8): the batch size to use for experience generation max_epochs (int, defaults to 1): the number of epochs of training process data_loader_pin_memory (bool, defaults to True): whether to pin memory for data loader callbacks (List[Callback], defaults to []): the callbacks to call during training process @@ -34,21 +33,17 @@ def __init__(self, train_batch_size: int = 8, buffer_limit: int = 0, buffer_cpu_offload: bool = True, - experience_batch_size: int = 8, max_epochs: int = 1, dataloader_pin_memory: bool = True, callbacks: List[Callback] = [], - debug: bool = False, - **generate_kwargs) -> None: + debug: bool = False) -> None: super().__init__() self.detached_replay_buffer = DetachedReplayBuffer(train_batch_size, limit=buffer_limit, cpu_offload=buffer_cpu_offload) - self.experience_batch_size = experience_batch_size self.max_epochs = max_epochs self.dataloader_pin_memory = dataloader_pin_memory self.callbacks = callbacks - self.generate_kwargs = generate_kwargs self.target_holder_name_list = experience_maker_holder_name_list self.target_holder_list = [] @@ -61,9 +56,12 @@ def update_target_holder_list(self, experience_maker_holder_name_list): self.target_holder_list.append(ray.get_actor(name, namespace=os.environ["RAY_NAMESPACE"])) @abstractmethod - def _update_remote_makers(self): + def _update_remote_makers(self, fully_update: bool = False, **kwargs): pass + def 
sync_models_to_remote_makers(self, **kwargs): + self._update_remote_makers(fully_update=True, **kwargs) + @abstractmethod def training_step(self, experience: Experience) -> Dict[str, Any]: pass diff --git a/applications/Chat/coati/ray/src/detached_trainer_ppo.py b/applications/Chat/coati/ray/src/detached_trainer_ppo.py index 2850f1cf1d37..056942b83360 100644 --- a/applications/Chat/coati/ray/src/detached_trainer_ppo.py +++ b/applications/Chat/coati/ray/src/detached_trainer_ppo.py @@ -1,10 +1,9 @@ -from typing import Any, Callable, Dict, List, Optional +from typing import Any, Callable, Dict, List, Optional, Tuple import ray import torch from coati.experience_maker import Experience, NaiveExperienceMaker from coati.models.base import Actor, Critic -from coati.models.generation_utils import update_model_kwargs_fn from coati.models.loss import PolicyLoss, ValueLoss from coati.trainer.callbacks import Callback from coati.trainer.callbacks.performance_evaluator import TrainerPerformaceEvaluator @@ -54,42 +53,30 @@ class DetachedPPOTrainer(DetachedTrainer): ''' def __init__( - self, - experience_maker_holder_name_list: List[str], - strategy: str, - model: str, - pretrained: str = None, - lora_rank: int = 0, - cr_model: str = None, # if not None, use below cr settings for critic - cr_pretrained: str = None, - cr_lora_rank: int = 0, - env_info: Dict[str, str] = None, - train_batch_size: int = 8, - buffer_limit: int = 0, - buffer_cpu_offload: bool = True, - eps_clip: float = 0.2, - value_clip: float = 0.4, - experience_batch_size: int = 8, - max_epochs: int = 10, - dataloader_pin_memory: bool = True, - callbacks: List[Callback] = [], - eval_performance: bool = False, - debug: bool = False, - **generate_kwargs) -> None: + self, + experience_maker_holder_name_list: List[str], + strategy_fn: Callable[[], Strategy], + model_fn: Callable[[], Tuple[Actor, Critic]], + env_info: Dict[str, str] = None, + train_batch_size: int = 8, + buffer_limit: int = 0, + buffer_cpu_offload: bool 
= True, + eps_clip: float = 0.2, + value_clip: float = 0.4, + max_epochs: int = 10, + dataloader_pin_memory: bool = True, + callbacks: List[Callback] = [], + eval_performance: bool = False, + debug: bool = False, + ) -> None: # set environment variables if env_info: set_dist_env(env_info=env_info) # configure strategy - self.strategy = get_strategy_from_args(strategy) + self.strategy = strategy_fn() # configure models, loss and optimizers - if cr_model is None: - cr_model = model - cr_pretrained = pretrained - cr_lora_rank = lora_rank - with self.strategy.model_init_context(): - self.actor = get_actor_from_args(model, pretrained, lora_rank) - self.critic = get_critic_from_args(cr_model, cr_pretrained, cr_lora_rank) + self.actor, self.critic = model_fn() if eval_performance: actor_numel = get_model_numel(self.actor) @@ -97,11 +84,7 @@ def __init__( evaluator = TrainerPerformaceEvaluator(actor_numel, critic_numel) callbacks = callbacks + [evaluator] - if strategy != 'colossalai_gemini': - self.actor.to(torch.cuda.current_device()) # .to(torch.float16) - self.critic.to(torch.cuda.current_device()) # .to(torch.float16) - - if strategy.startswith('colossalai'): + if isinstance(self.strategy, ColossalAIStrategy): self.actor_optim = HybridAdam(self.actor.parameters(), lr=1e-7) self.critic_optim = HybridAdam(self.critic.parameters(), lr=1e-7) else: @@ -112,7 +95,6 @@ def __init__( self.strategy.prepare((self.actor, self.actor_optim), (self.critic, self.critic_optim)) # configure trainer - generate_kwargs = _set_default_generate_kwargs(self.strategy, generate_kwargs, self.actor) self.actor_loss_fn = PolicyLoss(eps_clip) self.critic_loss_fn = ValueLoss(value_clip) @@ -120,88 +102,42 @@ def __init__( train_batch_size=train_batch_size, buffer_limit=buffer_limit, buffer_cpu_offload=buffer_cpu_offload, - experience_batch_size=experience_batch_size, max_epochs=max_epochs, dataloader_pin_memory=dataloader_pin_memory, callbacks=callbacks, - debug=debug, - **generate_kwargs) - - # 
for remote maker initialization - self._model_str = model - self._cr_model_str = cr_model - self._pretrained = pretrained - self._cr_pretrained = cr_pretrained + debug=debug) @ray.method(concurrency_group="model_io") @torch.no_grad() - def _update_remote_makers(self, **config): + def _update_remote_makers(self, fully_update: bool = False, **config): # TODO: balance duties if is_rank_0(): self.update_target_holder_list(self.target_holder_name_list) - # actor: - if is_rank_0(): - # mark start + # mark start, ensure order + tasks = [] for target_holder in self.target_holder_list: - target_holder.update_experience_maker.remote(chunk_start=True) + tasks.append(target_holder.update_experience_maker.remote(chunk_start=True, fully_update=fully_update)) + ray.get(tasks) # sending loop + tasks = [] for state_dict_shard in self._get_model_state_dict_shard(self.strategy._unwrap_model(self.actor), **config): if is_rank_0(): for target_holder in self.target_holder_list: - target_holder.update_experience_maker.remote(new_actor_state_dict=state_dict_shard) - if is_rank_0(): - # mark end - for target_holder in self.target_holder_list: - target_holder.update_experience_maker.remote(chunk_end=True) - # critic - if is_rank_0(): - # mark start - for target_holder in self.target_holder_list: - target_holder.update_experience_maker.remote(chunk_start=True) - # sending loop + tasks.append( + target_holder.update_experience_maker.remote(new_actor_state_dict=state_dict_shard, + fully_update=fully_update)) + # sending loop for state_dict_shard in self._get_model_state_dict_shard(self.strategy._unwrap_critic(self.critic), **config): if is_rank_0(): for target_holder in self.target_holder_list: - target_holder.update_experience_maker.remote(new_critic_state_dict=state_dict_shard) + tasks.append( + target_holder.update_experience_maker.remote(new_critic_state_dict=state_dict_shard, + fully_update=fully_update)) + ray.get(tasks) if is_rank_0(): # mark end for target_holder in 
self.target_holder_list: - target_holder.update_experience_maker.remote(chunk_end=True) - - @ray.method(concurrency_group="model_io") - def initialize_remote_makers(self, **config): - # TODO: balance duties - if is_rank_0(): - self.update_target_holder_list(self.target_holder_name_list) - with torch.no_grad(): - # actor / initial_model: - # mark start - for target_holder in self.target_holder_list: - target_holder.initialize_experience_maker.remote(actor_model=self._model_str, - actor_pretrained=self._pretrained, - chunk_start=True) - # sending loop - for state_dict_shard in self._get_model_state_dict_shard(self.strategy._unwrap_actor(self.actor), - **config): - for target_holder in self.target_holder_list: - target_holder.initialize_experience_maker.remote(actor_state_dict=state_dict_shard) - # mark end - for target_holder in self.target_holder_list: - target_holder.initialize_experience_maker.remote(actor_model=self._model_str, chunk_end=True) - # critic / reward_model: - # mark start - for target_holder in self.target_holder_list: - target_holder.initialize_experience_maker.remote(critic_model=self._cr_model_str, - critic_pretrained=self._cr_pretrained, - chunk_start=True) - # sending loop - for state_dict_shard in self._get_model_state_dict_shard(self.strategy._unwrap_critic(self.critic), - **config): - for target_holder in self.target_holder_list: - target_holder.initialize_experience_maker.remote(critic_state_dict=state_dict_shard) - # mark end - for target_holder in self.target_holder_list: - target_holder.initialize_experience_maker.remote(critic_model=self._cr_model_str, chunk_end=True) + target_holder.update_experience_maker.remote(chunk_end=True, fully_update=fully_update) @ray.method(concurrency_group="compute") def training_step(self, experience: Experience) -> Dict[str, float]: @@ -273,16 +209,3 @@ def _get_model_state_dict_shard(self, model: torch.nn.Module, **config): pass for state_dict in self.strategy.get_model_state_dict_shard(model, **config): 
yield state_dict_to(state_dict) - - -def _set_default_generate_kwargs(strategy: Strategy, generate_kwargs: dict, actor: Actor) -> None: - origin_model = strategy._unwrap_actor(actor) - new_kwargs = {**generate_kwargs} - # use huggingface models method directly - if 'prepare_inputs_fn' not in generate_kwargs and hasattr(origin_model, 'prepare_inputs_for_generation'): - new_kwargs['prepare_inputs_fn'] = origin_model.prepare_inputs_for_generation - - if 'update_model_kwargs_fn' not in generate_kwargs: - new_kwargs['update_model_kwargs_fn'] = update_model_kwargs_fn - - return new_kwargs diff --git a/applications/Chat/coati/ray/src/experience_maker_holder.py b/applications/Chat/coati/ray/src/experience_maker_holder.py index 93624c2be921..c85d36bab360 100644 --- a/applications/Chat/coati/ray/src/experience_maker_holder.py +++ b/applications/Chat/coati/ray/src/experience_maker_holder.py @@ -3,14 +3,14 @@ import tracemalloc from copy import deepcopy from threading import Lock -from typing import Any, Callable, Dict, List, Optional, Union +from typing import Any, Callable, Dict, List, Optional, Tuple, Union import ray import torch import torch.nn as nn from coati.experience_maker import Experience, ExperienceMaker, NaiveExperienceMaker from coati.models.base import Actor, Critic, RewardModel -from coati.replay_buffer.utils import split_experience_batch, make_experience_batch, BufferItem +from coati.replay_buffer.utils import BufferItem, make_experience_batch, split_experience_batch from coati.trainer.callbacks import Callback from coati.trainer.callbacks.performance_evaluator import ExperienceMakerPerformanceEvaluator from coati.trainer.strategies import Strategy @@ -37,73 +37,67 @@ class ExperienceMakerHolder: strategy: experience_batch_size: batch size of generated experience kl_coef: the coefficient of kl divergence loss + sync_models_from_trainers: whether to sync models from trainers. If True, you must call sync_models_to_remote_makers() in trainers to sync models. 
''' - def __init__(self, - detached_trainer_name_list: List[str], - strategy: str, - env_info: Dict[str, str] = None, - experience_batch_size: int = 8, - kl_coef: float = 0.1, - callbacks: List[Callback] = [], - eval_performance: bool = False, - debug: bool = False, - send_grain_size: int = 4, - **generate_kwargs): + def __init__( + self, + detached_trainer_name_list: List[str], + strategy_fn: Callable[[], Strategy], + # a function returns (actor, critic, reward_model, initial_model) + model_fn: Callable[[], Tuple[Actor, Critic, RewardModel, Actor]], + env_info: Dict[str, str] = None, + sync_models_from_trainers: bool = False, + experience_batch_size: int = 8, + send_grain_size: int = 4, + kl_coef: float = 0.1, + callbacks: List[Callback] = [], + eval_performance: bool = False, + debug: bool = False, + **generate_kwargs): # set environment variables if env_info: set_dist_env(env_info=env_info) self.target_trainer_list = [] for name in detached_trainer_name_list: self.target_trainer_list.append(ray.get_actor(name, namespace=os.environ["RAY_NAMESPACE"])) - self.strategy_str = strategy - self.strategy = get_strategy_from_args(strategy) + self.strategy = strategy_fn() self.experience_batch_size = experience_batch_size self.kl_coef = kl_coef - self.generate_kwargs = generate_kwargs - actor, critic, reward_model, initial_model = None, None, None, None + # init models + with self.strategy.model_init_context(): + actor, critic, reward_model, initial_model = model_fn() + self.generate_kwargs = _set_default_generate_kwargs(generate_kwargs, actor) + if eval_performance: + actor_numel = get_model_numel(actor) + critic_numel = get_model_numel(critic) + initial_model_numel = get_model_numel(initial_model) + reward_model_numel = get_model_numel(reward_model) + evaluator = ExperienceMakerPerformanceEvaluator(actor_numel, critic_numel, initial_model_numel, + reward_model_numel) + callbacks = callbacks + [evaluator] + + actor, critic, reward_model, initial_model = 
self.strategy.prepare(actor, critic, reward_model, initial_model) self.experience_maker = NaiveExperienceMaker(actor, critic, reward_model, initial_model, self.kl_coef) self.callbacks = callbacks - self.eval_performance = eval_performance self.send_grain_size = send_grain_size self._model_visit_lock = Lock() - self._initial_model_initialized = False - self._reward_model_initialized = False - self._actor_initialized = False - self._critic_initialized = False + + self._is_fully_initialized = not sync_models_from_trainers self._debug = debug self.target_auto_balance = False - if self._debug: + if self._debug and not self._is_fully_initialized: print('[maker] Waiting for INIT') def _get_ready(self): while not self._fully_initialized(): time.sleep(1.0) - # setup performance evaluator - if self.eval_performance: - actor_numel = get_model_numel(self.experience_maker.actor) - critic_numel = get_model_numel(self.experience_maker.critic) - initial_model_numel = get_model_numel(self.experience_maker.initial_model) - reward_model_numel = get_model_numel(self.experience_maker.reward_model) - evaluator = ExperienceMakerPerformanceEvaluator(actor_numel, critic_numel, initial_model_numel, - reward_model_numel) - self.callbacks.append(evaluator) - - self.generate_kwargs = _set_default_generate_kwargs(self.generate_kwargs, self.experience_maker.actor) def _fully_initialized(self): - if not self._initial_model_initialized: - return False - if not self._reward_model_initialized: - return False - if not self._actor_initialized: - return False - if not self._critic_initialized: - return False - return True + return self._is_fully_initialized def update_target_trainer_list(self, detached_trainer_name_list): self.target_trainer_list = [] @@ -183,110 +177,18 @@ def workingloop(self, dataset, tokenizer: Optional[Callable[[Any], dict]] = None self._send_experience(experience=experience_fragment) self._on_finish() - @ray.method(concurrency_group="model_io") - def 
initialize_experience_maker(self, - actor_model: str = None, - actor_pretrained: str = None, - actor_state_dict: Dict[str, Any] = None, - critic_model: str = None, - critic_pretrained: str = None, - critic_state_dict: Dict[str, Any] = None, - chunk_start: bool = None, - chunk_end: bool = None): - ''' - called by trainer - chunk_start: Set True at the first call. Before sending state_dict calls - chunk_end: Set True at the last call. After sending state_dict calls. - - TODO: load_state_dict integrate with model-sharding strategy - ''' - if self._fully_initialized(): - return - - if chunk_start: - if self._debug: - print('[maker] INIT') - with torch.no_grad(): - # (csric) any better way to get model structure? - with self.strategy.model_init_context(): - if not self._actor_initialized and actor_model is not None: - self.experience_maker.actor = get_actor_from_args(actor_model, - actor_pretrained).half().requires_grad_(False) - if not self._critic_initialized and critic_model is not None: - self.experience_maker.critic = get_critic_from_args( - critic_model, critic_pretrained).half().requires_grad_(False) - if not self._initial_model_initialized and actor_model is not None: - self.experience_maker.initial_model = get_actor_from_args( - actor_model, actor_pretrained).half().requires_grad_(False) - if not self._reward_model_initialized and critic_model is not None: - self.experience_maker.reward_model = get_reward_model_from_args( - critic_model, critic_pretrained).half().requires_grad_(False) - - with torch.no_grad(): - if not self._actor_initialized and actor_state_dict is not None: - self.experience_maker.actor.model.load_state_dict(actor_state_dict, strict=False) - if not self._critic_initialized and critic_state_dict is not None: - self.experience_maker.critic.load_state_dict(critic_state_dict, strict=False) - if not self._initial_model_initialized and actor_state_dict is not None: - self.experience_maker.initial_model.model.load_state_dict(actor_state_dict, 
strict=False) - if not self._reward_model_initialized and critic_state_dict is not None: - self.experience_maker.reward_model.load_state_dict(critic_state_dict, strict=False) - - if chunk_end: - with torch.no_grad(): - if actor_model is not None: - if not self._actor_initialized: - self.experience_maker.actor = self.strategy.prepare( - self.experience_maker.actor.to(torch.cuda.current_device())) - if not self._initial_model_initialized: - self.experience_maker.initial_model = self.strategy.prepare( - self.experience_maker.initial_model.to(torch.cuda.current_device())) - self._actor_initialized = True - self._initial_model_initialized = True - if critic_model is not None: - if not self._critic_initialized: - self.experience_maker.critic = self.strategy.prepare( - self.experience_maker.critic.to(torch.cuda.current_device())) - if not self._reward_model_initialized: - self.experience_maker.reward_model = self.strategy.prepare( - self.experience_maker.reward_model.to(torch.cuda.current_device())) - self._critic_initialized = True - self._reward_model_initialized = True - - def initialize_experience_maker_local(self, - initial_model_func=None, - reward_model_func=None, - actor_func=None, - critic_func=None): - ''' - Use function call to construct the model here, because some strategy requieres env_info - The model initialized here will be IGNORED in initialize_experience_maker. - initial_model and reward_model can have their own strategy rather than self.strategy. For example, Quantization. 
- ''' - - if actor_func is not None: - self.experience_maker.actor = actor_func() - self._actor_initialized = True - if critic_func is not None: - self.experience_maker.critic = critic_func() - self._critic_initialized = True - if initial_model_func is not None: - self.experience_maker.initial_model = initial_model_func() - self._initial_model_initialized = True - if reward_model_func is not None: - self.experience_maker.reward_model = reward_model_func() - self._reward_model_initialized = True - @ray.method(concurrency_group="model_io") def update_experience_maker(self, new_actor_state_dict: Dict[str, Any] = None, new_critic_state_dict: Dict[str, Any] = None, + fully_update: bool = False, chunk_start: bool = None, chunk_end: bool = None): ''' called by trainer chunk_start: Set True at the first call. Before sending state_dict calls chunk_end: Set True at the last call. After sending state_dict calls. + fully_update: Set True if you want to sync models when initializing TODO: load_state_dict integrate with model-sharding strategy ''' @@ -304,12 +206,15 @@ def update_experience_maker(self, if new_critic_state_dict is not None: self.experience_maker.critic.load_state_dict(new_critic_state_dict, strict=False) + # the lock must be released after both actor and critic being updated if chunk_end: self._model_visit_lock.release() if _watch_memory: current, peak = tracemalloc.get_traced_memory() print(f"Current memory usage is {current / 10**6}MB; Peak was {peak / 10**6}MB") tracemalloc.stop() + if fully_update: + self._is_fully_initialized = True def _on_make_experience_start(self) -> None: for callback in self.callbacks: diff --git a/applications/Chat/coati/trainer/strategies/colossalai.py b/applications/Chat/coati/trainer/strategies/colossalai.py index 5a6021c5013f..b809c010247b 100644 --- a/applications/Chat/coati/trainer/strategies/colossalai.py +++ b/applications/Chat/coati/trainer/strategies/colossalai.py @@ -5,7 +5,7 @@ import torch.distributed as dist import 
torch.nn as nn import torch.optim as optim -from coati.models.base import LM, Actor, RewardModel, Critic +from coati.models.base import LM, Actor, Critic, RewardModel from coati.models.lora import LoraLinear from torch.optim import Optimizer from transformers.modeling_utils import PreTrainedModel @@ -139,7 +139,7 @@ def setup_model(self, model: nn.Module) -> nn.Module: model = zero_model_wrapper(model, zero_stage=self.stage, gemini_config=self.gemini_config) if self.stage != 3 and self.precision == 'fp16': - model = model.half() + model = model.half().cuda() return model def setup_optimizer(self, optimizer: optim.Optimizer, model: nn.Module) -> optim.Optimizer: @@ -163,7 +163,6 @@ def _unwrap_actor(actor: Actor) -> nn.Module: def _unwrap_critic(critic: Critic) -> nn.Module: return Strategy._unwrap_critic(critic) - def _unwrap_model(self, model: Union[nn.Module, ZeroDDP]) -> nn.Module: return super()._unwrap_model(model) @@ -220,4 +219,4 @@ def get_model_state_dict_shard(self, model: nn.Module, **config): if isinstance(module, LoraLinear): module.merge_weights = True module.eval() - yield from model.state_dict_shard(max_shard_size=1024) \ No newline at end of file + yield from model.state_dict_shard(max_shard_size=1024) From 078f5aaa800d57ef0495e202a86b270bebb30d3c Mon Sep 17 00:00:00 2001 From: Hongxin Liu Date: Sun, 23 Apr 2023 16:33:05 +0800 Subject: [PATCH 05/26] [chat] refactor experience sending logic and training loop args (#13) * [chat] refactor experience send logic * [chat] refactor trainer * [chat] refactor trainer * [chat] refactor experience maker * [chat] refactor pbar --- .../Chat/coati/ray/example/1mmt_dummy.py | 121 +++++++----------- .../coati/ray/src/detached_replay_buffer.py | 16 +-- .../coati/ray/src/detached_trainer_base.py | 64 +++++---- .../coati/ray/src/detached_trainer_ppo.py | 5 - .../coati/ray/src/experience_maker_holder.py | 92 +++++++------ 5 files changed, 148 insertions(+), 150 deletions(-) diff --git 
a/applications/Chat/coati/ray/example/1mmt_dummy.py b/applications/Chat/coati/ray/example/1mmt_dummy.py index fdb742406b26..c7619ea6940b 100644 --- a/applications/Chat/coati/ray/example/1mmt_dummy.py +++ b/applications/Chat/coati/ray/example/1mmt_dummy.py @@ -13,37 +13,8 @@ get_reward_model_from_args, get_strategy_from_args, ) -from transformers import AutoTokenizer, BloomTokenizerFast -from transformers.models.gpt2.configuration_gpt2 import GPT2Config -from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer - - -def get_gpt_config(model_name: str) -> GPT2Config: - model_map = { - 's': GPT2Config(), - 'm': GPT2Config(n_embd=1024, n_layer=24, n_head=16), - 'l': GPT2Config(n_embd=1280, n_layer=36, n_head=20), - 'xl': GPT2Config(n_embd=1600, n_layer=48, n_head=25), - '2b': GPT2Config(n_embd=2048, n_layer=40, n_head=16), - '4b': GPT2Config(n_embd=2304, n_layer=64, n_head=16), - '6b': GPT2Config(n_embd=4096, n_layer=30, n_head=16), - '8b': GPT2Config(n_embd=4096, n_layer=40, n_head=16), - '10b': GPT2Config(n_embd=4096, n_layer=50, n_head=16), - '12b': GPT2Config(n_embd=4096, n_layer=60, n_head=16), - '15b': GPT2Config(n_embd=4096, n_layer=78, n_head=16), - '18b': GPT2Config(n_embd=4096, n_layer=90, n_head=16), - '20b': GPT2Config(n_embd=8192, n_layer=25, n_head=16), - '24b': GPT2Config(n_embd=8192, n_layer=30, n_head=16), - '28b': GPT2Config(n_embd=8192, n_layer=35, n_head=16), - '32b': GPT2Config(n_embd=8192, n_layer=40, n_head=16), - '36b': GPT2Config(n_embd=8192, n_layer=45, n_head=16), - '40b': GPT2Config(n_embd=8192, n_layer=50, n_head=16), - '175b': GPT2Config(n_positions=2048, n_embd=12288, n_layer=96, n_head=96), - } - try: - return model_map[model_name] - except KeyError: - raise ValueError(f'Unknown model "{model_name}"') +from torch.utils.data import DataLoader +from transformers import AutoConfig, AutoTokenizer def get_free_port(): @@ -81,34 +52,16 @@ def main(args): } # configure tokenizer - tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + 
tokenizer = AutoTokenizer.from_pretrained(args.pretrain) tokenizer.pad_token = tokenizer.eos_token - def trainer_model_fn(): - actor = get_actor_from_args(args.model, args.pretrain).half().cuda() - critic = get_critic_from_args(args.model, args.pretrain).half().cuda() - return actor, critic - - # configure Trainer - trainer_refs = [ - DetachedPPOTrainer.options(name=f"trainer{i}", num_gpus=1, max_concurrency=2).remote( - experience_maker_holder_name_list=["maker1"], - strategy_fn=partial(get_strategy_from_args, args.trainer_strategy), - model_fn=trainer_model_fn, - env_info=env_info_trainer, - train_batch_size=args.train_batch_size, - buffer_limit=16, - max_epochs=args.max_epochs, - eval_performance=True, - debug=args.debug, - ) for i, env_info_trainer in enumerate(env_info_trainers) - ] - def model_fn(): - actor = get_actor_from_args(args.model, args.pretrain).half().cuda() - critic = get_critic_from_args(args.model, args.pretrain).half().cuda() - reward_model = get_reward_model_from_args(args.model, args.pretrain).half().cuda() - initial_model = get_actor_from_args(args.model, args.pretrain).half().cuda() + actor_cfg = AutoConfig.from_pretrained(args.pretrain) + critic_cfg = AutoConfig.from_pretrained(args.critic_pretrain) + actor = get_actor_from_args(args.model, config=actor_cfg).half().cuda() + critic = get_critic_from_args(args.model, config=critic_cfg).half().cuda() + reward_model = get_reward_model_from_args(args.model, config=critic_cfg).half().cuda() + initial_model = get_actor_from_args(args.model, config=actor_cfg).half().cuda() return actor, critic, reward_model, initial_model # configure Experience Maker @@ -117,7 +70,6 @@ def model_fn(): strategy_fn=partial(get_strategy_from_args, args.maker_strategy), model_fn=model_fn, env_info=env_info_maker, - experience_batch_size=args.experience_batch_size, kl_coef=0.1, debug=args.debug, # sync_models_from_trainers=True, @@ -132,14 +84,37 @@ def model_fn(): use_cache=True, ) - # configure sampler - 
random_prompts = torch.randint(tokenizer.vocab_size, (1000, 400)) + def trainer_model_fn(): + actor = get_actor_from_args(args.model, config=AutoConfig.from_pretrained(args.pretrain)).half().cuda() + critic = get_critic_from_args(args.model, config=AutoConfig.from_pretrained(args.critic_pretrain)).half().cuda() + return actor, critic + + # configure Trainer + trainer_refs = [ + DetachedPPOTrainer.options(name=f"trainer{i}", num_gpus=1, max_concurrency=2).remote( + experience_maker_holder_name_list=["maker1"], + strategy_fn=partial(get_strategy_from_args, args.trainer_strategy), + model_fn=trainer_model_fn, + env_info=env_info_trainer, + train_batch_size=args.train_batch_size, + buffer_limit=16, + eval_performance=True, + debug=args.debug, + ) for i, env_info_trainer in enumerate(env_info_trainers) + ] + + dataset_size = args.experience_batch_size * 4 - def tokenize_fn(texts): - input_ids = torch.stack(texts).cuda() + def data_gen_fn(): + input_ids = torch.randint(tokenizer.vocab_size, (256,), device=torch.cuda.current_device()) attn_mask = torch.ones_like(input_ids) return {'input_ids': input_ids, 'attention_mask': attn_mask} + def build_dataloader(size): + dataset = [data_gen_fn() for _ in range(size)] + dataloader = DataLoader(dataset, batch_size=args.experience_batch_size) + return dataloader + # uncomment this function if sync_models_from_trainers is True # ray.get([ # trainer_ref.sync_models_to_remote_makers.remote() @@ -148,15 +123,13 @@ def tokenize_fn(texts): wait_tasks = [] - for trainer_ref in trainer_refs: - wait_tasks.append( - trainer_ref.fit.remote(num_episodes=args.num_episodes, - max_timesteps=args.max_timesteps, - update_timesteps=args.update_timesteps)) + wait_tasks.append( + experience_holder_ref.workingloop.remote(partial(build_dataloader, dataset_size), + num_steps=args.experience_steps)) - num_exp_per_maker = args.num_episodes * args.max_timesteps // args.update_timesteps * \ - args.max_epochs * args.num_trainers + 3 # +3 for fault tolerance - 
wait_tasks.append(experience_holder_ref.workingloop.remote(random_prompts, tokenize_fn, times=num_exp_per_maker)) + total_steps = args.experience_batch_size * args.experience_steps // (args.num_trainers * args.train_batch_size) + for trainer_ref in trainer_refs: + wait_tasks.append(trainer_ref.fit.remote(total_steps, args.update_steps, args.train_epochs)) ray.get(wait_tasks) @@ -170,12 +143,12 @@ def tokenize_fn(texts): parser.add_argument('--maker_strategy', choices=['naive'], default='naive') parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt']) parser.add_argument('--pretrain', type=str, default=None) - parser.add_argument('--num_episodes', type=int, default=10) - parser.add_argument('--max_timesteps', type=int, default=10) - parser.add_argument('--update_timesteps', type=int, default=10) - parser.add_argument('--max_epochs', type=int, default=5) - parser.add_argument('--train_batch_size', type=int, default=8) + parser.add_argument('--critic_pretrain', type=str, default=None) + parser.add_argument('--experience_steps', type=int, default=4) parser.add_argument('--experience_batch_size', type=int, default=8) + parser.add_argument('--train_epochs', type=int, default=1) + parser.add_argument('--update_steps', type=int, default=2) + parser.add_argument('--train_batch_size', type=int, default=8) parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank") parser.add_argument('--debug', action='store_true') diff --git a/applications/Chat/coati/ray/src/detached_replay_buffer.py b/applications/Chat/coati/ray/src/detached_replay_buffer.py index 4bc74bb878fd..257b0b072493 100644 --- a/applications/Chat/coati/ray/src/detached_replay_buffer.py +++ b/applications/Chat/coati/ray/src/detached_replay_buffer.py @@ -26,12 +26,7 @@ class DetachedReplayBuffer: cpu_offload: Whether to offload experience to cpu when sampling. Defaults to True. 
''' - def __init__(self, - sample_batch_size: int, - tp_world_size: int = 1, - limit: int = 0, - cpu_offload: bool = True) -> None: - self.cpu_offload = cpu_offload + def __init__(self, sample_batch_size: int, tp_world_size: int = 1, limit: int = 0) -> None: self.sample_batch_size = sample_batch_size self.limit = limit self.items = Queue(self.limit, actor_options={"num_cpus": 1}) @@ -51,9 +46,14 @@ def append(self, experience: Experience) -> None: ''' Expected to be called remotely. ''' - if self.cpu_offload: - experience.to_device(torch.device('cpu')) items = split_experience_batch(experience) + self.extend(items) + + @torch.no_grad() + def extend(self, items: List[BufferItem]) -> None: + ''' + Expected to be called remotely. + ''' self.batch_collector.extend(items) while len(self.batch_collector) >= self.sample_batch_size: items = self.batch_collector[:self.sample_batch_size] diff --git a/applications/Chat/coati/ray/src/detached_trainer_base.py b/applications/Chat/coati/ray/src/detached_trainer_base.py index 86b60582a614..1137d8f7b491 100644 --- a/applications/Chat/coati/ray/src/detached_trainer_base.py +++ b/applications/Chat/coati/ray/src/detached_trainer_base.py @@ -1,10 +1,13 @@ import os from abc import ABC, abstractmethod -from typing import Any, Callable, Dict, List, Optional, Union +from typing import Any, Callable, Dict, Iterable, List, Optional, Union import ray +import torch from coati.experience_maker import Experience +from coati.replay_buffer.utils import BufferItem from coati.trainer.callbacks import Callback +from torch.utils.data import DataLoader from tqdm import tqdm from .detached_replay_buffer import DetachedReplayBuffer @@ -21,7 +24,6 @@ class DetachedTrainer(ABC): Args: detached_strategy (DetachedStrategy): the strategy to use for training detached_replay_buffer_ref (ObjectRef[DetachedReplayBuffer]): the replay buffer to use for training - max_epochs (int, defaults to 1): the number of epochs of training process data_loader_pin_memory 
(bool, defaults to True): whether to pin memory for data loader callbacks (List[Callback], defaults to []): the callbacks to call during training process generate_kwargs (dict, optional): the kwargs to use while model generating @@ -32,16 +34,11 @@ def __init__(self, experience_maker_holder_name_list: List[str], train_batch_size: int = 8, buffer_limit: int = 0, - buffer_cpu_offload: bool = True, - max_epochs: int = 1, dataloader_pin_memory: bool = True, callbacks: List[Callback] = [], debug: bool = False) -> None: super().__init__() - self.detached_replay_buffer = DetachedReplayBuffer(train_batch_size, - limit=buffer_limit, - cpu_offload=buffer_cpu_offload) - self.max_epochs = max_epochs + self.detached_replay_buffer = DetachedReplayBuffer(train_batch_size, limit=buffer_limit) self.dataloader_pin_memory = dataloader_pin_memory self.callbacks = callbacks self.target_holder_name_list = experience_maker_holder_name_list @@ -66,31 +63,45 @@ def sync_models_to_remote_makers(self, **kwargs): def training_step(self, experience: Experience) -> Dict[str, Any]: pass - def _learn(self): - pbar = tqdm(range(self.max_epochs), desc='Train epoch', disable=not is_rank_0()) - for _ in pbar: - if self._debug: - print("[trainer] sampling exp") - experience = self._buffer_sample() + def _learn(self, update_steps: int, train_epochs: int) -> None: + data = [] + # warmup + pbar = tqdm(range(update_steps), desc=f'Train epoch [1/{train_epochs}]', disable=not is_rank_0()) + self._learn_epoch(pbar, data) + # item is already a batch + dataloader = DataLoader(data, + batch_size=1, + shuffle=True, + pin_memory=self.dataloader_pin_memory, + collate_fn=lambda x: x[0]) + for epoch in range(1, train_epochs): + pbar = tqdm(dataloader, desc=f'Train epoch [{epoch + 1}/{train_epochs}]', disable=not is_rank_0()) + self._learn_epoch(pbar, data) + + def _learn_epoch(self, pbar: tqdm, data: List[Experience]) -> None: + is_warmup = len(data) == 0 + for x in pbar: if self._debug: print("[trainer] training 
step") + # sample a batch and then train to avoid waiting + experience = x if not is_warmup else self._buffer_sample() + experience.to_device(torch.cuda.current_device()) self._on_learn_batch_start() metrics = self.training_step(experience) self._on_learn_batch_end(metrics, experience) + if self._debug: print("[trainer] step over") + experience.to_device("cpu") + if is_warmup: + data.append(experience) pbar.set_postfix(metrics) - def fit(self, num_episodes: int = 50000, max_timesteps: int = 500, update_timesteps: int = 5000) -> None: + def fit(self, total_steps: int, update_steps: int, train_epochs: int = 1) -> None: self._on_fit_start() - for episode in range(num_episodes): - self._on_episode_start(episode) - for timestep in tqdm(range(max_timesteps // update_timesteps), - desc=f'Episode [{episode+1}/{num_episodes}]', - disable=not is_rank_0()): - self._learn() - self._update_remote_makers() - self._on_episode_end(episode) + for _ in tqdm(range(total_steps // update_steps), desc='Trainer', disable=not is_rank_0()): + self._learn(update_steps, train_epochs) + self._update_remote_makers() self._on_fit_end() self._on_finish() @@ -108,6 +119,13 @@ def buffer_append(self, experience: Experience): print(f"[trainer] receiving exp.") self.detached_replay_buffer.append(experience) + @ray.method(concurrency_group="buffer_append") + def buffer_extend(self, items: List[BufferItem]): + # called by ExperienceMakerHolder + if self._debug: + print(f"[trainer] receiving exp.") + self.detached_replay_buffer.extend(items) + @ray.method(concurrency_group="buffer_sample") def _buffer_sample(self): return self.detached_replay_buffer.sample() diff --git a/applications/Chat/coati/ray/src/detached_trainer_ppo.py b/applications/Chat/coati/ray/src/detached_trainer_ppo.py index 056942b83360..b0630cd0b5ae 100644 --- a/applications/Chat/coati/ray/src/detached_trainer_ppo.py +++ b/applications/Chat/coati/ray/src/detached_trainer_ppo.py @@ -60,10 +60,8 @@ def __init__( env_info: Dict[str, str] = 
None, train_batch_size: int = 8, buffer_limit: int = 0, - buffer_cpu_offload: bool = True, eps_clip: float = 0.2, value_clip: float = 0.4, - max_epochs: int = 10, dataloader_pin_memory: bool = True, callbacks: List[Callback] = [], eval_performance: bool = False, @@ -101,8 +99,6 @@ def __init__( super().__init__(experience_maker_holder_name_list, train_batch_size=train_batch_size, buffer_limit=buffer_limit, - buffer_cpu_offload=buffer_cpu_offload, - max_epochs=max_epochs, dataloader_pin_memory=dataloader_pin_memory, callbacks=callbacks, debug=debug) @@ -144,7 +140,6 @@ def training_step(self, experience: Experience) -> Dict[str, float]: self.actor.train() self.critic.train() - experience.to_device(torch.cuda.current_device()) num_actions = experience.action_mask.size(1) action_log_probs = self.actor(experience.sequences, num_actions, attention_mask=experience.attention_mask) actor_loss = self.actor_loss_fn(action_log_probs, diff --git a/applications/Chat/coati/ray/src/experience_maker_holder.py b/applications/Chat/coati/ray/src/experience_maker_holder.py index c85d36bab360..ebeb58137370 100644 --- a/applications/Chat/coati/ray/src/experience_maker_holder.py +++ b/applications/Chat/coati/ray/src/experience_maker_holder.py @@ -3,7 +3,7 @@ import tracemalloc from copy import deepcopy from threading import Lock -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union import ray import torch @@ -17,16 +17,9 @@ from coati.trainer.strategies.sampler import DistributedSampler from ray.exceptions import GetTimeoutError from torch import Tensor +from tqdm import tqdm -from .utils import ( - get_actor_from_args, - get_critic_from_args, - get_model_numel, - get_reward_model_from_args, - get_strategy_from_args, - is_rank_0, - set_dist_env, -) +from .utils import get_model_numel, is_rank_0, set_dist_env @ray.remote(concurrency_groups={"experience_io": 1, "model_io": 1, "compute": 1}) @@ 
-35,7 +28,6 @@ class ExperienceMakerHolder: Args: detached_trainer_name_list: str list to get ray actor handles strategy: - experience_batch_size: batch size of generated experience kl_coef: the coefficient of kl divergence loss sync_models_from_trainers: whether to sync models from trainers. If True, you must call sync_models_to_remote_makers() in trainers to sync models. ''' @@ -48,8 +40,7 @@ def __init__( model_fn: Callable[[], Tuple[Actor, Critic, RewardModel, Actor]], env_info: Dict[str, str] = None, sync_models_from_trainers: bool = False, - experience_batch_size: int = 8, - send_grain_size: int = 4, + buffer_cpu_offload: bool = True, kl_coef: float = 0.1, callbacks: List[Callback] = [], eval_performance: bool = False, @@ -62,7 +53,7 @@ def __init__( for name in detached_trainer_name_list: self.target_trainer_list.append(ray.get_actor(name, namespace=os.environ["RAY_NAMESPACE"])) self.strategy = strategy_fn() - self.experience_batch_size = experience_batch_size + self.buffer_cpu_offload = buffer_cpu_offload self.kl_coef = kl_coef # init models with self.strategy.model_init_context(): @@ -80,7 +71,6 @@ def __init__( actor, critic, reward_model, initial_model = self.strategy.prepare(actor, critic, reward_model, initial_model) self.experience_maker = NaiveExperienceMaker(actor, critic, reward_model, initial_model, self.kl_coef) self.callbacks = callbacks - self.send_grain_size = send_grain_size self._model_visit_lock = Lock() @@ -89,6 +79,8 @@ def __init__( self._debug = debug self.target_auto_balance = False + self._target_idx = 0 + if self._debug and not self._is_fully_initialized: print('[maker] Waiting for INIT') @@ -114,6 +106,7 @@ def _make_experience(self, inputs: Union[Tensor, Dict[str, Tensor]]) -> Experien else: raise ValueError(f'Unsupported input type "{type(inputs)}"') + # TODO(ver217): remove this method @ray.method(concurrency_group="experience_io") def _send_experience(self, experience): if not self.target_auto_balance: @@ -148,33 +141,52 @@ def 
_send_experience(self, experience): print(f"[maker] sending exp to {chosen_trainer}") chosen_trainer.buffer_append.remote(experience) - def workingloop(self, dataset, tokenizer: Optional[Callable[[Any], dict]] = None, times=5000 * 50000): - self._get_ready() - sampler = self.strategy.setup_sampler(dataset) - for _ in range(times): - rand_prompts = sampler.sample(self.experience_batch_size) - if tokenizer is not None: - inputs = tokenizer(rand_prompts) - else: - inputs = rand_prompts - self._model_visit_lock.acquire() + @ray.method(concurrency_group="experience_io") + def _send_items(self, experience: Experience) -> None: + items = split_experience_batch(experience) + items_per_trainer = [[] for _ in range(len(self.target_trainer_list))] + for item in items: + items_per_trainer[self._target_idx].append(item) + self._target_idx = (self._target_idx + 1) % len(self.target_trainer_list) + for i, target_trainer in enumerate(self.target_trainer_list): + if len(items_per_trainer[i]) > 0: + target_trainer.buffer_extend.remote(items_per_trainer[i]) + + def _inference_step(self, batch) -> None: + with self._model_visit_lock: self._on_make_experience_start() - experience = self._make_experience(inputs=inputs) + experience = self._make_experience(batch) self._on_make_experience_end(experience) - self._model_visit_lock.release() - # split experience for smoother handover - items = split_experience_batch(experience) - temp_buffer = [] - for item in items: - temp_buffer.append(item) - if len(temp_buffer) >= self.send_grain_size: - experience_fragment = make_experience_batch(temp_buffer) - self._send_experience(experience=experience_fragment) - temp_buffer = [] - # remain - if len(temp_buffer) > 0: - experience_fragment = make_experience_batch(temp_buffer) - self._send_experience(experience=experience_fragment) + if self.buffer_cpu_offload: + experience.to_device('cpu') + self._send_items(experience) + + def workingloop(self, dataloader_fn: Callable[[], Iterable], num_epochs: int = 
1, num_steps: int = 0): + """Working loop of the experience maker. + + Args: + dataloader_fn (Callable[[], Iterable]): A function that returns a dataloader. + num_epochs (int, optional): Iterate the dataloader for number of epochs. Defaults to 1. + num_steps (int, optional): Iterate the dataloader for number if steps. If this value > 0, num_epochs will be ignored. Defaults to 0. + """ + self._get_ready() + dataloader = dataloader_fn() + if num_steps > 0: + # ignore num epochs + it = iter(dataloader) + for _ in tqdm(range(num_steps), desc='ExperienceMaker', disable=not is_rank_0()): + try: + batch = next(it) + except StopIteration: + it = iter(dataloader) + batch = next(it) + self._inference_step(batch) + else: + with tqdm(total=num_epochs * len(dataloader), desc='ExperienceMaker', disable=not is_rank_0()) as pbar: + for _ in range(num_epochs): + for batch in dataloader: + self._inference_step(batch) + pbar.update() self._on_finish() @ray.method(concurrency_group="model_io") From e35156ca4f2300d2d4dd4f831237e87166f362fc Mon Sep 17 00:00:00 2001 From: Hongxin Liu Date: Sun, 23 Apr 2023 17:10:11 +0800 Subject: [PATCH 06/26] [chat] refactor example folder (#14) --- applications/Chat/coati/ray/__init__.py | 2 - .../ray/{src => }/detached_replay_buffer.py | 0 .../ray/{src => }/detached_trainer_base.py | 0 .../ray/{src => }/detached_trainer_ppo.py | 0 applications/Chat/coati/ray/example/1m2t.py | 186 -------------- applications/Chat/coati/ray/example/2m2t.py | 209 ---------------- .../ray/{src => }/experience_maker_holder.py | 0 .../coati/ray/{src => }/pipeline_strategy.py | 63 +++-- applications/Chat/coati/ray/src/__init__.py | 0 .../Chat/coati/ray/{src => }/utils.py | 0 .../ray/example => examples/ray}/1m1t.py | 4 +- .../ray/example => examples/ray}/1m1t.sh | 0 .../example => examples/ray}/1m1t_quantize.py | 4 +- applications/Chat/examples/ray/1m2t.py | 203 ++++++++++++++++ .../ray/example => examples/ray}/1m2t.sh | 2 +- .../example => examples/ray}/1mmt_dummy.py | 6 +- 
.../ray/example => examples/ray}/2m1t.py | 37 +-- .../ray/example => examples/ray}/2m1t.sh | 0 applications/Chat/examples/ray/2m2t.py | 230 ++++++++++++++++++ .../ray/example => examples/ray}/2m2t.sh | 2 +- 20 files changed, 490 insertions(+), 458 deletions(-) rename applications/Chat/coati/ray/{src => }/detached_replay_buffer.py (100%) rename applications/Chat/coati/ray/{src => }/detached_trainer_base.py (100%) rename applications/Chat/coati/ray/{src => }/detached_trainer_ppo.py (100%) delete mode 100644 applications/Chat/coati/ray/example/1m2t.py delete mode 100644 applications/Chat/coati/ray/example/2m2t.py rename applications/Chat/coati/ray/{src => }/experience_maker_holder.py (100%) rename applications/Chat/coati/ray/{src => }/pipeline_strategy.py (81%) delete mode 100644 applications/Chat/coati/ray/src/__init__.py rename applications/Chat/coati/ray/{src => }/utils.py (100%) rename applications/Chat/{coati/ray/example => examples/ray}/1m1t.py (97%) rename applications/Chat/{coati/ray/example => examples/ray}/1m1t.sh (100%) rename applications/Chat/{coati/ray/example => examples/ray}/1m1t_quantize.py (97%) create mode 100644 applications/Chat/examples/ray/1m2t.py rename applications/Chat/{coati/ray/example => examples/ray}/1m2t.sh (96%) rename applications/Chat/{coati/ray/example => examples/ray}/1mmt_dummy.py (97%) rename applications/Chat/{coati/ray/example => examples/ray}/2m1t.py (90%) rename applications/Chat/{coati/ray/example => examples/ray}/2m1t.sh (100%) create mode 100644 applications/Chat/examples/ray/2m2t.py rename applications/Chat/{coati/ray/example => examples/ray}/2m2t.sh (96%) diff --git a/applications/Chat/coati/ray/__init__.py b/applications/Chat/coati/ray/__init__.py index 5802c05bc03f..e69de29bb2d1 100644 --- a/applications/Chat/coati/ray/__init__.py +++ b/applications/Chat/coati/ray/__init__.py @@ -1,2 +0,0 @@ -from .src.detached_replay_buffer import DetachedReplayBuffer -from .src.detached_trainer_ppo import DetachedPPOTrainer diff --git 
a/applications/Chat/coati/ray/src/detached_replay_buffer.py b/applications/Chat/coati/ray/detached_replay_buffer.py similarity index 100% rename from applications/Chat/coati/ray/src/detached_replay_buffer.py rename to applications/Chat/coati/ray/detached_replay_buffer.py diff --git a/applications/Chat/coati/ray/src/detached_trainer_base.py b/applications/Chat/coati/ray/detached_trainer_base.py similarity index 100% rename from applications/Chat/coati/ray/src/detached_trainer_base.py rename to applications/Chat/coati/ray/detached_trainer_base.py diff --git a/applications/Chat/coati/ray/src/detached_trainer_ppo.py b/applications/Chat/coati/ray/detached_trainer_ppo.py similarity index 100% rename from applications/Chat/coati/ray/src/detached_trainer_ppo.py rename to applications/Chat/coati/ray/detached_trainer_ppo.py diff --git a/applications/Chat/coati/ray/example/1m2t.py b/applications/Chat/coati/ray/example/1m2t.py deleted file mode 100644 index 3883c364a8e0..000000000000 --- a/applications/Chat/coati/ray/example/1m2t.py +++ /dev/null @@ -1,186 +0,0 @@ -import argparse -from copy import deepcopy - -import pandas as pd -import torch -from coati.trainer import PPOTrainer - - -from coati.ray.src.experience_maker_holder import ExperienceMakerHolder -from coati.ray.src.detached_trainer_ppo import DetachedPPOTrainer - -from coati.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy -from coati.experience_maker import NaiveExperienceMaker -from torch.optim import Adam -from transformers import AutoTokenizer, BloomTokenizerFast -from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer - -from colossalai.nn.optimizer import HybridAdam - -import ray -import os -import socket - - -def get_free_port(): - with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: - s.bind(('', 0)) - return s.getsockname()[1] - - -def get_local_ip(): - with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: - s.connect(('8.8.8.8', 80)) - return 
s.getsockname()[0] - -def main(args): - master_addr = str(get_local_ip()) - # trainer_env_info - trainer_port = str(get_free_port()) - env_info_trainer_1 = {'local_rank' : '0', - 'rank' : '0', - 'world_size' : '2', - 'master_port' : trainer_port, - 'master_addr' : master_addr} - env_info_trainer_2 = {'local_rank' : '0', - 'rank' : '1', - 'world_size' : '2', - 'master_port' : trainer_port, - 'master_addr' : master_addr} - # maker_env_info - maker_port = str(get_free_port()) - env_info_maker_1 = {'local_rank' : '0', - 'rank' : '0', - 'world_size' : '2', - 'master_port' : maker_port, - 'master_addr' : master_addr} - print([env_info_trainer_1, - env_info_trainer_2, - env_info_maker_1]) - ray.init(dashboard_port = 1145) - # configure tokenizer - if args.model == 'gpt2': - tokenizer = GPT2Tokenizer.from_pretrained('gpt2') - tokenizer.pad_token = tokenizer.eos_token - elif args.model == 'bloom': - tokenizer = BloomTokenizerFast.from_pretrained(args.pretrain) - tokenizer.pad_token = tokenizer.eos_token - elif args.model == 'opt': - tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") - else: - raise ValueError(f'Unsupported model "{args.model}"') - - # configure Trainer - trainer_1_ref = DetachedPPOTrainer.options(name="trainer1", namespace=os.environ["RAY_NAMESPACE"], num_gpus=1, max_concurrency=2).remote( - experience_maker_holder_name_list=["maker1"], - strategy=args.trainer_strategy, - model=args.model, - env_info=env_info_trainer_1, - pretrained=args.pretrain, - lora_rank=args.lora_rank, - train_batch_size=args.train_batch_size, - buffer_limit=16, - experience_batch_size=args.experience_batch_size, - max_epochs=args.max_epochs, - #kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - debug=args.debug, - ) - - trainer_2_ref = DetachedPPOTrainer.options(name="trainer2", namespace=os.environ["RAY_NAMESPACE"], num_gpus=1, max_concurrency=2).remote( - 
experience_maker_holder_name_list=["maker1"], - strategy=args.trainer_strategy, - model=args.model, - env_info=env_info_trainer_2, - pretrained=args.pretrain, - lora_rank=args.lora_rank, - train_batch_size=args.train_batch_size, - buffer_limit=16, - experience_batch_size=args.experience_batch_size, - max_epochs=args.max_epochs, - #kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - debug= args.debug, - ) - - # configure Experience Maker - experience_holder_1_ref = ExperienceMakerHolder.options(name="maker1", namespace=os.environ["RAY_NAMESPACE"], num_gpus=1, max_concurrency=2).remote( - detached_trainer_name_list=["trainer1", "trainer2"], - strategy=args.maker_strategy, - env_info=env_info_maker_1, - experience_batch_size=args.experience_batch_size, - kl_coef=0.1, - #kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - debug=args.debug, - ) - - # trainer send its actor and critic to experience holders. 
- # TODO: balance duty - ray.get(trainer_1_ref.initialize_remote_makers.remote()) - - # configure sampler - dataset = pd.read_csv(args.prompt_path)['prompt'] - - def tokenize_fn(texts): - # MUST padding to max length to ensure inputs of all ranks have the same length - # Different length may lead to hang when using gemini, as different generation steps - batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True) - return {k: v.cuda() for k, v in batch.items()} - - trainer_1_done_ref = trainer_1_ref.fit.remote(num_episodes=args.num_episodes, max_timesteps=args.max_timesteps, update_timesteps=args.update_timesteps) - trainer_2_done_ref = trainer_2_ref.fit.remote(num_episodes=args.num_episodes, max_timesteps=args.max_timesteps, update_timesteps=args.update_timesteps) - num_exp_per_maker = args.num_episodes * args.max_timesteps // args.update_timesteps * args.max_epochs * 2 + 3 # +3 for fault tolerance - maker_1_done_ref = experience_holder_1_ref.workingloop.remote(dataset, tokenize_fn, times=num_exp_per_maker) - - ray.get([trainer_1_done_ref, trainer_2_done_ref, maker_1_done_ref]) - # save model checkpoint after fitting - trainer_1_ref.strategy_save_actor.remote(args.save_path, only_rank0=True) - trainer_2_ref.strategy_save_actor.remote(args.save_path, only_rank0=True) - # save optimizer checkpoint on all ranks - if args.need_optim_ckpt: - trainer_1_ref.strategy_save_actor_optim.remote('actor_optim_checkpoint_prompts_%d.pt' % (torch.cuda.current_device()), - only_rank0=False) - trainer_2_ref.strategy_save_actor_optim.remote('actor_optim_checkpoint_prompts_%d.pt' % (torch.cuda.current_device()), - only_rank0=False) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('prompt_path') - parser.add_argument('--trainer_strategy', - choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'], - default='naive') - parser.add_argument('--maker_strategy', - choices=['naive', 'ddp', 
'colossalai_gemini', 'colossalai_zero2'], - default='naive') - parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt']) - parser.add_argument('--pretrain', type=str, default=None) - parser.add_argument('--save_path', type=str, default='actor_checkpoint_prompts.pt') - parser.add_argument('--need_optim_ckpt', type=bool, default=False) - parser.add_argument('--num_episodes', type=int, default=10) - parser.add_argument('--max_timesteps', type=int, default=10) - parser.add_argument('--update_timesteps', type=int, default=10) - parser.add_argument('--max_epochs', type=int, default=5) - parser.add_argument('--train_batch_size', type=int, default=8) - parser.add_argument('--experience_batch_size', type=int, default=8) - parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank") - - parser.add_argument('--debug', action='store_true') - args = parser.parse_args() - main(args) diff --git a/applications/Chat/coati/ray/example/2m2t.py b/applications/Chat/coati/ray/example/2m2t.py deleted file mode 100644 index 435c71915fc2..000000000000 --- a/applications/Chat/coati/ray/example/2m2t.py +++ /dev/null @@ -1,209 +0,0 @@ -import argparse -from copy import deepcopy - -import pandas as pd -import torch -from coati.trainer import PPOTrainer - - -from coati.ray.src.experience_maker_holder import ExperienceMakerHolder -from coati.ray.src.detached_trainer_ppo import DetachedPPOTrainer - -from coati.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy -from coati.experience_maker import NaiveExperienceMaker -from torch.optim import Adam -from transformers import AutoTokenizer, BloomTokenizerFast -from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer - -from colossalai.nn.optimizer import HybridAdam - -import ray -import os -import socket - - -def get_free_port(): - with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: - s.bind(('', 0)) - return s.getsockname()[1] - - -def 
get_local_ip(): - with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: - s.connect(('8.8.8.8', 80)) - return s.getsockname()[0] - -def main(args): - master_addr = str(get_local_ip()) - # trainer_env_info - trainer_port = str(get_free_port()) - env_info_trainer_1 = {'local_rank' : '0', - 'rank' : '0', - 'world_size' : '2', - 'master_port' : trainer_port, - 'master_addr' : master_addr} - env_info_trainer_2 = {'local_rank' : '0', - 'rank' : '1', - 'world_size' : '2', - 'master_port' : trainer_port, - 'master_addr' : master_addr} - # maker_env_info - maker_port = str(get_free_port()) - env_info_maker_1 = {'local_rank' : '0', - 'rank' : '0', - 'world_size' : '2', - 'master_port' : maker_port, - 'master_addr' : master_addr} - env_info_maker_2 = {'local_rank' : '0', - 'rank' : '1', - 'world_size' : '2', - 'master_port': maker_port, - 'master_addr' : master_addr} - print([env_info_trainer_1, - env_info_trainer_2, - env_info_maker_1, - env_info_maker_2]) - ray.init() - # configure tokenizer - if args.model == 'gpt2': - tokenizer = GPT2Tokenizer.from_pretrained('gpt2') - tokenizer.pad_token = tokenizer.eos_token - elif args.model == 'bloom': - tokenizer = BloomTokenizerFast.from_pretrained(args.pretrain) - tokenizer.pad_token = tokenizer.eos_token - elif args.model == 'opt': - tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") - else: - raise ValueError(f'Unsupported model "{args.model}"') - - # configure Trainer - trainer_1_ref = DetachedPPOTrainer.options(name="trainer1", namespace=os.environ["RAY_NAMESPACE"], num_gpus=1, max_concurrency=2).remote( - experience_maker_holder_name_list=["maker1", "maker2"], - strategy=args.trainer_strategy, - model=args.model, - env_info=env_info_trainer_1, - pretrained=args.pretrain, - lora_rank=args.lora_rank, - train_batch_size=args.train_batch_size, - buffer_limit=16, - experience_batch_size=args.experience_batch_size, - max_epochs=args.max_epochs, - #kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - 
top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - debug=args.debug, - ) - - trainer_2_ref = DetachedPPOTrainer.options(name="trainer2", namespace=os.environ["RAY_NAMESPACE"], num_gpus=1, max_concurrency=2).remote( - experience_maker_holder_name_list=["maker1", "maker2"], - strategy=args.trainer_strategy, - model=args.model, - env_info=env_info_trainer_2, - pretrained=args.pretrain, - lora_rank=args.lora_rank, - train_batch_size=args.train_batch_size, - buffer_limit=16, - experience_batch_size=args.experience_batch_size, - max_epochs=args.max_epochs, - #kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - debug=args.debug, - ) - - # configure Experience Maker - experience_holder_1_ref = ExperienceMakerHolder.options(name="maker1", namespace=os.environ["RAY_NAMESPACE"], num_gpus=1, max_concurrency=2).remote( - detached_trainer_name_list=["trainer1", "trainer2"], - strategy=args.maker_strategy, - env_info=env_info_maker_1, - experience_batch_size=args.experience_batch_size, - kl_coef=0.1, - #kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - debug=args.debug, - ) - - experience_holder_2_ref = ExperienceMakerHolder.options(name="maker2", namespace=os.environ["RAY_NAMESPACE"], num_gpus=1, max_concurrency=2).remote( - detached_trainer_name_list=["trainer1", "trainer2"], - strategy=args.maker_strategy, - env_info=env_info_maker_2, - experience_batch_size=args.experience_batch_size, - kl_coef=0.1, - #kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - debug=args.debug, - ) - - # trainer send its actor and critic to experience holders. 
- # TODO: balance duty - ray.get(trainer_1_ref.initialize_remote_makers.remote()) - - # configure sampler - dataset = pd.read_csv(args.prompt_path)['prompt'] - - def tokenize_fn(texts): - # MUST padding to max length to ensure inputs of all ranks have the same length - # Different length may lead to hang when using gemini, as different generation steps - batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True) - return {k: v.cuda() for k, v in batch.items()} - - trainer_1_done_ref = trainer_1_ref.fit.remote(num_episodes=args.num_episodes, max_timesteps=args.max_timesteps, update_timesteps=args.update_timesteps) - trainer_2_done_ref = trainer_2_ref.fit.remote(num_episodes=args.num_episodes, max_timesteps=args.max_timesteps, update_timesteps=args.update_timesteps) - num_exp_per_maker = args.num_episodes * args.max_timesteps // args.update_timesteps * args.max_epochs + 3 # +3 for fault tolerance - maker_1_done_ref = experience_holder_1_ref.workingloop.remote(dataset, tokenize_fn, times=num_exp_per_maker) - maker_2_done_ref = experience_holder_2_ref.workingloop.remote(dataset, tokenize_fn, times=num_exp_per_maker) - - ray.get([trainer_1_done_ref, trainer_2_done_ref, maker_1_done_ref, maker_2_done_ref]) - # save model checkpoint after fitting - trainer_1_ref.strategy_save_actor.remote(args.save_path, only_rank0=True) - trainer_2_ref.strategy_save_actor.remote(args.save_path, only_rank0=True) - # save optimizer checkpoint on all ranks - if args.need_optim_ckpt: - trainer_1_ref.strategy_save_actor_optim.remote('actor_optim_checkpoint_prompts_%d.pt' % (torch.cuda.current_device()), - only_rank0=False) - trainer_2_ref.strategy_save_actor_optim.remote('actor_optim_checkpoint_prompts_%d.pt' % (torch.cuda.current_device()), - only_rank0=False) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('prompt_path') - parser.add_argument('--trainer_strategy', - choices=['naive', 'ddp', 
'colossalai_gemini', 'colossalai_zero2'], - default='naive') - parser.add_argument('--maker_strategy', - choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'], - default='naive') - parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt']) - parser.add_argument('--pretrain', type=str, default=None) - parser.add_argument('--save_path', type=str, default='actor_checkpoint_prompts.pt') - parser.add_argument('--need_optim_ckpt', type=bool, default=False) - parser.add_argument('--num_episodes', type=int, default=10) - parser.add_argument('--max_timesteps', type=int, default=10) - parser.add_argument('--update_timesteps', type=int, default=10) - parser.add_argument('--max_epochs', type=int, default=5) - parser.add_argument('--train_batch_size', type=int, default=8) - parser.add_argument('--experience_batch_size', type=int, default=8) - parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank") - - parser.add_argument('--debug', action='store_true') - args = parser.parse_args() - main(args) diff --git a/applications/Chat/coati/ray/src/experience_maker_holder.py b/applications/Chat/coati/ray/experience_maker_holder.py similarity index 100% rename from applications/Chat/coati/ray/src/experience_maker_holder.py rename to applications/Chat/coati/ray/experience_maker_holder.py diff --git a/applications/Chat/coati/ray/src/pipeline_strategy.py b/applications/Chat/coati/ray/pipeline_strategy.py similarity index 81% rename from applications/Chat/coati/ray/src/pipeline_strategy.py rename to applications/Chat/coati/ray/pipeline_strategy.py index 1780839c62ee..4b01a45b176e 100644 --- a/applications/Chat/coati/ray/src/pipeline_strategy.py +++ b/applications/Chat/coati/ray/pipeline_strategy.py @@ -1,42 +1,42 @@ # WIP - -from coati.trainer.strategies import Strategy -from coati.trainer.strategies import NaiveStrategy -from coati.models.base import Actor, RewardModel, Critic +import os +import random +from functools 
import partial import numpy as np import torch +from coati.models.base import Actor, Critic, RewardModel +from coati.trainer.strategies import NaiveStrategy, Strategy from torch._C._distributed_rpc import _is_current_rpc_agent_set import colossalai -from colossalai.pipeline.pipeline_process_group import ppg -from colossalai.pipeline.rpc._pipeline_schedule import OneFOneBPipelineEngine from colossalai.fx import ColoTracer from colossalai.fx.passes.adding_split_node_pass import balanced_split_pass, split_with_split_nodes_pass from colossalai.pipeline.middleware.adaptor import get_fx_topology - - -import os -from functools import partial -import random +from colossalai.pipeline.pipeline_process_group import ppg +from colossalai.pipeline.rpc._pipeline_schedule import OneFOneBPipelineEngine rpc_is_initialized = _is_current_rpc_agent_set + class PipelineModel(torch.nn.Module): ''' - Actor has 2 kinds of jobs: forward and generate. + Actor has 2 kinds of jobs: forward and generate. better to just pipelinize the inner model ''' - def __init__(self, - model: torch.nn.Module, - stage_num: int, - num_microbatches: int, - data_kwargs = None, - ): + + def __init__( + self, + model: torch.nn.Module, + stage_num: int, + num_microbatches: int, + data_kwargs=None, + ): super().__init__() + # create partition module - def create_partition_module(pp_rank:int, stage_num: int, model, data_kwargs): + def create_partition_module(pp_rank: int, stage_num: int, model, data_kwargs): model.eval() tracer = ColoTracer() meta_args = {k: v.to('meta') for k, v in data_kwargs.items()} @@ -49,10 +49,11 @@ def create_partition_module(pp_rank:int, stage_num: int, model, data_kwargs): if isinstance(submodule, torch.fx.GraphModule): setattr(submodule, '_topo', topo) return split_submodules[pp_rank + 1] - + def partition(model, data_kwargs: dict, pp_rank: int, chunk: int, stage_num: int): partition = create_partition_module(pp_rank, stage_num, model, data_kwargs) return partition + self.inference_engine 
= OneFOneBPipelineEngine( partition_fn=partial(partition, model, data_kwargs), stage_num=stage_num, @@ -60,38 +61,33 @@ def partition(model, data_kwargs: dict, pp_rank: int, chunk: int, stage_num: int device='cuda', ) - def forward(self, - **model_inputs): + def forward(self, **model_inputs): return self.inference_engine.forward_backward(**model_inputs, forward_only=True) - class PPStrategy(NaiveStrategy): """ Strategy for Pipeline inference (inference only!) - + master node only """ - def __init__( - self, - seed: int = 42 - ): + + def __init__(self, seed: int = 42): self.seed = seed super().__init__() - - + def setup_distributed(self) -> None: colossalai.launch_from_torch({}, seed=self.seed) - ppg.set_global_info(rank = int(os.environ['RANK']), + ppg.set_global_info(rank=int(os.environ['RANK']), world_size=int(os.environ['WORLD_SIZE']), dp_degree=1, tp_degree=1, num_worker_threads=128, device="cuda") - + def model_init_context(self): return super().model_init_context() - + def setup_model(self, model: torch.nn.Module) -> torch.nn.Module: if isinstance(model, Actor) or \ isinstance(model, RewardModel) or \ @@ -102,4 +98,3 @@ def set_seed(self, seed: int) -> None: random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) - diff --git a/applications/Chat/coati/ray/src/__init__.py b/applications/Chat/coati/ray/src/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/applications/Chat/coati/ray/src/utils.py b/applications/Chat/coati/ray/utils.py similarity index 100% rename from applications/Chat/coati/ray/src/utils.py rename to applications/Chat/coati/ray/utils.py diff --git a/applications/Chat/coati/ray/example/1m1t.py b/applications/Chat/examples/ray/1m1t.py similarity index 97% rename from applications/Chat/coati/ray/example/1m1t.py rename to applications/Chat/examples/ray/1m1t.py index 4ad724c1e354..8c291abb1f8b 100644 --- a/applications/Chat/coati/ray/example/1m1t.py +++ b/applications/Chat/examples/ray/1m1t.py @@ -7,8 +7,8 @@ 
import ray import torch from coati.experience_maker import NaiveExperienceMaker -from coati.ray.src.detached_trainer_ppo import DetachedPPOTrainer -from coati.ray.src.experience_maker_holder import ExperienceMakerHolder +from coati.ray.detached_trainer_ppo import DetachedPPOTrainer +from coati.ray.experience_maker_holder import ExperienceMakerHolder from coati.trainer import PPOTrainer from coati.trainer.callbacks.performance_evaluator import ( ExperienceMakerPerformanceEvaluator, diff --git a/applications/Chat/coati/ray/example/1m1t.sh b/applications/Chat/examples/ray/1m1t.sh similarity index 100% rename from applications/Chat/coati/ray/example/1m1t.sh rename to applications/Chat/examples/ray/1m1t.sh diff --git a/applications/Chat/coati/ray/example/1m1t_quantize.py b/applications/Chat/examples/ray/1m1t_quantize.py similarity index 97% rename from applications/Chat/coati/ray/example/1m1t_quantize.py rename to applications/Chat/examples/ray/1m1t_quantize.py index dc9c9bf9a1f3..cc54bd1905c6 100644 --- a/applications/Chat/coati/ray/example/1m1t_quantize.py +++ b/applications/Chat/examples/ray/1m1t_quantize.py @@ -5,8 +5,8 @@ import pandas as pd import ray import torch -from coati.ray.src.detached_trainer_ppo import DetachedPPOTrainer -from coati.ray.src.experience_maker_holder import ExperienceMakerHolder +from coati.ray.detached_trainer_ppo import DetachedPPOTrainer +from coati.ray.experience_maker_holder import ExperienceMakerHolder from transformers import AutoTokenizer, BloomTokenizerFast from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer diff --git a/applications/Chat/examples/ray/1m2t.py b/applications/Chat/examples/ray/1m2t.py new file mode 100644 index 000000000000..1a35beb6221a --- /dev/null +++ b/applications/Chat/examples/ray/1m2t.py @@ -0,0 +1,203 @@ +import argparse +import os +import socket +from copy import deepcopy + +import pandas as pd +import ray +import torch +from coati.experience_maker import NaiveExperienceMaker +from 
coati.ray.detached_trainer_ppo import DetachedPPOTrainer +from coati.ray.experience_maker_holder import ExperienceMakerHolder +from coati.trainer import PPOTrainer +from coati.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy +from torch.optim import Adam +from transformers import AutoTokenizer, BloomTokenizerFast +from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer + +from colossalai.nn.optimizer import HybridAdam + + +def get_free_port(): + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind(('', 0)) + return s.getsockname()[1] + + +def get_local_ip(): + with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: + s.connect(('8.8.8.8', 80)) + return s.getsockname()[0] + + +def main(args): + master_addr = str(get_local_ip()) + # trainer_env_info + trainer_port = str(get_free_port()) + env_info_trainer_1 = { + 'local_rank': '0', + 'rank': '0', + 'world_size': '2', + 'master_port': trainer_port, + 'master_addr': master_addr + } + env_info_trainer_2 = { + 'local_rank': '0', + 'rank': '1', + 'world_size': '2', + 'master_port': trainer_port, + 'master_addr': master_addr + } + # maker_env_info + maker_port = str(get_free_port()) + env_info_maker_1 = { + 'local_rank': '0', + 'rank': '0', + 'world_size': '2', + 'master_port': maker_port, + 'master_addr': master_addr + } + print([env_info_trainer_1, env_info_trainer_2, env_info_maker_1]) + ray.init(dashboard_port=1145) + # configure tokenizer + if args.model == 'gpt2': + tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + tokenizer.pad_token = tokenizer.eos_token + elif args.model == 'bloom': + tokenizer = BloomTokenizerFast.from_pretrained(args.pretrain) + tokenizer.pad_token = tokenizer.eos_token + elif args.model == 'opt': + tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") + else: + raise ValueError(f'Unsupported model "{args.model}"') + + # configure Trainer + trainer_1_ref = DetachedPPOTrainer.options(name="trainer1", + 
namespace=os.environ["RAY_NAMESPACE"], + num_gpus=1, + max_concurrency=2).remote( + experience_maker_holder_name_list=["maker1"], + strategy=args.trainer_strategy, + model=args.model, + env_info=env_info_trainer_1, + pretrained=args.pretrain, + lora_rank=args.lora_rank, + train_batch_size=args.train_batch_size, + buffer_limit=16, + experience_batch_size=args.experience_batch_size, + max_epochs=args.max_epochs, + # kwargs: + max_length=128, + do_sample=True, + temperature=1.0, + top_k=50, + pad_token_id=tokenizer.pad_token_id, + eos_token_id=tokenizer.eos_token_id, + debug=args.debug, + ) + + trainer_2_ref = DetachedPPOTrainer.options(name="trainer2", + namespace=os.environ["RAY_NAMESPACE"], + num_gpus=1, + max_concurrency=2).remote( + experience_maker_holder_name_list=["maker1"], + strategy=args.trainer_strategy, + model=args.model, + env_info=env_info_trainer_2, + pretrained=args.pretrain, + lora_rank=args.lora_rank, + train_batch_size=args.train_batch_size, + buffer_limit=16, + experience_batch_size=args.experience_batch_size, + max_epochs=args.max_epochs, + # kwargs: + max_length=128, + do_sample=True, + temperature=1.0, + top_k=50, + pad_token_id=tokenizer.pad_token_id, + eos_token_id=tokenizer.eos_token_id, + debug=args.debug, + ) + + # configure Experience Maker + experience_holder_1_ref = ExperienceMakerHolder.options(name="maker1", + namespace=os.environ["RAY_NAMESPACE"], + num_gpus=1, + max_concurrency=2).remote( + detached_trainer_name_list=["trainer1", "trainer2"], + strategy=args.maker_strategy, + env_info=env_info_maker_1, + experience_batch_size=args.experience_batch_size, + kl_coef=0.1, + # kwargs: + max_length=128, + do_sample=True, + temperature=1.0, + top_k=50, + pad_token_id=tokenizer.pad_token_id, + eos_token_id=tokenizer.eos_token_id, + debug=args.debug, + ) + + # trainer send its actor and critic to experience holders. 
+ # TODO: balance duty + ray.get(trainer_1_ref.initialize_remote_makers.remote()) + + # configure sampler + dataset = pd.read_csv(args.prompt_path)['prompt'] + + def tokenize_fn(texts): + # MUST padding to max length to ensure inputs of all ranks have the same length + # Different length may lead to hang when using gemini, as different generation steps + batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True) + return {k: v.cuda() for k, v in batch.items()} + + trainer_1_done_ref = trainer_1_ref.fit.remote(num_episodes=args.num_episodes, + max_timesteps=args.max_timesteps, + update_timesteps=args.update_timesteps) + trainer_2_done_ref = trainer_2_ref.fit.remote(num_episodes=args.num_episodes, + max_timesteps=args.max_timesteps, + update_timesteps=args.update_timesteps) + num_exp_per_maker = args.num_episodes * args.max_timesteps // args.update_timesteps * \ + args.max_epochs * 2 + 3 # +3 for fault tolerance + maker_1_done_ref = experience_holder_1_ref.workingloop.remote(dataset, tokenize_fn, times=num_exp_per_maker) + + ray.get([trainer_1_done_ref, trainer_2_done_ref, maker_1_done_ref]) + # save model checkpoint after fitting + trainer_1_ref.strategy_save_actor.remote(args.save_path, only_rank0=True) + trainer_2_ref.strategy_save_actor.remote(args.save_path, only_rank0=True) + # save optimizer checkpoint on all ranks + if args.need_optim_ckpt: + trainer_1_ref.strategy_save_actor_optim.remote('actor_optim_checkpoint_prompts_%d.pt' % + (torch.cuda.current_device()), + only_rank0=False) + trainer_2_ref.strategy_save_actor_optim.remote('actor_optim_checkpoint_prompts_%d.pt' % + (torch.cuda.current_device()), + only_rank0=False) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('prompt_path') + parser.add_argument('--trainer_strategy', + choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'], + default='naive') + parser.add_argument('--maker_strategy', + choices=['naive', 
'ddp', 'colossalai_gemini', 'colossalai_zero2'], + default='naive') + parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt']) + parser.add_argument('--pretrain', type=str, default=None) + parser.add_argument('--save_path', type=str, default='actor_checkpoint_prompts.pt') + parser.add_argument('--need_optim_ckpt', type=bool, default=False) + parser.add_argument('--num_episodes', type=int, default=10) + parser.add_argument('--max_timesteps', type=int, default=10) + parser.add_argument('--update_timesteps', type=int, default=10) + parser.add_argument('--max_epochs', type=int, default=5) + parser.add_argument('--train_batch_size', type=int, default=8) + parser.add_argument('--experience_batch_size', type=int, default=8) + parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank") + + parser.add_argument('--debug', action='store_true') + args = parser.parse_args() + main(args) diff --git a/applications/Chat/coati/ray/example/1m2t.sh b/applications/Chat/examples/ray/1m2t.sh similarity index 96% rename from applications/Chat/coati/ray/example/1m2t.sh rename to applications/Chat/examples/ray/1m2t.sh index 669f4141026c..9608526ea7e7 100644 --- a/applications/Chat/coati/ray/example/1m2t.sh +++ b/applications/Chat/examples/ray/1m2t.sh @@ -20,4 +20,4 @@ export RAY_NAMESPACE="admin" python 1m2t.py "/path/to/prompts.csv" --model gpt2 \ --maker_strategy naive --trainer_strategy ddp --lora_rank 2 \ --num_episodes 10 --max_timesteps 10 --update_timesteps 10 \ - --max_epochs 10 #--debug \ No newline at end of file + --max_epochs 10 #--debug diff --git a/applications/Chat/coati/ray/example/1mmt_dummy.py b/applications/Chat/examples/ray/1mmt_dummy.py similarity index 97% rename from applications/Chat/coati/ray/example/1mmt_dummy.py rename to applications/Chat/examples/ray/1mmt_dummy.py index c7619ea6940b..540f4243577d 100644 --- a/applications/Chat/coati/ray/example/1mmt_dummy.py +++ 
b/applications/Chat/examples/ray/1mmt_dummy.py @@ -5,9 +5,9 @@ import ray import torch -from coati.ray.src.detached_trainer_ppo import DetachedPPOTrainer -from coati.ray.src.experience_maker_holder import ExperienceMakerHolder -from coati.ray.src.utils import ( +from coati.ray.detached_trainer_ppo import DetachedPPOTrainer +from coati.ray.experience_maker_holder import ExperienceMakerHolder +from coati.ray.utils import ( get_actor_from_args, get_critic_from_args, get_reward_model_from_args, diff --git a/applications/Chat/coati/ray/example/2m1t.py b/applications/Chat/examples/ray/2m1t.py similarity index 90% rename from applications/Chat/coati/ray/example/2m1t.py rename to applications/Chat/examples/ray/2m1t.py index b655de1ab1fa..bed6246ed0d7 100644 --- a/applications/Chat/coati/ray/example/2m1t.py +++ b/applications/Chat/examples/ray/2m1t.py @@ -1,26 +1,22 @@ import argparse +import os +import socket from copy import deepcopy import pandas as pd +import ray import torch +from coati.experience_maker import NaiveExperienceMaker +from coati.ray.detached_trainer_ppo import DetachedPPOTrainer +from coati.ray.experience_maker_holder import ExperienceMakerHolder from coati.trainer import PPOTrainer - - -from coati.ray.src.experience_maker_holder import ExperienceMakerHolder -from coati.ray.src.detached_trainer_ppo import DetachedPPOTrainer - from coati.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy -from coati.experience_maker import NaiveExperienceMaker from torch.optim import Adam from transformers import AutoTokenizer, BloomTokenizerFast from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer from colossalai.nn.optimizer import HybridAdam -import ray -import os -import socket - def main(args): # configure tokenizer @@ -46,7 +42,7 @@ def main(args): buffer_limit=16, experience_batch_size=args.experience_batch_size, max_epochs=args.max_epochs, - #kwargs: + # kwargs: max_length=128, do_sample=True, temperature=1.0, @@ -62,7 +58,7 @@ 
def main(args): strategy=args.maker_strategy, experience_batch_size=args.experience_batch_size, kl_coef=0.1, - #kwargs: + # kwargs: max_length=128, do_sample=True, temperature=1.0, @@ -71,13 +67,13 @@ def main(args): eos_token_id=tokenizer.eos_token_id, debug=args.debug, ) - + experience_holder_2_ref = ExperienceMakerHolder.options(name="maker2", num_gpus=1, max_concurrency=2).remote( detached_trainer_name_list=["trainer1"], strategy=args.maker_strategy, experience_batch_size=args.experience_batch_size, kl_coef=0.1, - #kwargs: + # kwargs: max_length=128, do_sample=True, temperature=1.0, @@ -99,20 +95,25 @@ def tokenize_fn(texts): batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True) return {k: v.cuda() for k, v in batch.items()} - trainer_done_ref = trainer_ref.fit.remote(num_episodes=args.num_episodes, max_timesteps=args.max_timesteps, update_timesteps=args.update_timesteps) - num_exp_per_maker = args.num_episodes * args.max_timesteps // args.update_timesteps * args.max_epochs // 2 + 3 # +3 for fault tolerance + trainer_done_ref = trainer_ref.fit.remote(num_episodes=args.num_episodes, + max_timesteps=args.max_timesteps, + update_timesteps=args.update_timesteps) + num_exp_per_maker = args.num_episodes * args.max_timesteps // args.update_timesteps * \ + args.max_epochs // 2 + 3 # +3 for fault tolerance maker_1_done_ref = experience_holder_1_ref.workingloop.remote(dataset, tokenize_fn, times=num_exp_per_maker) maker_2_done_ref = experience_holder_2_ref.workingloop.remote(dataset, tokenize_fn, times=num_exp_per_maker) - + ray.get([trainer_done_ref, maker_1_done_ref, maker_2_done_ref]) # save model checkpoint after fitting trainer_ref.strategy_save_actor.remote(args.save_path, only_rank0=True) # save optimizer checkpoint on all ranks if args.need_optim_ckpt: - trainer_ref.strategy_save_actor_optim.remote('actor_optim_checkpoint_prompts_%d.pt' % (torch.cuda.current_device()), + 
trainer_ref.strategy_save_actor_optim.remote('actor_optim_checkpoint_prompts_%d.pt' % + (torch.cuda.current_device()), only_rank0=False) + if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('prompt_path') diff --git a/applications/Chat/coati/ray/example/2m1t.sh b/applications/Chat/examples/ray/2m1t.sh similarity index 100% rename from applications/Chat/coati/ray/example/2m1t.sh rename to applications/Chat/examples/ray/2m1t.sh diff --git a/applications/Chat/examples/ray/2m2t.py b/applications/Chat/examples/ray/2m2t.py new file mode 100644 index 000000000000..05440032ce9f --- /dev/null +++ b/applications/Chat/examples/ray/2m2t.py @@ -0,0 +1,230 @@ +import argparse +import os +import socket +from copy import deepcopy + +import pandas as pd +import ray +import torch +from coati.experience_maker import NaiveExperienceMaker +from coati.ray.detached_trainer_ppo import DetachedPPOTrainer +from coati.ray.experience_maker_holder import ExperienceMakerHolder +from coati.trainer import PPOTrainer +from coati.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy +from torch.optim import Adam +from transformers import AutoTokenizer, BloomTokenizerFast +from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer + +from colossalai.nn.optimizer import HybridAdam + + +def get_free_port(): + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind(('', 0)) + return s.getsockname()[1] + + +def get_local_ip(): + with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: + s.connect(('8.8.8.8', 80)) + return s.getsockname()[0] + + +def main(args): + master_addr = str(get_local_ip()) + # trainer_env_info + trainer_port = str(get_free_port()) + env_info_trainer_1 = { + 'local_rank': '0', + 'rank': '0', + 'world_size': '2', + 'master_port': trainer_port, + 'master_addr': master_addr + } + env_info_trainer_2 = { + 'local_rank': '0', + 'rank': '1', + 'world_size': '2', + 'master_port': trainer_port, + 'master_addr': 
master_addr + } + # maker_env_info + maker_port = str(get_free_port()) + env_info_maker_1 = { + 'local_rank': '0', + 'rank': '0', + 'world_size': '2', + 'master_port': maker_port, + 'master_addr': master_addr + } + env_info_maker_2 = { + 'local_rank': '0', + 'rank': '1', + 'world_size': '2', + 'master_port': maker_port, + 'master_addr': master_addr + } + print([env_info_trainer_1, env_info_trainer_2, env_info_maker_1, env_info_maker_2]) + ray.init() + # configure tokenizer + if args.model == 'gpt2': + tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + tokenizer.pad_token = tokenizer.eos_token + elif args.model == 'bloom': + tokenizer = BloomTokenizerFast.from_pretrained(args.pretrain) + tokenizer.pad_token = tokenizer.eos_token + elif args.model == 'opt': + tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") + else: + raise ValueError(f'Unsupported model "{args.model}"') + + # configure Trainer + trainer_1_ref = DetachedPPOTrainer.options(name="trainer1", + namespace=os.environ["RAY_NAMESPACE"], + num_gpus=1, + max_concurrency=2).remote( + experience_maker_holder_name_list=["maker1", "maker2"], + strategy=args.trainer_strategy, + model=args.model, + env_info=env_info_trainer_1, + pretrained=args.pretrain, + lora_rank=args.lora_rank, + train_batch_size=args.train_batch_size, + buffer_limit=16, + experience_batch_size=args.experience_batch_size, + max_epochs=args.max_epochs, + # kwargs: + max_length=128, + do_sample=True, + temperature=1.0, + top_k=50, + pad_token_id=tokenizer.pad_token_id, + eos_token_id=tokenizer.eos_token_id, + debug=args.debug, + ) + + trainer_2_ref = DetachedPPOTrainer.options(name="trainer2", + namespace=os.environ["RAY_NAMESPACE"], + num_gpus=1, + max_concurrency=2).remote( + experience_maker_holder_name_list=["maker1", "maker2"], + strategy=args.trainer_strategy, + model=args.model, + env_info=env_info_trainer_2, + pretrained=args.pretrain, + lora_rank=args.lora_rank, + train_batch_size=args.train_batch_size, + buffer_limit=16, + 
experience_batch_size=args.experience_batch_size, + max_epochs=args.max_epochs, + # kwargs: + max_length=128, + do_sample=True, + temperature=1.0, + top_k=50, + pad_token_id=tokenizer.pad_token_id, + eos_token_id=tokenizer.eos_token_id, + debug=args.debug, + ) + + # configure Experience Maker + experience_holder_1_ref = ExperienceMakerHolder.options(name="maker1", + namespace=os.environ["RAY_NAMESPACE"], + num_gpus=1, + max_concurrency=2).remote( + detached_trainer_name_list=["trainer1", "trainer2"], + strategy=args.maker_strategy, + env_info=env_info_maker_1, + experience_batch_size=args.experience_batch_size, + kl_coef=0.1, + # kwargs: + max_length=128, + do_sample=True, + temperature=1.0, + top_k=50, + pad_token_id=tokenizer.pad_token_id, + eos_token_id=tokenizer.eos_token_id, + debug=args.debug, + ) + + experience_holder_2_ref = ExperienceMakerHolder.options(name="maker2", + namespace=os.environ["RAY_NAMESPACE"], + num_gpus=1, + max_concurrency=2).remote( + detached_trainer_name_list=["trainer1", "trainer2"], + strategy=args.maker_strategy, + env_info=env_info_maker_2, + experience_batch_size=args.experience_batch_size, + kl_coef=0.1, + # kwargs: + max_length=128, + do_sample=True, + temperature=1.0, + top_k=50, + pad_token_id=tokenizer.pad_token_id, + eos_token_id=tokenizer.eos_token_id, + debug=args.debug, + ) + + # trainer send its actor and critic to experience holders. 
+ # TODO: balance duty + ray.get(trainer_1_ref.initialize_remote_makers.remote()) + + # configure sampler + dataset = pd.read_csv(args.prompt_path)['prompt'] + + def tokenize_fn(texts): + # MUST padding to max length to ensure inputs of all ranks have the same length + # Different length may lead to hang when using gemini, as different generation steps + batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True) + return {k: v.cuda() for k, v in batch.items()} + + trainer_1_done_ref = trainer_1_ref.fit.remote(num_episodes=args.num_episodes, + max_timesteps=args.max_timesteps, + update_timesteps=args.update_timesteps) + trainer_2_done_ref = trainer_2_ref.fit.remote(num_episodes=args.num_episodes, + max_timesteps=args.max_timesteps, + update_timesteps=args.update_timesteps) + num_exp_per_maker = args.num_episodes * args.max_timesteps // args.update_timesteps * \ + args.max_epochs + 3 # +3 for fault tolerance + maker_1_done_ref = experience_holder_1_ref.workingloop.remote(dataset, tokenize_fn, times=num_exp_per_maker) + maker_2_done_ref = experience_holder_2_ref.workingloop.remote(dataset, tokenize_fn, times=num_exp_per_maker) + + ray.get([trainer_1_done_ref, trainer_2_done_ref, maker_1_done_ref, maker_2_done_ref]) + # save model checkpoint after fitting + trainer_1_ref.strategy_save_actor.remote(args.save_path, only_rank0=True) + trainer_2_ref.strategy_save_actor.remote(args.save_path, only_rank0=True) + # save optimizer checkpoint on all ranks + if args.need_optim_ckpt: + trainer_1_ref.strategy_save_actor_optim.remote('actor_optim_checkpoint_prompts_%d.pt' % + (torch.cuda.current_device()), + only_rank0=False) + trainer_2_ref.strategy_save_actor_optim.remote('actor_optim_checkpoint_prompts_%d.pt' % + (torch.cuda.current_device()), + only_rank0=False) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('prompt_path') + parser.add_argument('--trainer_strategy', + choices=['naive', 'ddp', 
'colossalai_gemini', 'colossalai_zero2'], + default='naive') + parser.add_argument('--maker_strategy', + choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'], + default='naive') + parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt']) + parser.add_argument('--pretrain', type=str, default=None) + parser.add_argument('--save_path', type=str, default='actor_checkpoint_prompts.pt') + parser.add_argument('--need_optim_ckpt', type=bool, default=False) + parser.add_argument('--num_episodes', type=int, default=10) + parser.add_argument('--max_timesteps', type=int, default=10) + parser.add_argument('--update_timesteps', type=int, default=10) + parser.add_argument('--max_epochs', type=int, default=5) + parser.add_argument('--train_batch_size', type=int, default=8) + parser.add_argument('--experience_batch_size', type=int, default=8) + parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank") + + parser.add_argument('--debug', action='store_true') + args = parser.parse_args() + main(args) diff --git a/applications/Chat/coati/ray/example/2m2t.sh b/applications/Chat/examples/ray/2m2t.sh similarity index 96% rename from applications/Chat/coati/ray/example/2m2t.sh rename to applications/Chat/examples/ray/2m2t.sh index fb4024766c54..bd8ca84a58fb 100644 --- a/applications/Chat/coati/ray/example/2m2t.sh +++ b/applications/Chat/examples/ray/2m2t.sh @@ -20,4 +20,4 @@ export RAY_NAMESPACE="admin" python 2m2t.py "path/to/prompts.csv" \ --maker_strategy naive --trainer_strategy colossalai_zero2 --lora_rank 2 \ --num_episodes 10 --max_timesteps 10 --update_timesteps 10 \ - --max_epochs 10 --debug \ No newline at end of file + --max_epochs 10 --debug From 5cb497c488e53a6047cb1bc9ce4f50009adf6fc0 Mon Sep 17 00:00:00 2001 From: Hongxin Liu Date: Mon, 24 Apr 2023 10:55:40 +0800 Subject: [PATCH 07/26] [chat] support quant (#15) * [chat] add quant * [chat] add quant example --- 
applications/Chat/coati/quant/__init__.py | 7 + .../Chat/coati/quant/llama_gptq/__init__.py | 5 + .../Chat/coati/quant/llama_gptq/loader.py | 26 ++ .../coati/quant/llama_gptq/model_utils.py | 13 + .../Chat/coati/quant/llama_gptq/quant.py | 283 ++++++++++++++++++ applications/Chat/coati/quant/utils.py | 28 ++ applications/Chat/coati/ray/utils.py | 4 + applications/Chat/examples/ray/1mmt_dummy.py | 29 +- 8 files changed, 389 insertions(+), 6 deletions(-) create mode 100644 applications/Chat/coati/quant/__init__.py create mode 100644 applications/Chat/coati/quant/llama_gptq/__init__.py create mode 100644 applications/Chat/coati/quant/llama_gptq/loader.py create mode 100644 applications/Chat/coati/quant/llama_gptq/model_utils.py create mode 100644 applications/Chat/coati/quant/llama_gptq/quant.py create mode 100644 applications/Chat/coati/quant/utils.py diff --git a/applications/Chat/coati/quant/__init__.py b/applications/Chat/coati/quant/__init__.py new file mode 100644 index 000000000000..a65a78d07bb8 --- /dev/null +++ b/applications/Chat/coati/quant/__init__.py @@ -0,0 +1,7 @@ +from .llama_gptq import load_quant as llama_load_quant +from .utils import low_resource_init + +__all__ = [ + 'llama_load_quant', + 'low_resource_init', +] diff --git a/applications/Chat/coati/quant/llama_gptq/__init__.py b/applications/Chat/coati/quant/llama_gptq/__init__.py new file mode 100644 index 000000000000..51c8d6316290 --- /dev/null +++ b/applications/Chat/coati/quant/llama_gptq/__init__.py @@ -0,0 +1,5 @@ +from .loader import load_quant + +__all__ = [ + 'load_quant', +] diff --git a/applications/Chat/coati/quant/llama_gptq/loader.py b/applications/Chat/coati/quant/llama_gptq/loader.py new file mode 100644 index 000000000000..5353dc8a2ea3 --- /dev/null +++ b/applications/Chat/coati/quant/llama_gptq/loader.py @@ -0,0 +1,26 @@ +import torch +import torch.nn as nn + +from .model_utils import find_layers +from .quant import make_quant + + +def load_quant(model: nn.Module, checkpoint: 
str, wbits: int, groupsize: int): + model = model.eval() + layers = find_layers(model) + + # ignore lm head + layers = find_layers(model) + for name in ['lm_head']: + if name in layers: + del layers[name] + + make_quant(model, layers, wbits, groupsize) + + if checkpoint.endswith('.safetensors'): + from safetensors.torch import load_file as safe_load + model.load_state_dict(safe_load(checkpoint)) + else: + model.load_state_dict(torch.load(checkpoint)) + + return model diff --git a/applications/Chat/coati/quant/llama_gptq/model_utils.py b/applications/Chat/coati/quant/llama_gptq/model_utils.py new file mode 100644 index 000000000000..62db171abb52 --- /dev/null +++ b/applications/Chat/coati/quant/llama_gptq/model_utils.py @@ -0,0 +1,13 @@ +# copied from https://github.com/qwopqwop200/GPTQ-for-LLaMa/blob/past/modelutils.py + +import torch +import torch.nn as nn + + +def find_layers(module, layers=[nn.Conv2d, nn.Linear], name=''): + if type(module) in layers: + return {name: module} + res = {} + for name1, child in module.named_children(): + res.update(find_layers(child, layers=layers, name=name + '.' 
+ name1 if name != '' else name1)) + return res diff --git a/applications/Chat/coati/quant/llama_gptq/quant.py b/applications/Chat/coati/quant/llama_gptq/quant.py new file mode 100644 index 000000000000..f7d5b7ce4bd8 --- /dev/null +++ b/applications/Chat/coati/quant/llama_gptq/quant.py @@ -0,0 +1,283 @@ +# copied from https://github.com/qwopqwop200/GPTQ-for-LLaMa/blob/past/quant.py + +import math + +import numpy as np +import torch +import torch.nn as nn + + +def quantize(x, scale, zero, maxq): + q = torch.clamp(torch.round(x / scale) + zero, 0, maxq) + return scale * (q - zero) + + +class Quantizer(nn.Module): + + def __init__(self, shape=1): + super(Quantizer, self).__init__() + self.register_buffer('maxq', torch.tensor(0)) + self.register_buffer('scale', torch.zeros(shape)) + self.register_buffer('zero', torch.zeros(shape)) + + def configure(self, bits, perchannel=False, sym=True, mse=False, norm=2.4, grid=100, maxshrink=.8): + self.maxq = torch.tensor(2**bits - 1) + self.perchannel = perchannel + self.sym = sym + self.mse = mse + self.norm = norm + self.grid = grid + self.maxshrink = maxshrink + + def find_params(self, x, weight=False): + dev = x.device + self.maxq = self.maxq.to(dev) + + shape = x.shape + if self.perchannel: + if weight: + x = x.flatten(1) + else: + if len(shape) == 4: + x = x.permute([1, 0, 2, 3]) + x = x.flatten(1) + if len(shape) == 3: + x = x.reshape((-1, shape[-1])).t() + if len(shape) == 2: + x = x.t() + else: + x = x.flatten().unsqueeze(0) + + tmp = torch.zeros(x.shape[0], device=dev) + xmin = torch.minimum(x.min(1)[0], tmp) + xmax = torch.maximum(x.max(1)[0], tmp) + + if self.sym: + xmax = torch.maximum(torch.abs(xmin), xmax) + tmp = xmin < 0 + if torch.any(tmp): + xmin[tmp] = -xmax[tmp] + tmp = (xmin == 0) & (xmax == 0) + xmin[tmp] = -1 + xmax[tmp] = +1 + + self.scale = (xmax - xmin) / self.maxq + if self.sym: + self.zero = torch.full_like(self.scale, (self.maxq + 1) / 2) + else: + self.zero = torch.round(-xmin / self.scale) + + if 
self.mse: + best = torch.full([x.shape[0]], float('inf'), device=dev) + for i in range(int(self.maxshrink * self.grid)): + p = 1 - i / self.grid + xmin1 = p * xmin + xmax1 = p * xmax + scale1 = (xmax1 - xmin1) / self.maxq + zero1 = torch.round(-xmin1 / scale1) if not self.sym else self.zero + q = quantize(x, scale1.unsqueeze(1), zero1.unsqueeze(1), self.maxq) + q -= x + q.abs_() + q.pow_(self.norm) + err = torch.sum(q, 1) + tmp = err < best + if torch.any(tmp): + best[tmp] = err[tmp] + self.scale[tmp] = scale1[tmp] + self.zero[tmp] = zero1[tmp] + if not self.perchannel: + if weight: + tmp = shape[0] + else: + tmp = shape[1] if len(shape) != 3 else shape[2] + self.scale = self.scale.repeat(tmp) + self.zero = self.zero.repeat(tmp) + + if weight: + shape = [-1] + [1] * (len(shape) - 1) + self.scale = self.scale.reshape(shape) + self.zero = self.zero.reshape(shape) + return + if len(shape) == 4: + self.scale = self.scale.reshape((1, -1, 1, 1)) + self.zero = self.zero.reshape((1, -1, 1, 1)) + if len(shape) == 3: + self.scale = self.scale.reshape((1, 1, -1)) + self.zero = self.zero.reshape((1, 1, -1)) + if len(shape) == 2: + self.scale = self.scale.unsqueeze(0) + self.zero = self.zero.unsqueeze(0) + + def quantize(self, x): + if self.ready(): + return quantize(x, self.scale, self.zero, self.maxq) + return x + + def enabled(self): + return self.maxq > 0 + + def ready(self): + return torch.all(self.scale != 0) + + +try: + import quant_cuda +except: + print('CUDA extension not installed.') + +# Assumes layer is perfectly divisible into 256 * 256 blocks + + +class QuantLinear(nn.Module): + + def __init__(self, bits, groupsize, infeatures, outfeatures): + super().__init__() + if bits not in [2, 3, 4, 8]: + raise NotImplementedError("Only 2,3,4,8 bits are supported.") + self.infeatures = infeatures + self.outfeatures = outfeatures + self.bits = bits + if groupsize != -1 and groupsize < 32 and groupsize != int(math.pow(2, int(math.log2(groupsize)))): + raise 
NotImplementedError("groupsize supports powers of 2 greater than 32. (e.g. : 32,64,128,etc)") + groupsize = groupsize if groupsize != -1 else infeatures + self.groupsize = groupsize + self.register_buffer( + 'qzeros', torch.zeros((math.ceil(infeatures / groupsize), outfeatures // 256 * (bits * 8)), + dtype=torch.int)) + self.register_buffer('scales', torch.zeros((math.ceil(infeatures / groupsize), outfeatures))) + self.register_buffer('bias', torch.zeros(outfeatures)) + self.register_buffer('qweight', torch.zeros((infeatures // 256 * (bits * 8), outfeatures), dtype=torch.int)) + self._initialized_quant_state = False + + def pack(self, linear, scales, zeros): + scales = scales.t().contiguous() + zeros = zeros.t().contiguous() + scale_zeros = zeros * scales + self.scales = scales.clone() + if linear.bias is not None: + self.bias = linear.bias.clone() + + intweight = [] + for idx in range(self.infeatures): + g_idx = idx // self.groupsize + intweight.append( + torch.round((linear.weight.data[:, idx] + scale_zeros[g_idx]) / self.scales[g_idx]).to(torch.int)[:, + None]) + intweight = torch.cat(intweight, dim=1) + intweight = intweight.t().contiguous() + intweight = intweight.numpy().astype(np.uint32) + qweight = np.zeros((intweight.shape[0] // 256 * (self.bits * 8), intweight.shape[1]), dtype=np.uint32) + i = 0 + row = 0 + while row < qweight.shape[0]: + if self.bits in [2, 4, 8]: + for j in range(i, i + (32 // self.bits)): + qweight[row] |= intweight[j] << (self.bits * (j - i)) + i += 32 // self.bits + row += 1 + elif self.bits == 3: + for j in range(i, i + 10): + qweight[row] |= intweight[j] << (3 * (j - i)) + i += 10 + qweight[row] |= intweight[i] << 30 + row += 1 + qweight[row] |= (intweight[i] >> 2) & 1 + i += 1 + for j in range(i, i + 10): + qweight[row] |= intweight[j] << (3 * (j - i) + 1) + i += 10 + qweight[row] |= intweight[i] << 31 + row += 1 + qweight[row] |= (intweight[i] >> 1) & 0x3 + i += 1 + for j in range(i, i + 10): + qweight[row] |= intweight[j] << (3 
* (j - i) + 2) + i += 10 + row += 1 + else: + raise NotImplementedError("Only 2,3,4,8 bits are supported.") + + qweight = qweight.astype(np.int32) + self.qweight = torch.from_numpy(qweight) + + zeros -= 1 + zeros = zeros.numpy().astype(np.uint32) + qzeros = np.zeros((zeros.shape[0], zeros.shape[1] // 256 * (self.bits * 8)), dtype=np.uint32) + i = 0 + col = 0 + while col < qzeros.shape[1]: + if self.bits in [2, 4, 8]: + for j in range(i, i + (32 // self.bits)): + qzeros[:, col] |= zeros[:, j] << (self.bits * (j - i)) + i += 32 // self.bits + col += 1 + elif self.bits == 3: + for j in range(i, i + 10): + qzeros[:, col] |= zeros[:, j] << (3 * (j - i)) + i += 10 + qzeros[:, col] |= zeros[:, i] << 30 + col += 1 + qzeros[:, col] |= (zeros[:, i] >> 2) & 1 + i += 1 + for j in range(i, i + 10): + qzeros[:, col] |= zeros[:, j] << (3 * (j - i) + 1) + i += 10 + qzeros[:, col] |= zeros[:, i] << 31 + col += 1 + qzeros[:, col] |= (zeros[:, i] >> 1) & 0x3 + i += 1 + for j in range(i, i + 10): + qzeros[:, col] |= zeros[:, j] << (3 * (j - i) + 2) + i += 10 + col += 1 + else: + raise NotImplementedError("Only 2,3,4,8 bits are supported.") + + qzeros = qzeros.astype(np.int32) + self.qzeros = torch.from_numpy(qzeros) + + def forward(self, x): + intermediate_dtype = torch.float32 + + if not self._initialized_quant_state: + # Do we even have a bias? Check for at least one non-zero element. + if self.bias is not None and bool(torch.any(self.bias != 0)): + # Then make sure it's the right type. 
+ self.bias.data = self.bias.data.to(intermediate_dtype) + else: + self.bias = None + + outshape = list(x.shape) + outshape[-1] = self.outfeatures + x = x.reshape(-1, x.shape[-1]) + if self.bias is None: + y = torch.zeros(x.shape[0], outshape[-1], dtype=intermediate_dtype, device=x.device) + else: + y = self.bias.clone().repeat(x.shape[0], 1) + + output_dtype = x.dtype + x = x.to(intermediate_dtype) + if self.bits == 2: + quant_cuda.vecquant2matmul(x, self.qweight, y, self.scales, self.qzeros, self.groupsize) + elif self.bits == 3: + quant_cuda.vecquant3matmul(x, self.qweight, y, self.scales, self.qzeros, self.groupsize) + elif self.bits == 4: + quant_cuda.vecquant4matmul(x, self.qweight, y, self.scales, self.qzeros, self.groupsize) + elif self.bits == 8: + quant_cuda.vecquant8matmul(x, self.qweight, y, self.scales, self.qzeros, self.groupsize) + else: + raise NotImplementedError("Only 2,3,4,8 bits are supported.") + y = y.to(output_dtype) + return y.reshape(outshape) + + +def make_quant(module, names, bits, groupsize, name=''): + if isinstance(module, QuantLinear): + return + for attr in dir(module): + tmp = getattr(module, attr) + name1 = name + '.' + attr if name != '' else attr + if name1 in names: + setattr(module, attr, QuantLinear(bits, groupsize, tmp.in_features, tmp.out_features)) + for name1, child in module.named_children(): + make_quant(child, names, bits, groupsize, name + '.' + name1 if name != '' else name1) diff --git a/applications/Chat/coati/quant/utils.py b/applications/Chat/coati/quant/utils.py new file mode 100644 index 000000000000..01b8cff0add1 --- /dev/null +++ b/applications/Chat/coati/quant/utils.py @@ -0,0 +1,28 @@ +from contextlib import contextmanager + +import torch + + +def _noop(*args, **kwargs): + pass + + +@contextmanager +def low_resource_init(): + """This context manager disables weight initialization and sets the default float dtype to half. 
+ """ + old_kaiming_uniform_ = torch.nn.init.kaiming_uniform_ + old_uniform_ = torch.nn.init.uniform_ + old_normal_ = torch.nn.init.normal_ + dtype = torch.get_default_dtype() + try: + torch.nn.init.kaiming_uniform_ = _noop + torch.nn.init.uniform_ = _noop + torch.nn.init.normal_ = _noop + torch.set_default_dtype(torch.half) + yield + finally: + torch.nn.init.kaiming_uniform_ = old_kaiming_uniform_ + torch.nn.init.uniform_ = old_uniform_ + torch.nn.init.normal_ = old_normal_ + torch.set_default_dtype(dtype) diff --git a/applications/Chat/coati/ray/utils.py b/applications/Chat/coati/ray/utils.py index 1b14e1c3f1cb..6e62ba0b4841 100644 --- a/applications/Chat/coati/ray/utils.py +++ b/applications/Chat/coati/ray/utils.py @@ -75,6 +75,10 @@ def get_strategy_from_args(strategy: str): strategy_ = ColossalAIStrategy(stage=3, placement_policy='cuda', initial_scale=2**5) elif strategy == 'colossalai_zero2': strategy_ = ColossalAIStrategy(stage=2, placement_policy='cuda') + elif strategy == 'colossalai_gemini_cpu': + strategy = ColossalAIStrategy(stage=3, placement_policy='cpu', initial_scale=2**5) + elif strategy == 'colossalai_zero2_cpu': + strategy = ColossalAIStrategy(stage=2, placement_policy='cpu') else: raise ValueError(f'Unsupported strategy "{strategy}"') return strategy_ diff --git a/applications/Chat/examples/ray/1mmt_dummy.py b/applications/Chat/examples/ray/1mmt_dummy.py index 540f4243577d..d293e6940fbe 100644 --- a/applications/Chat/examples/ray/1mmt_dummy.py +++ b/applications/Chat/examples/ray/1mmt_dummy.py @@ -5,6 +5,7 @@ import ray import torch +from coati.quant import llama_load_quant, low_resource_init from coati.ray.detached_trainer_ppo import DetachedPPOTrainer from coati.ray.experience_maker_holder import ExperienceMakerHolder from coati.ray.utils import ( @@ -15,6 +16,7 @@ ) from torch.utils.data import DataLoader from transformers import AutoConfig, AutoTokenizer +from transformers.modeling_utils import no_init_weights def get_free_port(): @@ -59,9 
+61,16 @@ def model_fn(): actor_cfg = AutoConfig.from_pretrained(args.pretrain) critic_cfg = AutoConfig.from_pretrained(args.critic_pretrain) actor = get_actor_from_args(args.model, config=actor_cfg).half().cuda() - critic = get_critic_from_args(args.model, config=critic_cfg).half().cuda() - reward_model = get_reward_model_from_args(args.model, config=critic_cfg).half().cuda() - initial_model = get_actor_from_args(args.model, config=actor_cfg).half().cuda() + critic = get_critic_from_args(args.critic_model, config=critic_cfg).half().cuda() + reward_model = get_reward_model_from_args(args.critic_model, config=critic_cfg).half().cuda() + if args.initial_model_quant_ckpt is not None and args.model == 'llama': + # quantize initial model + with low_resource_init(), no_init_weights(): + initial_model = get_actor_from_args(args.model, config=actor_cfg) + initial_model.model = llama_load_quant(initial_model.model, args.initial_model_quant_ckpt, args.quant_bits, + args.quant_group_size).cuda() + else: + initial_model = get_actor_from_args(args.model, config=actor_cfg).half().cuda() return actor, critic, reward_model, initial_model # configure Experience Maker @@ -86,7 +95,8 @@ def model_fn(): def trainer_model_fn(): actor = get_actor_from_args(args.model, config=AutoConfig.from_pretrained(args.pretrain)).half().cuda() - critic = get_critic_from_args(args.model, config=AutoConfig.from_pretrained(args.critic_pretrain)).half().cuda() + critic = get_critic_from_args(args.critic_model, + config=AutoConfig.from_pretrained(args.critic_pretrain)).half().cuda() return actor, critic # configure Trainer @@ -138,10 +148,14 @@ def build_dataloader(size): parser = argparse.ArgumentParser() parser.add_argument('--num_trainers', type=int, default=1) parser.add_argument('--trainer_strategy', - choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'], + choices=[ + 'naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2', 'colossalai_gemini_cpu', + 'colossalai_zero2_cpu' + ], 
default='naive') parser.add_argument('--maker_strategy', choices=['naive'], default='naive') - parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt']) + parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama']) + parser.add_argument('--critic_model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama']) parser.add_argument('--pretrain', type=str, default=None) parser.add_argument('--critic_pretrain', type=str, default=None) parser.add_argument('--experience_steps', type=int, default=4) @@ -151,6 +165,9 @@ def build_dataloader(size): parser.add_argument('--train_batch_size', type=int, default=8) parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank") + parser.add_argument('--initial_model_quant_ckpt', type=str, default=None) + parser.add_argument('--quant_bits', type=int, default=4) + parser.add_argument('--quant_group_size', type=int, default=128) parser.add_argument('--debug', action='store_true') args = parser.parse_args() ray.init(namespace=os.environ["RAY_NAMESPACE"]) From 596c7ae2944f1e73a672d43deac1b4889562f695 Mon Sep 17 00:00:00 2001 From: csric <59389055+CsRic@users.noreply.github.com> Date: Mon, 24 Apr 2023 16:01:37 +0800 Subject: [PATCH 08/26] prompt example (#16) * prompt example * prompt load csv data * remove legacy try --------- Co-authored-by: csric --- .../Chat/coati/trainer/strategies/sampler.py | 1 + applications/Chat/examples/ray/1mmt_prompt.py | 164 ++++++++++++++++++ 2 files changed, 165 insertions(+) create mode 100644 applications/Chat/examples/ray/1mmt_prompt.py diff --git a/applications/Chat/coati/trainer/strategies/sampler.py b/applications/Chat/coati/trainer/strategies/sampler.py index d726fa640fa2..65e199dbf029 100644 --- a/applications/Chat/coati/trainer/strategies/sampler.py +++ b/applications/Chat/coati/trainer/strategies/sampler.py @@ -27,6 +27,7 @@ def __init__(self, dataset, num_replicas: int, rank: int) -> None: assert 
len(indices) == self.num_samples self.indices = indices + def sample(self, batch_size: int) -> list: sampled_indices = np.random.choice(self.indices, batch_size, replace=False) return [self.dataset[idx] for idx in sampled_indices] diff --git a/applications/Chat/examples/ray/1mmt_prompt.py b/applications/Chat/examples/ray/1mmt_prompt.py new file mode 100644 index 000000000000..5baf96eaa508 --- /dev/null +++ b/applications/Chat/examples/ray/1mmt_prompt.py @@ -0,0 +1,164 @@ +import argparse +import os +import socket +from functools import partial + +import pandas as pd +import ray +import torch +from coati.ray.detached_trainer_ppo import DetachedPPOTrainer +from coati.ray.experience_maker_holder import ExperienceMakerHolder +from coati.ray.utils import ( + get_actor_from_args, + get_critic_from_args, + get_reward_model_from_args, + get_strategy_from_args, + get_tokenizer_from_args +) + +from torch.utils.data import DataLoader + +def get_free_port(): + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind(('', 0)) + return s.getsockname()[1] + + +def get_local_ip(): + with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: + s.connect(('8.8.8.8', 80)) + return s.getsockname()[0] + +def main(args): + master_addr = str(get_local_ip()) + # trainer_env_info + trainer_port = str(get_free_port()) + env_info_trainers = [{ + 'local_rank': '0', + 'rank': str(rank), + 'world_size': str(args.num_trainers), + 'master_port': trainer_port, + 'master_addr': master_addr + } for rank in range(args.num_trainers)] + + # maker_env_info + maker_port = str(get_free_port()) + env_info_maker = { + 'local_rank': '0', + 'rank': '0', + 'world_size': '1', + 'master_port': maker_port, + 'master_addr': master_addr + } + + # configure tokenizer + tokenizer = get_tokenizer_from_args(args.model) + + def trainer_model_fn(): + actor = get_actor_from_args(args.model, args.pretrain).half().cuda() + critic = get_critic_from_args(args.model, args.critic_pretrain).half().cuda() + return 
actor, critic + + # configure Trainer + trainer_refs = [ + DetachedPPOTrainer.options(name=f"trainer{i}", num_gpus=1, max_concurrency=2).remote( + experience_maker_holder_name_list=["maker1"], + strategy_fn=partial(get_strategy_from_args, args.trainer_strategy), + model_fn=trainer_model_fn, + env_info=env_info_trainer, + train_batch_size=args.train_batch_size, + buffer_limit=16, + eval_performance=True, + debug=args.debug, + ) for i, env_info_trainer in enumerate(env_info_trainers) + ] + + def model_fn(): + actor = get_actor_from_args(args.model, args.pretrain).half().cuda() + critic = get_critic_from_args(args.model, args.critic_pretrain).half().cuda() + reward_model = get_reward_model_from_args(args.model, args.critic_pretrain).half().cuda() + initial_model = get_actor_from_args(args.model, args.pretrain).half().cuda() + return actor, critic, reward_model, initial_model + + # configure Experience Maker + experience_holder_ref = ExperienceMakerHolder.options(name="maker1", num_gpus=1, max_concurrency=2).remote( + detached_trainer_name_list=[f'trainer{i}' for i in range(args.num_trainers)], + strategy_fn=partial(get_strategy_from_args, args.maker_strategy), + model_fn=model_fn, + env_info=env_info_maker, + experience_batch_size=args.experience_batch_size, + kl_coef=0.1, + debug=args.debug, + # sync_models_from_trainers=True, + # generation kwargs: + max_length=512, + do_sample=True, + temperature=1.0, + top_k=50, + pad_token_id=tokenizer.pad_token_id, + eos_token_id=tokenizer.eos_token_id, + eval_performance=True, + use_cache=True, + ) + + + + # uncomment this function if sync_models_from_trainers is True + # ray.get([ + # trainer_ref.sync_models_to_remote_makers.remote() + # for trainer_ref in trainer_refs + # ]) + + wait_tasks = [] + + total_steps = args.experience_batch_size * args.experience_steps // (args.num_trainers * args.train_batch_size) + for trainer_ref in trainer_refs: + wait_tasks.append( + trainer_ref.fit.remote(total_steps, args.update_steps, 
args.train_epochs)) + + + dataset_size = args.experience_batch_size * 4 + from torch.utils.data import DataLoader + + def build_dataloader(): + def tokenize_fn(texts): + batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True) + return {k: v.cuda() for k, v in batch.items()} + + dataset = pd.read_csv(args.prompt_path)['prompt'] + dataloader = DataLoader(dataset=dataset, + batch_size=dataset_size, + shuffle=True, + collate_fn=tokenize_fn + ) + return dataloader + + + wait_tasks.append(experience_holder_ref.workingloop.remote(build_dataloader, + num_steps=args.experience_steps)) + + ray.get(wait_tasks) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--prompt_path', type=str, default=None) + parser.add_argument('--num_trainers', type=int, default=1) + parser.add_argument('--trainer_strategy', + choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'], + default='naive') + parser.add_argument('--maker_strategy', choices=['naive'], default='naive') + parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt']) + parser.add_argument('--pretrain', type=str, default=None) + parser.add_argument('--critic_pretrain', type=str, default=None) + parser.add_argument('--experience_steps', type=int, default=4) + parser.add_argument('--experience_batch_size', type=int, default=8) + parser.add_argument('--train_epochs', type=int, default=1) + parser.add_argument('--update_steps', type=int, default=2) + parser.add_argument('--train_batch_size', type=int, default=8) + parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank") + + parser.add_argument('--debug', action='store_true') + args = parser.parse_args() + ray.init(namespace=os.environ["RAY_NAMESPACE"]) + main(args) \ No newline at end of file From 352bee0fa4dafaf53bc51b0e084d3a22d1d7bfb6 Mon Sep 17 00:00:00 2001 From: Hongxin Liu Date: Tue, 25 Apr 2023 10:57:14 +0800 Subject: 
[PATCH 09/26] [chat] add mmmt dummy example and refactor experience sending (#17) * [chat] add mmmt dummy example * [chat] refactor naive strategy * [chat] fix struck problem * [chat] fix naive strategy * [chat] optimize experience maker sending logic * [chat] refactor sending assignment --- .../Chat/coati/ray/detached_trainer_base.py | 13 +- .../Chat/coati/ray/detached_trainer_ppo.py | 41 ++-- .../Chat/coati/ray/experience_maker_holder.py | 57 ++---- applications/Chat/coati/ray/utils.py | 27 ++- .../coati/trainer/strategies/colossalai.py | 2 +- .../Chat/coati/trainer/strategies/ddp.py | 31 +-- .../Chat/coati/trainer/strategies/naive.py | 36 +++- applications/Chat/examples/ray/1mmt_dummy.py | 7 +- applications/Chat/examples/ray/mmmt_dummy.py | 188 ++++++++++++++++++ 9 files changed, 300 insertions(+), 102 deletions(-) create mode 100644 applications/Chat/examples/ray/mmmt_dummy.py diff --git a/applications/Chat/coati/ray/detached_trainer_base.py b/applications/Chat/coati/ray/detached_trainer_base.py index 1137d8f7b491..a4f666dc5714 100644 --- a/applications/Chat/coati/ray/detached_trainer_base.py +++ b/applications/Chat/coati/ray/detached_trainer_base.py @@ -43,14 +43,15 @@ def __init__(self, self.callbacks = callbacks self.target_holder_name_list = experience_maker_holder_name_list self.target_holder_list = [] - + self._is_target_holder_initialized = False self._debug = debug - def update_target_holder_list(self, experience_maker_holder_name_list): - self.target_holder_name_list = experience_maker_holder_name_list - self.target_holder_list = [] - for name in self.target_holder_name_list: - self.target_holder_list.append(ray.get_actor(name, namespace=os.environ["RAY_NAMESPACE"])) + def update_target_holder_list(self): + # as the length of target_holder_list may be zero, we need to check it by a bool flag + if not self._is_target_holder_initialized: + for name in self.target_holder_name_list: + self.target_holder_list.append(ray.get_actor(name, 
namespace=os.environ["RAY_NAMESPACE"])) + self._is_target_holder_initialized = True @abstractmethod def _update_remote_makers(self, fully_update: bool = False, **kwargs): diff --git a/applications/Chat/coati/ray/detached_trainer_ppo.py b/applications/Chat/coati/ray/detached_trainer_ppo.py index b0630cd0b5ae..c5459c4d96d1 100644 --- a/applications/Chat/coati/ray/detached_trainer_ppo.py +++ b/applications/Chat/coati/ray/detached_trainer_ppo.py @@ -17,6 +17,7 @@ get_actor_from_args, get_critic_from_args, get_model_numel, + get_rank, get_strategy_from_args, is_rank_0, set_dist_env, @@ -102,38 +103,36 @@ def __init__( dataloader_pin_memory=dataloader_pin_memory, callbacks=callbacks, debug=debug) + if self._debug: + print(f'[trainer{get_rank()}] will send state dict to {experience_maker_holder_name_list}') @ray.method(concurrency_group="model_io") @torch.no_grad() def _update_remote_makers(self, fully_update: bool = False, **config): # TODO: balance duties - if is_rank_0(): - self.update_target_holder_list(self.target_holder_name_list) - # mark start, ensure order - tasks = [] - for target_holder in self.target_holder_list: - tasks.append(target_holder.update_experience_maker.remote(chunk_start=True, fully_update=fully_update)) - ray.get(tasks) + self.update_target_holder_list() + # mark start, ensure order + tasks = [] + for target_holder in self.target_holder_list: + tasks.append(target_holder.update_experience_maker.remote(chunk_start=True, fully_update=fully_update)) + ray.get(tasks) # sending loop tasks = [] for state_dict_shard in self._get_model_state_dict_shard(self.strategy._unwrap_model(self.actor), **config): - if is_rank_0(): - for target_holder in self.target_holder_list: - tasks.append( - target_holder.update_experience_maker.remote(new_actor_state_dict=state_dict_shard, - fully_update=fully_update)) + for target_holder in self.target_holder_list: + tasks.append( + target_holder.update_experience_maker.remote(new_actor_state_dict=state_dict_shard, + 
fully_update=fully_update)) # sending loop for state_dict_shard in self._get_model_state_dict_shard(self.strategy._unwrap_critic(self.critic), **config): - if is_rank_0(): - for target_holder in self.target_holder_list: - tasks.append( - target_holder.update_experience_maker.remote(new_critic_state_dict=state_dict_shard, - fully_update=fully_update)) - ray.get(tasks) - if is_rank_0(): - # mark end for target_holder in self.target_holder_list: - target_holder.update_experience_maker.remote(chunk_end=True, fully_update=fully_update) + tasks.append( + target_holder.update_experience_maker.remote(new_critic_state_dict=state_dict_shard, + fully_update=fully_update)) + ray.get(tasks) + # mark end + for target_holder in self.target_holder_list: + target_holder.update_experience_maker.remote(chunk_end=True, fully_update=fully_update) @ray.method(concurrency_group="compute") def training_step(self, experience: Experience) -> Dict[str, float]: diff --git a/applications/Chat/coati/ray/experience_maker_holder.py b/applications/Chat/coati/ray/experience_maker_holder.py index ebeb58137370..4616c01bdf0f 100644 --- a/applications/Chat/coati/ray/experience_maker_holder.py +++ b/applications/Chat/coati/ray/experience_maker_holder.py @@ -19,7 +19,7 @@ from torch import Tensor from tqdm import tqdm -from .utils import get_model_numel, is_rank_0, set_dist_env +from .utils import get_model_numel, get_rank, get_world_size, is_rank_0, set_dist_env @ray.remote(concurrency_groups={"experience_io": 1, "model_io": 1, "compute": 1}) @@ -50,8 +50,8 @@ def __init__( if env_info: set_dist_env(env_info=env_info) self.target_trainer_list = [] - for name in detached_trainer_name_list: - self.target_trainer_list.append(ray.get_actor(name, namespace=os.environ["RAY_NAMESPACE"])) + assert len(detached_trainer_name_list) > 0 + self._detached_trainer_name_list = detached_trainer_name_list self.strategy = strategy_fn() self.buffer_cpu_offload = buffer_cpu_offload self.kl_coef = kl_coef @@ -81,8 +81,10 @@ 
def __init__( self._target_idx = 0 - if self._debug and not self._is_fully_initialized: - print('[maker] Waiting for INIT') + if self._debug: + print(f'[maker{get_rank()}] will send items to {self._detached_trainer_name_list}') + if not self._is_fully_initialized: + print(f'[maker{get_rank()}] Waiting for INIT') def _get_ready(self): while not self._fully_initialized(): @@ -91,10 +93,11 @@ def _get_ready(self): def _fully_initialized(self): return self._is_fully_initialized - def update_target_trainer_list(self, detached_trainer_name_list): - self.target_trainer_list = [] - for name in detached_trainer_name_list: - self.target_trainer_list.append(ray.get_actor(name)) + def _init_target_trainer_list(self): + if len(self.target_trainer_list) > 0: + return + for name in self._detached_trainer_name_list: + self.target_trainer_list.append(ray.get_actor(name, namespace=os.environ["RAY_NAMESPACE"])) # copy from ../trainer/base.py @ray.method(concurrency_group="compute") @@ -106,43 +109,9 @@ def _make_experience(self, inputs: Union[Tensor, Dict[str, Tensor]]) -> Experien else: raise ValueError(f'Unsupported input type "{type(inputs)}"') - # TODO(ver217): remove this method - @ray.method(concurrency_group="experience_io") - def _send_experience(self, experience): - if not self.target_auto_balance: - # choose the trainer in polling mannar - if not hasattr(self, "_target_idx"): - self._target_idx = 0 - chosen_trainer = self.target_trainer_list[self._target_idx] - if self._debug: - print(f"[maker] sending exp to {chosen_trainer}") - chosen_trainer.buffer_append.remote(experience) - self._target_idx = (self._target_idx + 1) % len(self.target_trainer_list) - else: - # choose a trainer that has the least experience batch in its detached_replay_buffer - chosen_trainer = None - min_length = None - if self._debug: - print("[maker] choosing tartget trainer") - while chosen_trainer is None: - for target_trainer in self.target_trainer_list: - try: - temp_length = 
ray.get(target_trainer.buffer_get_length.remote(), timeout=0.1) - if min_length is None: - min_length = temp_length - chosen_trainer = target_trainer - else: - if temp_length < min_length: - min_length = temp_length - chosen_trainer = target_trainer - except GetTimeoutError: - pass - if self._debug: - print(f"[maker] sending exp to {chosen_trainer}") - chosen_trainer.buffer_append.remote(experience) - @ray.method(concurrency_group="experience_io") def _send_items(self, experience: Experience) -> None: + self._init_target_trainer_list() items = split_experience_batch(experience) items_per_trainer = [[] for _ in range(len(self.target_trainer_list))] for item in items: diff --git a/applications/Chat/coati/ray/utils.py b/applications/Chat/coati/ray/utils.py index 6e62ba0b4841..bc38bd012d61 100644 --- a/applications/Chat/coati/ray/utils.py +++ b/applications/Chat/coati/ray/utils.py @@ -18,6 +18,14 @@ def is_rank_0() -> bool: return not dist.is_initialized() or dist.get_rank() == 0 +def get_rank() -> int: + return dist.get_rank() if dist.is_initialized() else 0 + + +def get_world_size() -> int: + return dist.get_world_size() if dist.is_initialized() else 1 + + def get_actor_from_args(model: str, pretrained: str = None, config=None, lora_rank=0): if model == 'gpt2': actor = GPTActor(pretrained=pretrained, config=config, lora_rank=lora_rank) @@ -76,9 +84,9 @@ def get_strategy_from_args(strategy: str): elif strategy == 'colossalai_zero2': strategy_ = ColossalAIStrategy(stage=2, placement_policy='cuda') elif strategy == 'colossalai_gemini_cpu': - strategy = ColossalAIStrategy(stage=3, placement_policy='cpu', initial_scale=2**5) + strategy_ = ColossalAIStrategy(stage=3, placement_policy='cpu', initial_scale=2**5) elif strategy == 'colossalai_zero2_cpu': - strategy = ColossalAIStrategy(stage=2, placement_policy='cpu') + strategy_ = ColossalAIStrategy(stage=2, placement_policy='cpu') else: raise ValueError(f'Unsupported strategy "{strategy}"') return strategy_ @@ -126,3 +134,18 
@@ def state_dict_to(state_dict: Dict[str, Any], def get_model_numel(model: nn.Module) -> int: numel = sum(p.numel() for p in model.parameters()) return numel + + +def get_receivers_per_sender(sender_idx: int, num_senders: int, num_receivers: int, allow_idle_sender: bool) -> list: + target_receivers = [] + if num_senders <= num_receivers or allow_idle_sender: + # a sender will send data to one or more than one receivers + # a receiver only has one sender + for i in range(num_receivers): + if i % num_senders == sender_idx: + target_receivers.append(i) + else: + # a sender will send data to one receiver + # a receiver may have more than one sender + target_receivers.append(sender_idx % num_receivers) + return target_receivers diff --git a/applications/Chat/coati/trainer/strategies/colossalai.py b/applications/Chat/coati/trainer/strategies/colossalai.py index b809c010247b..d39092d5e7ad 100644 --- a/applications/Chat/coati/trainer/strategies/colossalai.py +++ b/applications/Chat/coati/trainer/strategies/colossalai.py @@ -219,4 +219,4 @@ def get_model_state_dict_shard(self, model: nn.Module, **config): if isinstance(module, LoraLinear): module.merge_weights = True module.eval() - yield from model.state_dict_shard(max_shard_size=1024) + yield from model.state_dict_shard(max_shard_size=1024, only_rank_0=False) diff --git a/applications/Chat/coati/trainer/strategies/ddp.py b/applications/Chat/coati/trainer/strategies/ddp.py index a0fd3fa27a58..4600c63907e8 100644 --- a/applications/Chat/coati/trainer/strategies/ddp.py +++ b/applications/Chat/coati/trainer/strategies/ddp.py @@ -1,13 +1,12 @@ -from typing import Optional - import os import random +from typing import Optional import numpy as np import torch import torch.distributed as dist import torch.nn as nn -from coati.models.base import LM, Actor, RewardModel, Critic +from coati.models.base import LM, Actor, Critic, RewardModel from coati.models.lora import LoraLinear from coati.replay_buffer import ReplayBuffer from 
torch.nn.parallel import DistributedDataParallel as DDP @@ -30,19 +29,8 @@ def __init__(self, seed: int = 42) -> None: super().__init__() def setup_distributed(self) -> None: - try: - rank = int(os.environ['RANK']) - local_rank = int(os.environ['LOCAL_RANK']) - world_size = int(os.environ['WORLD_SIZE']) - host = os.environ['MASTER_ADDR'] - port = int(os.environ['MASTER_PORT']) - except KeyError as e: - raise RuntimeError( - f"Could not find {e} in the torch environment, visit https://www.colossalai.org/ for more information on launching with torch" - ) - dist.init_process_group('nccl', init_method=f'tcp://[{host}]:{port}', world_size=world_size, rank=rank) + self._try_init_dist(force=True) self.set_seed(self.seed) - torch.cuda.set_device(local_rank) def set_seed(self, seed: int) -> None: random.seed(seed) @@ -74,21 +62,25 @@ def setup_dataloader(self, replay_buffer: ReplayBuffer, pin_memory: bool = False def _unwrap_actor(actor: Actor) -> nn.Module: model: DDP = Strategy._unwrap_actor(actor) return model.module - + @staticmethod def _unwrap_critic(critic: Critic) -> nn.Module: model: DDP = Strategy._unwrap_critic(critic) return model.module - def save_model(self, model: nn.Module, path: str, only_rank0: bool = False, tokenizer: Optional[PreTrainedTokenizerBase] = None) -> None: + def save_model(self, + model: nn.Module, + path: str, + only_rank0: bool = False, + tokenizer: Optional[PreTrainedTokenizerBase] = None) -> None: if only_rank0 and dist.get_rank() != 0: return None - + for module in model.modules(): if isinstance(module, LoraLinear): module.merge_weights = True module.eval() - + if isinstance(model, RewardModel): state_dict = model.state_dict() if only_rank0 and dist.get_rank() != 0: @@ -114,4 +106,3 @@ def save_optimizer(self, optimizer: Optimizer, path: str, only_rank0: bool = Fal def setup_sampler(self, dataset) -> DistributedSampler: return DistributedSampler(dataset, dist.get_world_size(), dist.get_rank()) - diff --git 
a/applications/Chat/coati/trainer/strategies/naive.py b/applications/Chat/coati/trainer/strategies/naive.py index a22be1181fb8..a761786e0f21 100644 --- a/applications/Chat/coati/trainer/strategies/naive.py +++ b/applications/Chat/coati/trainer/strategies/naive.py @@ -1,11 +1,13 @@ +import os from typing import Any, Optional import torch +import torch.distributed as dist import torch.nn as nn import torch.optim as optim -from coati.replay_buffer import ReplayBuffer from coati.models.base import LM, RewardModel from coati.models.lora import LoraLinear +from coati.replay_buffer import ReplayBuffer from torch.optim import Optimizer from torch.utils.data import DataLoader from transformers.tokenization_utils_base import PreTrainedTokenizerBase @@ -25,7 +27,7 @@ def optimizer_step(self, optimizer: optim.Optimizer, **kwargs) -> None: optimizer.step() def setup_distributed(self) -> None: - pass + self._try_init_dist(force=False) def setup_model(self, model: nn.Module) -> nn.Module: return model @@ -41,12 +43,16 @@ def setup_dataloader(self, replay_buffer: ReplayBuffer, pin_memory: bool = False pin_memory=pin_memory, collate_fn=replay_buffer.collate_fn) - def save_model(self, model: nn.Module, path: str, only_rank0: bool = False, tokenizer: Optional[PreTrainedTokenizerBase] = None) -> None: + def save_model(self, + model: nn.Module, + path: str, + only_rank0: bool = False, + tokenizer: Optional[PreTrainedTokenizerBase] = None) -> None: for module in model.modules(): if isinstance(module, LoraLinear): module.merge_weights = True module.eval() - + if isinstance(model, RewardModel): state_dict = model.state_dict() torch.save(state_dict, path) @@ -77,10 +83,28 @@ def get_model_state_dict_shard(self, model: nn.Module, **config): # TODO: implement sharding on naive strategy state_dict = model.state_dict() yield state_dict - + def merge_lora_weight(self, model: nn.Module): unwrapped_model = self._unwrap_model(model) for module in unwrapped_model.modules(): if isinstance(module, 
LoraLinear): module.merge_weights = True - module.eval() \ No newline at end of file + module.eval() + + def _try_init_dist(self, force: bool = False) -> None: + try: + rank = int(os.environ['RANK']) + local_rank = int(os.environ['LOCAL_RANK']) + world_size = int(os.environ['WORLD_SIZE']) + host = os.environ['MASTER_ADDR'] + port = int(os.environ['MASTER_PORT']) + dist.init_process_group('nccl', init_method=f'tcp://[{host}]:{port}', world_size=world_size, rank=rank) + torch.cuda.set_device(local_rank) + except KeyError as e: + if force: + raise RuntimeError( + f"Could not find {e} in the torch environment, visit https://www.colossalai.org/ for more information on launching with torch" + ) + except Exception as e: + if force: + raise e diff --git a/applications/Chat/examples/ray/1mmt_dummy.py b/applications/Chat/examples/ray/1mmt_dummy.py index d293e6940fbe..d2e820680114 100644 --- a/applications/Chat/examples/ray/1mmt_dummy.py +++ b/applications/Chat/examples/ray/1mmt_dummy.py @@ -11,6 +11,7 @@ from coati.ray.utils import ( get_actor_from_args, get_critic_from_args, + get_receivers_per_sender, get_reward_model_from_args, get_strategy_from_args, ) @@ -74,7 +75,7 @@ def model_fn(): return actor, critic, reward_model, initial_model # configure Experience Maker - experience_holder_ref = ExperienceMakerHolder.options(name="maker1", num_gpus=1, max_concurrency=2).remote( + experience_holder_ref = ExperienceMakerHolder.options(name="maker0", num_gpus=1, max_concurrency=2).remote( detached_trainer_name_list=[f'trainer{i}' for i in range(args.num_trainers)], strategy_fn=partial(get_strategy_from_args, args.maker_strategy), model_fn=model_fn, @@ -102,7 +103,9 @@ def trainer_model_fn(): # configure Trainer trainer_refs = [ DetachedPPOTrainer.options(name=f"trainer{i}", num_gpus=1, max_concurrency=2).remote( - experience_maker_holder_name_list=["maker1"], + experience_maker_holder_name_list=[ + f'maker{x}' for x in get_receivers_per_sender(i, args.num_trainers, 1, 
allow_idle_sender=True) + ], strategy_fn=partial(get_strategy_from_args, args.trainer_strategy), model_fn=trainer_model_fn, env_info=env_info_trainer, diff --git a/applications/Chat/examples/ray/mmmt_dummy.py b/applications/Chat/examples/ray/mmmt_dummy.py new file mode 100644 index 000000000000..767fe37030f6 --- /dev/null +++ b/applications/Chat/examples/ray/mmmt_dummy.py @@ -0,0 +1,188 @@ +import argparse +import os +import socket +from functools import partial + +import ray +import torch +from coati.quant import llama_load_quant, low_resource_init +from coati.ray.detached_trainer_ppo import DetachedPPOTrainer +from coati.ray.experience_maker_holder import ExperienceMakerHolder +from coati.ray.utils import ( + get_actor_from_args, + get_critic_from_args, + get_receivers_per_sender, + get_reward_model_from_args, + get_strategy_from_args, +) +from torch.utils.data import DataLoader +from transformers import AutoConfig, AutoTokenizer +from transformers.modeling_utils import no_init_weights + + +def get_free_port(): + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind(('', 0)) + return s.getsockname()[1] + + +def get_local_ip(): + with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: + s.connect(('8.8.8.8', 80)) + return s.getsockname()[0] + + +def main(args): + master_addr = str(get_local_ip()) + # trainer_env_info + trainer_port = str(get_free_port()) + env_info_trainers = [{ + 'local_rank': '0', + 'rank': str(rank), + 'world_size': str(args.num_trainers), + 'master_port': trainer_port, + 'master_addr': master_addr + } for rank in range(args.num_trainers)] + + # maker_env_info + maker_port = str(get_free_port()) + env_info_makers = [{ + 'local_rank': '0', + 'rank': str(rank), + 'world_size': str(args.num_makers), + 'master_port': maker_port, + 'master_addr': master_addr + } for rank in range(args.num_makers)] + + # configure tokenizer + tokenizer = AutoTokenizer.from_pretrained(args.pretrain) + tokenizer.pad_token = tokenizer.eos_token + + 
def model_fn(): + actor_cfg = AutoConfig.from_pretrained(args.pretrain) + critic_cfg = AutoConfig.from_pretrained(args.critic_pretrain) + actor = get_actor_from_args(args.model, config=actor_cfg).half().cuda() + critic = get_critic_from_args(args.critic_model, config=critic_cfg).half().cuda() + reward_model = get_reward_model_from_args(args.critic_model, config=critic_cfg).half().cuda() + if args.initial_model_quant_ckpt is not None and args.model == 'llama': + # quantize initial model + with low_resource_init(), no_init_weights(): + initial_model = get_actor_from_args(args.model, config=actor_cfg) + initial_model.model = llama_load_quant(initial_model.model, args.initial_model_quant_ckpt, args.quant_bits, + args.quant_group_size).cuda() + else: + initial_model = get_actor_from_args(args.model, config=actor_cfg).half().cuda() + return actor, critic, reward_model, initial_model + + # configure Experience Maker + experience_holder_refs = [ + ExperienceMakerHolder.options(name=f"maker{i}", num_gpus=1, max_concurrency=2).remote( + detached_trainer_name_list=[ + f'trainer{x}' + for x in get_receivers_per_sender(i, args.num_makers, args.num_trainers, allow_idle_sender=False) + ], + strategy_fn=partial(get_strategy_from_args, args.maker_strategy), + model_fn=model_fn, + env_info=env_info_maker, + kl_coef=0.1, + debug=args.debug, + # sync_models_from_trainers=True, + # generation kwargs: + max_length=512, + do_sample=True, + temperature=1.0, + top_k=50, + pad_token_id=tokenizer.pad_token_id, + eos_token_id=tokenizer.eos_token_id, + eval_performance=True, + use_cache=True, + ) + for i, env_info_maker in enumerate(env_info_makers) + ] + + def trainer_model_fn(): + actor = get_actor_from_args(args.model, config=AutoConfig.from_pretrained(args.pretrain)).half().cuda() + critic = get_critic_from_args(args.critic_model, + config=AutoConfig.from_pretrained(args.critic_pretrain)).half().cuda() + return actor, critic + + # configure Trainer + trainer_refs = [ + 
DetachedPPOTrainer.options(name=f"trainer{i}", num_gpus=1, max_concurrency=2).remote( + experience_maker_holder_name_list=[ + f"maker{x}" + for x in get_receivers_per_sender(i, args.num_trainers, args.num_makers, allow_idle_sender=True) + ], + strategy_fn=partial(get_strategy_from_args, args.trainer_strategy), + model_fn=trainer_model_fn, + env_info=env_info_trainer, + train_batch_size=args.train_batch_size, + buffer_limit=16, + eval_performance=True, + debug=args.debug, + ) + for i, env_info_trainer in enumerate(env_info_trainers) + ] + + dataset_size = args.experience_batch_size * 4 + + def data_gen_fn(): + input_ids = torch.randint(tokenizer.vocab_size, (256,), device=torch.cuda.current_device()) + attn_mask = torch.ones_like(input_ids) + return {'input_ids': input_ids, 'attention_mask': attn_mask} + + def build_dataloader(size): + dataset = [data_gen_fn() for _ in range(size)] + dataloader = DataLoader(dataset, batch_size=args.experience_batch_size) + return dataloader + + # uncomment this function if sync_models_from_trainers is True + # ray.get([ + # trainer_ref.sync_models_to_remote_makers.remote() + # for trainer_ref in trainer_refs + # ]) + + wait_tasks = [] + + for experience_holder_ref in experience_holder_refs: + wait_tasks.append( + experience_holder_ref.workingloop.remote(partial(build_dataloader, dataset_size), + num_steps=args.experience_steps)) + + total_steps = args.experience_batch_size * args.experience_steps * \ + args.num_makers // (args.num_trainers * args.train_batch_size) + for trainer_ref in trainer_refs: + wait_tasks.append(trainer_ref.fit.remote(total_steps, args.update_steps, args.train_epochs)) + + ray.get(wait_tasks) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--num_makers', type=int, default=1) + parser.add_argument('--num_trainers', type=int, default=1) + parser.add_argument('--trainer_strategy', + choices=[ + 'naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2', 
'colossalai_gemini_cpu', + 'colossalai_zero2_cpu' + ], + default='naive') + parser.add_argument('--maker_strategy', choices=['naive'], default='naive') + parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama']) + parser.add_argument('--critic_model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama']) + parser.add_argument('--pretrain', type=str, default=None) + parser.add_argument('--critic_pretrain', type=str, default=None) + parser.add_argument('--experience_steps', type=int, default=4) + parser.add_argument('--experience_batch_size', type=int, default=8) + parser.add_argument('--train_epochs', type=int, default=1) + parser.add_argument('--update_steps', type=int, default=2) + parser.add_argument('--train_batch_size', type=int, default=8) + parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank") + + parser.add_argument('--initial_model_quant_ckpt', type=str, default=None) + parser.add_argument('--quant_bits', type=int, default=4) + parser.add_argument('--quant_group_size', type=int, default=128) + parser.add_argument('--debug', action='store_true') + args = parser.parse_args() + ray.init(namespace=os.environ["RAY_NAMESPACE"]) + main(args) From b27779a077fd3c91bebf92b15f4837dd1f380c51 Mon Sep 17 00:00:00 2001 From: Hongxin Liu Date: Tue, 25 Apr 2023 15:56:38 +0800 Subject: [PATCH 10/26] [chat] refactor performance evaluator (#18) --- .../Chat/coati/ray/callbacks/__init__.py | 9 + applications/Chat/coati/ray/callbacks/base.py | 66 ++++++ .../ray/callbacks/performance_evaluator.py | 212 ++++++++++++++++++ .../Chat/coati/ray/detached_trainer_base.py | 44 ++-- .../Chat/coati/ray/detached_trainer_ppo.py | 6 +- .../Chat/coati/ray/experience_maker_holder.py | 36 ++- .../callbacks/performance_evaluator.py | 89 -------- 7 files changed, 347 insertions(+), 115 deletions(-) create mode 100644 applications/Chat/coati/ray/callbacks/__init__.py create mode 100644 
applications/Chat/coati/ray/callbacks/base.py create mode 100644 applications/Chat/coati/ray/callbacks/performance_evaluator.py diff --git a/applications/Chat/coati/ray/callbacks/__init__.py b/applications/Chat/coati/ray/callbacks/__init__.py new file mode 100644 index 000000000000..5f5e488f383e --- /dev/null +++ b/applications/Chat/coati/ray/callbacks/__init__.py @@ -0,0 +1,9 @@ +from .base import MakerCallback, TrainerCallback +from .performance_evaluator import ExperienceMakerPerformanceEvaluator, TrainerPerformanceEvaluator + +__all__ = [ + "TrainerCallback", + "MakerCallback", + "ExperienceMakerPerformanceEvaluator", + "TrainerPerformanceEvaluator", +] diff --git a/applications/Chat/coati/ray/callbacks/base.py b/applications/Chat/coati/ray/callbacks/base.py new file mode 100644 index 000000000000..3306150a41ff --- /dev/null +++ b/applications/Chat/coati/ray/callbacks/base.py @@ -0,0 +1,66 @@ +from abc import ABC + +from coati.experience_maker import Experience + + +class TrainerCallback(ABC): + """ + Base callback class. It defines the interface for callbacks. 
+ """ + + def on_fit_start(self) -> None: + pass + + def on_fit_end(self) -> None: + pass + + def on_episode_start(self, episode: int) -> None: + pass + + def on_episode_end(self, episode: int) -> None: + pass + + def on_epoch_start(self, epoch: int) -> None: + pass + + def on_epoch_end(self, epoch: int) -> None: + pass + + def on_batch_start(self) -> None: + pass + + def on_batch_end(self, metrics: dict, experience: Experience) -> None: + pass + + def on_update_start(self) -> None: + pass + + def on_update_end(self) -> None: + pass + + +class MakerCallback(ABC): + + def on_loop_start(self) -> None: + pass + + def on_loop_end(self) -> None: + pass + + def on_make_experience_start(self) -> None: + pass + + def on_make_experience_end(self, experience: Experience) -> None: + pass + + def on_send_start(self) -> None: + pass + + def on_send_end(self) -> None: + pass + + def on_batch_start(self) -> None: + pass + + def on_batch_end(self) -> None: + pass diff --git a/applications/Chat/coati/ray/callbacks/performance_evaluator.py b/applications/Chat/coati/ray/callbacks/performance_evaluator.py new file mode 100644 index 000000000000..cd3517609e7a --- /dev/null +++ b/applications/Chat/coati/ray/callbacks/performance_evaluator.py @@ -0,0 +1,212 @@ +from time import time +from typing import Optional + +import torch +import torch.distributed as dist +from coati.experience_maker import Experience + +from .base import MakerCallback, TrainerCallback + + +def get_world_size() -> int: + if dist.is_initialized(): + return dist.get_world_size() + return 1 + + +def print_rank_0(*args, **kwargs) -> None: + if not dist.is_initialized() or dist.get_rank() == 0: + print(*args, **kwargs) + + +@torch.no_grad() +def all_reduce_mean(x: float, world_size: int) -> float: + if world_size == 1: + return x + tensor = torch.tensor([x], device=torch.cuda.current_device()) + dist.all_reduce(tensor) + tensor = tensor / world_size + return tensor.item() + + +class Timer: + + def __init__(self) -> None: 
+ self.start_time: Optional[float] = None + self.duration: float = 0. + + def start(self) -> None: + self.start_time = time() + + def end(self) -> None: + self.duration += time() - self.start_time + + def reset(self) -> None: + self.duration = 0. + + +class ExperienceMakerPerformanceEvaluator(MakerCallback): + + def __init__(self, actor_num_params: int, critic_num_params: int, initial_model_num_params: int, + reward_model_num_params: int) -> None: + super().__init__() + self.world_size = get_world_size() + self.actor_num_params = actor_num_params + self.critic_num_params = critic_num_params + self.initial_model_num_params = initial_model_num_params + self.reward_model_num_params = reward_model_num_params + + self.batch_timer = Timer() + self.send_timer = Timer() + self.make_experience_timer = Timer() + self.total_samples: int = 0 + self.make_experience_flop: int = 0 + + print_rank_0( + f'ExperienceMaker actor: {actor_num_params/1024**3:.2f}B, critic: {critic_num_params/1024**3:.2f}B, initial model: {initial_model_num_params/1024**3:.2f}B, reward model: {reward_model_num_params/1024**3:.2f}B, world size: {self.world_size}' + ) + + def on_make_experience_start(self) -> None: + self.make_experience_timer.start() + + def on_make_experience_end(self, experience: Experience) -> None: + self.make_experience_timer.end() + + batch_size, seq_len = experience.sequences.shape + + self.total_samples += batch_size + + # actor generate + num_actions = experience.action_mask.size(1) + input_len = seq_len - num_actions + total_seq_len = (input_len + seq_len - 1) * num_actions / 2 + self.make_experience_flop += self.actor_num_params * batch_size * total_seq_len * 2 + # actor forward + self.make_experience_flop += self.actor_num_params * batch_size * seq_len * 2 + # critic forward + self.make_experience_flop += self.critic_num_params * batch_size * seq_len * 2 + # initial model forward + self.make_experience_flop += self.initial_model_num_params * batch_size * seq_len * 2 + # reward 
model forward + self.make_experience_flop += self.reward_model_num_params * batch_size * seq_len * 2 + + def on_send_start(self) -> None: + self.send_timer.start() + + def on_send_end(self) -> None: + self.send_timer.end() + + def on_batch_start(self) -> None: + self.batch_timer.start() + + def on_batch_end(self) -> None: + self.batch_timer.end() + + def on_loop_end(self) -> None: + avg_make_experience_duration = all_reduce_mean(self.make_experience_timer.duration, self.world_size) + avg_overall_duration = all_reduce_mean(self.batch_timer.duration, self.world_size) + avg_send_duration = all_reduce_mean(self.send_timer.duration, self.world_size) + + avg_throughput = self.total_samples * self.world_size / (avg_overall_duration + 1e-12) + avg_make_experience_tflops = self.make_experience_flop / 1e12 / (avg_make_experience_duration + 1e-12) + avg_time_per_sample = (avg_overall_duration + 1e-12) / (self.total_samples * self.world_size) + avg_make_experience_time_per_sample = (avg_make_experience_duration + 1e-12) / \ + (self.total_samples * self.world_size) + avg_send_time_per_sample = (avg_send_duration + 1e-12) / (self.total_samples * self.world_size) + + print_rank_0( + 'Making Experience Performance Summary:\n' + f'Throughput: {avg_throughput:.3f} samples/sec\n' + + f'TFLOPS per GPU: {avg_make_experience_tflops:.3f}\n' + + f'Sample time (overall): {avg_time_per_sample:.3f} s\n' + + f'Sample time (make experience): {avg_make_experience_time_per_sample:.3f} s, {avg_make_experience_time_per_sample/avg_time_per_sample*100:.2f}%\n' + + + f'Sample time (send): {avg_send_time_per_sample:.3f} s, {avg_send_time_per_sample/avg_time_per_sample*100:.2f}%\n' + ) + + +class TrainerPerformanceEvaluator(TrainerCallback): + + def __init__(self, + actor_num_params: int, + critic_num_params: int, + enable_grad_checkpoint: bool = False, + ignore_first_episodes: int = 1) -> None: + super().__init__() + self.world_size = get_world_size() + self.actor_num_params = actor_num_params + 
self.critic_num_params = critic_num_params + self.enable_grad_checkpoint = enable_grad_checkpoint + self.ignore_first_episodes = ignore_first_episodes + self.ignore_this_episode = False + + self.episode_timer = Timer() + self.batch_timer = Timer() + self.update_timer = Timer() + self.total_samples: int = 0 + self.learn_flop: int = 0 + + print_rank_0( + f'Trainer actor: {self.actor_num_params/1024**3:.2f}B, critic: {self.critic_num_params/1024**3:.2f}B, world size: {self.world_size}' + ) + + def on_episode_start(self, episodes: int) -> None: + self.ignore_this_episode = episodes < self.ignore_first_episodes + if self.ignore_this_episode: + return + self.episode_timer.start() + + def on_episode_end(self, episodes: int) -> None: + if self.ignore_this_episode: + return + self.episode_timer.end() + + def on_batch_start(self) -> None: + if self.ignore_this_episode: + return + self.batch_timer.start() + + def on_batch_end(self, metrics: dict, experience: Experience) -> None: + if self.ignore_this_episode: + return + self.batch_timer.end() + + batch_size, seq_len = experience.sequences.shape + + self.total_samples += batch_size + + # actor forward-backward, 3 means forward(1) + backward(2) + self.learn_flop += self.actor_num_params * batch_size * seq_len * 2 * (3 + int(self.enable_grad_checkpoint)) + # critic forward-backward + self.learn_flop += self.critic_num_params * batch_size * seq_len * 2 * (3 + int(self.enable_grad_checkpoint)) + + def on_update_start(self) -> None: + if self.ignore_this_episode: + return + self.update_timer.start() + + def on_update_end(self) -> None: + if self.ignore_this_episode: + return + self.update_timer.end() + + def on_fit_end(self) -> None: + if self.total_samples == 0: + print_rank_0('No samples are collected, skip trainer performance evaluation') + return + avg_train_duration = all_reduce_mean(self.batch_timer.duration, self.world_size) + avg_update_duration = all_reduce_mean(self.update_timer.duration, self.world_size) + 
avg_episode_duration = all_reduce_mean(self.episode_timer.duration, self.world_size) + + avg_throughput = self.total_samples * self.world_size / (avg_episode_duration + 1e-12) + avg_learn_tflops = self.learn_flop / 1e12 / (avg_train_duration + 1e-12) + avg_time_per_sample = (avg_episode_duration + 1e-12) / (self.total_samples * self.world_size) + avg_train_time_per_sample = (avg_train_duration + 1e-12) / (self.total_samples * self.world_size) + avg_update_time_per_sample = (avg_update_duration + 1e-12) / (self.total_samples * self.world_size) + + print_rank_0( + 'Learning Performance Summary:\n' + f'Throughput: {avg_throughput:.3f} samples/sec\n' + + f'TFLOPS per GPU: {avg_learn_tflops:.3f}\n' + f'Sample time (overall): {avg_time_per_sample:.3f} s\n' + + f'Sample time (train): {avg_train_time_per_sample:.3f} s, {avg_train_time_per_sample/avg_time_per_sample*100:.2f}%\n' + + + f'Sample time (update): {avg_update_time_per_sample:.3f} s, {avg_update_time_per_sample/avg_time_per_sample*100:.2f}%\n' + ) diff --git a/applications/Chat/coati/ray/detached_trainer_base.py b/applications/Chat/coati/ray/detached_trainer_base.py index a4f666dc5714..ac2d35e9da19 100644 --- a/applications/Chat/coati/ray/detached_trainer_base.py +++ b/applications/Chat/coati/ray/detached_trainer_base.py @@ -6,10 +6,10 @@ import torch from coati.experience_maker import Experience from coati.replay_buffer.utils import BufferItem -from coati.trainer.callbacks import Callback from torch.utils.data import DataLoader from tqdm import tqdm +from .callbacks import TrainerCallback from .detached_replay_buffer import DetachedReplayBuffer from .utils import is_rank_0 @@ -35,7 +35,7 @@ def __init__(self, train_batch_size: int = 8, buffer_limit: int = 0, dataloader_pin_memory: bool = True, - callbacks: List[Callback] = [], + callbacks: List[TrainerCallback] = [], debug: bool = False) -> None: super().__init__() self.detached_replay_buffer = DetachedReplayBuffer(train_batch_size, limit=buffer_limit) @@ -68,7 
+68,9 @@ def _learn(self, update_steps: int, train_epochs: int) -> None: data = [] # warmup pbar = tqdm(range(update_steps), desc=f'Train epoch [1/{train_epochs}]', disable=not is_rank_0()) + self._on_epoch_start(0) self._learn_epoch(pbar, data) + self._on_epoch_end(0) # item is already a batch dataloader = DataLoader(data, batch_size=1, @@ -77,7 +79,9 @@ def _learn(self, update_steps: int, train_epochs: int) -> None: collate_fn=lambda x: x[0]) for epoch in range(1, train_epochs): pbar = tqdm(dataloader, desc=f'Train epoch [{epoch + 1}/{train_epochs}]', disable=not is_rank_0()) + self._on_epoch_start(epoch) self._learn_epoch(pbar, data) + self._on_epoch_end(epoch) def _learn_epoch(self, pbar: tqdm, data: List[Experience]) -> None: is_warmup = len(data) == 0 @@ -87,9 +91,9 @@ def _learn_epoch(self, pbar: tqdm, data: List[Experience]) -> None: # sample a batch and then train to avoid waiting experience = x if not is_warmup else self._buffer_sample() experience.to_device(torch.cuda.current_device()) - self._on_learn_batch_start() + self._on_batch_start() metrics = self.training_step(experience) - self._on_learn_batch_end(metrics, experience) + self._on_batch_end(metrics, experience) if self._debug: print("[trainer] step over") @@ -100,11 +104,14 @@ def _learn_epoch(self, pbar: tqdm, data: List[Experience]) -> None: def fit(self, total_steps: int, update_steps: int, train_epochs: int = 1) -> None: self._on_fit_start() - for _ in tqdm(range(total_steps // update_steps), desc='Trainer', disable=not is_rank_0()): + for i in tqdm(range(total_steps // update_steps), desc='Trainer', disable=not is_rank_0()): + self._on_episode_start(i) self._learn(update_steps, train_epochs) + self._on_update_start() self._update_remote_makers() + self._on_update_end() + self._on_episode_end(i) self._on_fit_end() - self._on_finish() @ray.method(concurrency_group="buffer_length") def buffer_get_length(self): @@ -147,23 +154,26 @@ def _on_episode_end(self, episode: int) -> None: for callback 
in self.callbacks: callback.on_episode_end(episode) - def _on_learn_epoch_start(self, epoch: int) -> None: + def _on_epoch_start(self, epoch: int) -> None: for callback in self.callbacks: - callback.on_learn_epoch_start(epoch) + callback.on_epoch_start(epoch) - def _on_learn_epoch_end(self, epoch: int) -> None: + def _on_epoch_end(self, epoch: int) -> None: for callback in self.callbacks: - callback.on_learn_epoch_end(epoch) + callback.on_epoch_end(epoch) - def _on_learn_batch_start(self) -> None: + def _on_batch_start(self) -> None: for callback in self.callbacks: - callback.on_learn_batch_start() + callback.on_batch_start() - def _on_learn_batch_end(self, metrics: dict, experience: Experience) -> None: + def _on_batch_end(self, metrics: dict, experience: Experience) -> None: for callback in self.callbacks: - callback.on_learn_batch_end(metrics, experience) + callback.on_batch_end(metrics, experience) - def _on_finish(self) -> None: + def _on_update_start(self) -> None: for callback in self.callbacks: - if hasattr(callback, 'on_finish'): - callback.on_finish() + callback.on_update_start() + + def _on_update_end(self) -> None: + for callback in self.callbacks: + callback.on_update_end() diff --git a/applications/Chat/coati/ray/detached_trainer_ppo.py b/applications/Chat/coati/ray/detached_trainer_ppo.py index c5459c4d96d1..347df3d84589 100644 --- a/applications/Chat/coati/ray/detached_trainer_ppo.py +++ b/applications/Chat/coati/ray/detached_trainer_ppo.py @@ -6,12 +6,12 @@ from coati.models.base import Actor, Critic from coati.models.loss import PolicyLoss, ValueLoss from coati.trainer.callbacks import Callback -from coati.trainer.callbacks.performance_evaluator import TrainerPerformaceEvaluator from coati.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy, Strategy from torch.optim import Adam from colossalai.nn.optimizer import HybridAdam +from .callbacks import TrainerCallback, TrainerPerformanceEvaluator from .detached_trainer_base import 
DetachedTrainer from .utils import ( get_actor_from_args, @@ -64,7 +64,7 @@ def __init__( eps_clip: float = 0.2, value_clip: float = 0.4, dataloader_pin_memory: bool = True, - callbacks: List[Callback] = [], + callbacks: List[TrainerCallback] = [], eval_performance: bool = False, debug: bool = False, ) -> None: @@ -80,7 +80,7 @@ def __init__( if eval_performance: actor_numel = get_model_numel(self.actor) critic_numel = get_model_numel(self.critic) - evaluator = TrainerPerformaceEvaluator(actor_numel, critic_numel) + evaluator = TrainerPerformanceEvaluator(actor_numel, critic_numel) callbacks = callbacks + [evaluator] if isinstance(self.strategy, ColossalAIStrategy): diff --git a/applications/Chat/coati/ray/experience_maker_holder.py b/applications/Chat/coati/ray/experience_maker_holder.py index 4616c01bdf0f..996996400064 100644 --- a/applications/Chat/coati/ray/experience_maker_holder.py +++ b/applications/Chat/coati/ray/experience_maker_holder.py @@ -12,13 +12,13 @@ from coati.models.base import Actor, Critic, RewardModel from coati.replay_buffer.utils import BufferItem, make_experience_batch, split_experience_batch from coati.trainer.callbacks import Callback -from coati.trainer.callbacks.performance_evaluator import ExperienceMakerPerformanceEvaluator from coati.trainer.strategies import Strategy from coati.trainer.strategies.sampler import DistributedSampler from ray.exceptions import GetTimeoutError from torch import Tensor from tqdm import tqdm +from .callbacks import ExperienceMakerPerformanceEvaluator, MakerCallback from .utils import get_model_numel, get_rank, get_world_size, is_rank_0, set_dist_env @@ -42,7 +42,7 @@ def __init__( sync_models_from_trainers: bool = False, buffer_cpu_offload: bool = True, kl_coef: float = 0.1, - callbacks: List[Callback] = [], + callbacks: List[MakerCallback] = [], eval_performance: bool = False, debug: bool = False, **generate_kwargs): @@ -122,13 +122,17 @@ def _send_items(self, experience: Experience) -> None: 
target_trainer.buffer_extend.remote(items_per_trainer[i]) def _inference_step(self, batch) -> None: + self._on_batch_start() with self._model_visit_lock: self._on_make_experience_start() experience = self._make_experience(batch) self._on_make_experience_end(experience) + self._on_send_start() if self.buffer_cpu_offload: experience.to_device('cpu') self._send_items(experience) + self._on_send_end() + self._on_batch_end() def workingloop(self, dataloader_fn: Callable[[], Iterable], num_epochs: int = 1, num_steps: int = 0): """Working loop of the experience maker. @@ -139,6 +143,7 @@ def workingloop(self, dataloader_fn: Callable[[], Iterable], num_epochs: int = 1 num_steps (int, optional): Iterate the dataloader for number if steps. If this value > 0, num_epochs will be ignored. Defaults to 0. """ self._get_ready() + self._on_loop_start() dataloader = dataloader_fn() if num_steps > 0: # ignore num epochs @@ -156,7 +161,7 @@ def workingloop(self, dataloader_fn: Callable[[], Iterable], num_epochs: int = 1 for batch in dataloader: self._inference_step(batch) pbar.update() - self._on_finish() + self._on_loop_end() @ray.method(concurrency_group="model_io") def update_experience_maker(self, @@ -205,10 +210,29 @@ def _on_make_experience_end(self, experience: Experience) -> None: for callback in self.callbacks: callback.on_make_experience_end(experience) - def _on_finish(self) -> None: + def _on_loop_start(self) -> None: for callback in self.callbacks: - if hasattr(callback, 'on_finish'): - callback.on_finish() + callback.on_loop_start() + + def _on_loop_end(self) -> None: + for callback in self.callbacks: + callback.on_loop_end() + + def _on_send_start(self) -> None: + for callback in self.callbacks: + callback.on_send_start() + + def _on_send_end(self) -> None: + for callback in self.callbacks: + callback.on_send_end() + + def _on_batch_start(self) -> None: + for callback in self.callbacks: + callback.on_batch_start() + + def _on_batch_end(self) -> None: + for callback in 
self.callbacks: + callback.on_batch_end() def _set_default_generate_kwargs(generate_kwargs: dict, actor: Actor) -> None: diff --git a/applications/Chat/coati/trainer/callbacks/performance_evaluator.py b/applications/Chat/coati/trainer/callbacks/performance_evaluator.py index 0aebd2bf6280..5ca44a52d6e7 100644 --- a/applications/Chat/coati/trainer/callbacks/performance_evaluator.py +++ b/applications/Chat/coati/trainer/callbacks/performance_evaluator.py @@ -29,95 +29,6 @@ def all_reduce_mean(x: float, world_size: int) -> float: return tensor.item() -class ExperienceMakerPerformanceEvaluator(Callback): - - def __init__(self, actor_num_params: int, critic_num_params: int, initial_model_num_params: int, - reward_model_num_params: int) -> None: - super().__init__() - self.world_size = get_world_size() - self.actor_num_params = actor_num_params - self.critic_num_params = critic_num_params - self.initial_model_num_params = initial_model_num_params - self.reward_model_num_params = reward_model_num_params - - self.make_experience_duration: float = 0. 
- self.make_experience_start_time: Optional[float] = None - self.make_experience_num_samples: int = 0 - self.make_experience_flop: int = 0 - - def on_make_experience_start(self) -> None: - self.make_experience_start_time = time() - - def on_make_experience_end(self, experience: Experience) -> None: - self.make_experience_duration += time() - self.make_experience_start_time - - batch_size, seq_len = experience.sequences.shape - - self.make_experience_num_samples += batch_size - - # actor generate - num_actions = experience.action_mask.size(1) - input_len = seq_len - num_actions - total_seq_len = (input_len + seq_len - 1) * num_actions / 2 - self.make_experience_flop += self.actor_num_params * batch_size * total_seq_len * 2 - # actor forward - self.make_experience_flop += self.actor_num_params * batch_size * seq_len * 2 - # critic forward - self.make_experience_flop += self.critic_num_params * batch_size * seq_len * 2 - # initial model forward - self.make_experience_flop += self.initial_model_num_params * batch_size * seq_len * 2 - # reward model forward - self.make_experience_flop += self.reward_model_num_params * batch_size * seq_len * 2 - - def on_finish(self) -> None: - avg_make_experience_duration = all_reduce_mean(self.make_experience_duration, self.world_size) - - avg_make_experience_throughput = self.make_experience_num_samples / (avg_make_experience_duration + 1e-12) - avg_make_experience_tflops = self.make_experience_flop / 1e12 / (avg_make_experience_duration + 1e-12) - - print_rank_0( - f'Making experience throughput: {avg_make_experience_throughput:.3f} samples/sec, TFLOPS: {avg_make_experience_tflops:.3f}' - ) - - -class TrainerPerformaceEvaluator(Callback): - - def __init__(self, actor_num_params: int, critic_num_params: int, enable_grad_checkpoint: bool = False) -> None: - super().__init__() - self.world_size = get_world_size() - self.actor_num_params = actor_num_params - self.critic_num_params = critic_num_params - self.enable_grad_checkpoint = 
enable_grad_checkpoint - - self.learn_duration: float = 0. - self.learn_start_time: Optional[float] = None - self.learn_num_samples: int = 0 - self.learn_flop: int = 0 - - def on_learn_batch_start(self) -> None: - self.learn_start_time = time() - - def on_learn_batch_end(self, metrics: dict, experience: Experience) -> None: - self.learn_duration += time() - self.learn_start_time - - batch_size, seq_len = experience.sequences.shape - - self.learn_num_samples += batch_size - - # actor forward-backward, 3 means forward(1) + backward(2) - self.learn_flop += self.actor_num_params * batch_size * seq_len * 2 * (3 + int(self.enable_grad_checkpoint)) - # critic forward-backward - self.learn_flop += self.critic_num_params * batch_size * seq_len * 2 * (3 + int(self.enable_grad_checkpoint)) - - def on_finish(self) -> None: - avg_learn_duration = all_reduce_mean(self.learn_duration, self.world_size) - - avg_learn_throughput = self.learn_num_samples / (avg_learn_duration + 1e-12) - avg_learn_tflops = self.learn_flop / 1e12 / (avg_learn_duration + 1e-12) - - print_rank_0(f'Learning throughput: {avg_learn_throughput:.3f} samples/sec, TFLOPS: {avg_learn_tflops:.3f}') - - class PerformanceEvaluator(Callback): """ Callback for valuate the performance of the model. From 8c95f2a1c3fbd1a1a0228166bf17d5b7705e84bd Mon Sep 17 00:00:00 2001 From: csric <59389055+CsRic@users.noreply.github.com> Date: Wed, 26 Apr 2023 11:26:43 +0800 Subject: [PATCH 11/26] Prompt Example & requires_grad state_dict & sharding state_dict (#19) * prompt example * prompt load csv data * remove legacy try * maker models require_grad set to False * working on zero redundancy update * mmmt_prompt example; naive strategy requires_grad state_dict & sharding; maker model requires_no_grad. * remove legacy examples * remove legacy examples * remove replay buffer tp state. 
bad design --------- Co-authored-by: csric --- .../Chat/coati/ray/detached_replay_buffer.py | 23 +- .../Chat/coati/ray/detached_trainer_ppo.py | 10 +- .../Chat/coati/ray/experience_maker_holder.py | 1 + applications/Chat/coati/ray/utils.py | 1 + .../coati/trainer/strategies/colossalai.py | 11 +- .../Chat/coati/trainer/strategies/naive.py | 35 ++- applications/Chat/examples/ray/1m1t.py | 166 ------------- applications/Chat/examples/ray/1m1t.sh | 23 -- .../Chat/examples/ray/1m1t_quantize.py | 156 ------------ applications/Chat/examples/ray/1m2t.py | 203 ---------------- applications/Chat/examples/ray/1m2t.sh | 23 -- applications/Chat/examples/ray/1mmt_dummy.py | 10 +- applications/Chat/examples/ray/1mmt_prompt.py | 31 ++- applications/Chat/examples/ray/2m1t.py | 141 ----------- applications/Chat/examples/ray/2m1t.sh | 23 -- applications/Chat/examples/ray/2m2t.py | 230 ------------------ applications/Chat/examples/ray/2m2t.sh | 23 -- applications/Chat/examples/ray/mmmt_dummy.py | 10 +- applications/Chat/examples/ray/mmmt_prompt.py | 191 +++++++++++++++ 19 files changed, 273 insertions(+), 1038 deletions(-) delete mode 100644 applications/Chat/examples/ray/1m1t.py delete mode 100644 applications/Chat/examples/ray/1m1t.sh delete mode 100644 applications/Chat/examples/ray/1m1t_quantize.py delete mode 100644 applications/Chat/examples/ray/1m2t.py delete mode 100644 applications/Chat/examples/ray/1m2t.sh delete mode 100644 applications/Chat/examples/ray/2m1t.py delete mode 100644 applications/Chat/examples/ray/2m1t.sh delete mode 100644 applications/Chat/examples/ray/2m2t.py delete mode 100644 applications/Chat/examples/ray/2m2t.sh create mode 100644 applications/Chat/examples/ray/mmmt_prompt.py diff --git a/applications/Chat/coati/ray/detached_replay_buffer.py b/applications/Chat/coati/ray/detached_replay_buffer.py index 257b0b072493..6a4f5a6d67c2 100644 --- a/applications/Chat/coati/ray/detached_replay_buffer.py +++ b/applications/Chat/coati/ray/detached_replay_buffer.py 
@@ -26,20 +26,12 @@ class DetachedReplayBuffer: cpu_offload: Whether to offload experience to cpu when sampling. Defaults to True. ''' - def __init__(self, sample_batch_size: int, tp_world_size: int = 1, limit: int = 0) -> None: + def __init__(self, sample_batch_size: int, limit: int = 0) -> None: self.sample_batch_size = sample_batch_size self.limit = limit self.items = Queue(self.limit, actor_options={"num_cpus": 1}) self.batch_collector: List[BufferItem] = [] - ''' - Workers in the same tp group share this buffer and need same sample for one step. - Therefore a held_sample should be returned tp_world_size times before it could be dropped. - worker_state records wheter a worker got the held_sample - ''' - self.tp_world_size = tp_world_size - self.worker_state = [False] * self.tp_world_size - self.held_sample = None - self._worker_state_lock = Lock() + @torch.no_grad() def append(self, experience: Experience) -> None: @@ -70,16 +62,7 @@ def clear(self) -> None: @torch.no_grad() def sample(self, worker_rank=0, to_device="cpu") -> Experience: - self._worker_state_lock.acquire() - if not any(self.worker_state): - self.held_sample = self._sample_and_erase() - self.worker_state[worker_rank] = True - if all(self.worker_state): - self.worker_state = [False] * self.tp_world_size - ret = self.held_sample - else: - ret = copy.deepcopy(self.held_sample) - self._worker_state_lock.release() + ret = self._sample_and_erase() ret.to_device(to_device) return ret diff --git a/applications/Chat/coati/ray/detached_trainer_ppo.py b/applications/Chat/coati/ray/detached_trainer_ppo.py index 347df3d84589..d30158019d65 100644 --- a/applications/Chat/coati/ray/detached_trainer_ppo.py +++ b/applications/Chat/coati/ray/detached_trainer_ppo.py @@ -110,6 +110,8 @@ def __init__( @torch.no_grad() def _update_remote_makers(self, fully_update: bool = False, **config): # TODO: balance duties + if not fully_update: + config['requires_grad_only'] = True self.update_target_holder_list() # mark start, 
ensure order tasks = [] @@ -197,9 +199,9 @@ def _get_unwrapped_critic(self): return self.critic def _get_model_state_dict_shard(self, model: torch.nn.Module, **config): - try: - self.strategy.merge_lora_weight(model) - except AttributeError: - pass + # try: + # self.strategy.merge_lora_weight(model) + # except AttributeError: + # pass for state_dict in self.strategy.get_model_state_dict_shard(model, **config): yield state_dict_to(state_dict) diff --git a/applications/Chat/coati/ray/experience_maker_holder.py b/applications/Chat/coati/ray/experience_maker_holder.py index 996996400064..573771ad6258 100644 --- a/applications/Chat/coati/ray/experience_maker_holder.py +++ b/applications/Chat/coati/ray/experience_maker_holder.py @@ -192,6 +192,7 @@ def update_experience_maker(self, if new_critic_state_dict is not None: self.experience_maker.critic.load_state_dict(new_critic_state_dict, strict=False) + # the lock must be released after both actor and critic being updated if chunk_end: self._model_visit_lock.release() diff --git a/applications/Chat/coati/ray/utils.py b/applications/Chat/coati/ray/utils.py index bc38bd012d61..6cd7c564cc92 100644 --- a/applications/Chat/coati/ray/utils.py +++ b/applications/Chat/coati/ray/utils.py @@ -1,5 +1,6 @@ import os from typing import Any, Callable, Dict, List, Optional +from collections import OrderedDict import torch import torch.distributed as dist diff --git a/applications/Chat/coati/trainer/strategies/colossalai.py b/applications/Chat/coati/trainer/strategies/colossalai.py index d39092d5e7ad..238408b9f676 100644 --- a/applications/Chat/coati/trainer/strategies/colossalai.py +++ b/applications/Chat/coati/trainer/strategies/colossalai.py @@ -214,9 +214,10 @@ def get_model_state_dict_shard(self, model: nn.Module, **config): if self.stage != 3: yield from super().get_model_state_dict_shard(model, **config) else: - unwrapped_model = self._unwrap_model(model) - for module in unwrapped_model.modules(): - if isinstance(module, 
LoraLinear): - module.merge_weights = True - module.eval() + # unwrapped_model = self._unwrap_model(model) + # for module in unwrapped_model.modules(): + # if isinstance(module, LoraLinear): + # module.merge_weights = True + # module.eval() + model: ZeroDDP = model yield from model.state_dict_shard(max_shard_size=1024, only_rank_0=False) diff --git a/applications/Chat/coati/trainer/strategies/naive.py b/applications/Chat/coati/trainer/strategies/naive.py index a761786e0f21..3b537fdde2d4 100644 --- a/applications/Chat/coati/trainer/strategies/naive.py +++ b/applications/Chat/coati/trainer/strategies/naive.py @@ -1,6 +1,7 @@ import os -from typing import Any, Optional - +import sys +from typing import Any, Optional, Dict +from collections import OrderedDict import torch import torch.distributed as dist import torch.nn as nn @@ -14,6 +15,14 @@ from .base import Strategy +# TODO Move this to a util.py (Moving to ray.util introduces ringed import) +def get_grad_required_state_dict(model: nn.Module): + state_dict = OrderedDict() + for name, parameter in model.named_parameters(): + if parameter.requires_grad: + state_dict[name] = parameter.detach() + return state_dict + class NaiveStrategy(Strategy): """ @@ -81,8 +90,26 @@ def load_optimizer(self, optimizer: Optimizer, path: str, map_location: Any = No def get_model_state_dict_shard(self, model: nn.Module, **config): # TODO: implement sharding on naive strategy - state_dict = model.state_dict() - yield state_dict + if 'requires_grad_only' in config and config['requires_grad_only'] == True: + state_dict = get_grad_required_state_dict(model) + else: + state_dict = model.state_dict() + + if 'shard_size' in config: + shard_size = config['shard_size'] + accumulate_size = 0 + state_dict_shard = OrderedDict() + for name, param in state_dict.items(): + state_dict_shard[name] = param + accumulate_size += param.numel() * param.element_size() + if accumulate_size >= shard_size: + accumulate_size = 0 + yield state_dict_shard + 
state_dict_shard = OrderedDict() + if accumulate_size > 0: + yield state_dict_shard + else: + yield state_dict def merge_lora_weight(self, model: nn.Module): unwrapped_model = self._unwrap_model(model) diff --git a/applications/Chat/examples/ray/1m1t.py b/applications/Chat/examples/ray/1m1t.py deleted file mode 100644 index 8c291abb1f8b..000000000000 --- a/applications/Chat/examples/ray/1m1t.py +++ /dev/null @@ -1,166 +0,0 @@ -import argparse -import os -import socket -from copy import deepcopy - -import pandas as pd -import ray -import torch -from coati.experience_maker import NaiveExperienceMaker -from coati.ray.detached_trainer_ppo import DetachedPPOTrainer -from coati.ray.experience_maker_holder import ExperienceMakerHolder -from coati.trainer import PPOTrainer -from coati.trainer.callbacks.performance_evaluator import ( - ExperienceMakerPerformanceEvaluator, - TrainerPerformaceEvaluator, -) -from coati.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy -from torch.optim import Adam -from transformers import AutoTokenizer, BloomTokenizerFast -from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer - -from colossalai.nn.optimizer import HybridAdam - - -def get_free_port(): - with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: - s.bind(('', 0)) - return s.getsockname()[1] - - -def get_local_ip(): - with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: - s.connect(('8.8.8.8', 80)) - return s.getsockname()[0] - - -def main(args): - master_addr = str(get_local_ip()) - # trainer_env_info - trainer_port = str(get_free_port()) - env_info_trainer = { - 'local_rank': '0', - 'rank': '0', - 'world_size': '1', - 'master_port': trainer_port, - 'master_addr': master_addr - } - - # maker_env_info - maker_port = str(get_free_port()) - env_info_maker = { - 'local_rank': '0', - 'rank': '0', - 'world_size': '1', - 'master_port': maker_port, - 'master_addr': master_addr - } - - # configure tokenizer - if args.model == 'gpt2': - 
tokenizer = GPT2Tokenizer.from_pretrained('gpt2') - tokenizer.pad_token = tokenizer.eos_token - elif args.model == 'bloom': - tokenizer = BloomTokenizerFast.from_pretrained(args.pretrain) - tokenizer.pad_token = tokenizer.eos_token - elif args.model == 'opt': - tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") - else: - raise ValueError(f'Unsupported model "{args.model}"') - - # configure Trainer - trainer_ref = DetachedPPOTrainer.options(name="trainer1", num_gpus=1, max_concurrency=2).remote( - experience_maker_holder_name_list=["maker1"], - strategy=args.trainer_strategy, - model=args.model, - env_info=env_info_trainer, - pretrained=args.pretrain, - lora_rank=args.lora_rank, - train_batch_size=args.train_batch_size, - buffer_limit=16, - experience_batch_size=args.experience_batch_size, - max_epochs=args.max_epochs, - # kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - eval_performance=True, - debug=args.debug, - ) - - # configure Experience Maker - experience_holder_ref = ExperienceMakerHolder.options(name="maker1", num_gpus=1, max_concurrency=2).remote( - detached_trainer_name_list=["trainer1"], - strategy=args.maker_strategy, - env_info=env_info_maker, - experience_batch_size=args.experience_batch_size, - kl_coef=0.1, - # kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - eval_performance=True, - debug=args.debug, - ) - - # trainer send its actor and critic to experience holders. 
- ray.get(trainer_ref.initialize_remote_makers.remote()) - - # configure sampler - dataset = pd.read_csv(args.prompt_path)['prompt'] - - def tokenize_fn(texts): - # MUST padding to max length to ensure inputs of all ranks have the same length - # Different length may lead to hang when using gemini, as different generation steps - batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True) - return {k: v.cuda() for k, v in batch.items()} - - trainer_done_ref = trainer_ref.fit.remote(num_episodes=args.num_episodes, - max_timesteps=args.max_timesteps, - update_timesteps=args.update_timesteps) - num_exp_per_maker = args.num_episodes * args.max_timesteps // args.update_timesteps * \ - args.max_epochs + 3 # +3 for fault tolerance - maker_done_ref = experience_holder_ref.workingloop.remote(dataset, tokenize_fn, times=num_exp_per_maker) - - ray.get([trainer_done_ref, maker_done_ref]) - - # save model checkpoint after fitting - trainer_ref.strategy_save_actor.remote(args.save_path, only_rank0=True) - # save optimizer checkpoint on all ranks - if args.need_optim_ckpt: - trainer_ref.strategy_save_actor_optim.remote('actor_optim_checkpoint_prompts_%d.pt' % - (torch.cuda.current_device()), - only_rank0=False) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('prompt_path') - parser.add_argument('--trainer_strategy', - choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'], - default='naive') - parser.add_argument('--maker_strategy', - choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'], - default='naive') - parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt']) - parser.add_argument('--pretrain', type=str, default=None) - parser.add_argument('--save_path', type=str, default='actor_checkpoint_prompts.pt') - parser.add_argument('--need_optim_ckpt', type=bool, default=False) - parser.add_argument('--num_episodes', type=int, default=10) - 
parser.add_argument('--max_timesteps', type=int, default=10) - parser.add_argument('--update_timesteps', type=int, default=10) - parser.add_argument('--max_epochs', type=int, default=5) - parser.add_argument('--train_batch_size', type=int, default=8) - parser.add_argument('--experience_batch_size', type=int, default=8) - parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank") - - parser.add_argument('--debug', action='store_true') - args = parser.parse_args() - ray.init(namespace=os.environ["RAY_NAMESPACE"]) - main(args) diff --git a/applications/Chat/examples/ray/1m1t.sh b/applications/Chat/examples/ray/1m1t.sh deleted file mode 100644 index f7c5054c800e..000000000000 --- a/applications/Chat/examples/ray/1m1t.sh +++ /dev/null @@ -1,23 +0,0 @@ -set_n_least_used_CUDA_VISIBLE_DEVICES() { - local n=${1:-"9999"} - echo "GPU Memory Usage:" - local FIRST_N_GPU_IDS=$(nvidia-smi --query-gpu=memory.used --format=csv \ - | tail -n +2 \ - | nl -v 0 \ - | tee /dev/tty \ - | sort -g -k 2 \ - | awk '{print $1}' \ - | head -n $n) - export CUDA_VISIBLE_DEVICES=$(echo $FIRST_N_GPU_IDS | sed 's/ /,/g') - echo "Now CUDA_VISIBLE_DEVICES is set to:" - echo "CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES" -} - -set_n_least_used_CUDA_VISIBLE_DEVICES 2 - -export RAY_NAMESPACE="admin" - -python 1m1t.py "/path/to/prompts.csv" \ - --trainer_strategy colossalai_zero2 --maker_strategy naive --lora_rank 2 --pretrain "facebook/opt-350m" --model 'opt' \ - --num_episodes 10 --max_timesteps 10 --update_timesteps 10 \ - --max_epochs 10 --debug diff --git a/applications/Chat/examples/ray/1m1t_quantize.py b/applications/Chat/examples/ray/1m1t_quantize.py deleted file mode 100644 index cc54bd1905c6..000000000000 --- a/applications/Chat/examples/ray/1m1t_quantize.py +++ /dev/null @@ -1,156 +0,0 @@ -import argparse -import os -import socket - -import pandas as pd -import ray -import torch -from coati.ray.detached_trainer_ppo import DetachedPPOTrainer -from 
coati.ray.experience_maker_holder import ExperienceMakerHolder -from transformers import AutoTokenizer, BloomTokenizerFast -from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer - - -def get_free_port(): - with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: - s.bind(('', 0)) - return s.getsockname()[1] - - -def get_local_ip(): - with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: - s.connect(('8.8.8.8', 80)) - return s.getsockname()[0] - - -def main(args): - master_addr = str(get_local_ip()) - # trainer_env_info - trainer_port = str(get_free_port()) - env_info_trainer = { - 'local_rank': '0', - 'rank': '0', - 'world_size': '1', - 'master_port': trainer_port, - 'master_addr': master_addr - } - - # maker_env_info - maker_port = str(get_free_port()) - env_info_maker = { - 'local_rank': '0', - 'rank': '0', - 'world_size': '1', - 'master_port': maker_port, - 'master_addr': master_addr - } - - # configure tokenizer - if args.model == 'gpt2': - tokenizer = GPT2Tokenizer.from_pretrained('gpt2') - tokenizer.pad_token = tokenizer.eos_token - elif args.model == 'bloom': - tokenizer = BloomTokenizerFast.from_pretrained(args.pretrain) - tokenizer.pad_token = tokenizer.eos_token - elif args.model == 'opt': - tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") - else: - raise ValueError(f'Unsupported model "{args.model}"') - - # configure Trainer - trainer_ref = DetachedPPOTrainer.options(name="trainer1", num_gpus=1, max_concurrency=2).remote( - experience_maker_holder_name_list=["maker1"], - strategy=args.trainer_strategy, - model=args.model, - env_info=env_info_trainer, - pretrained=args.pretrain, - lora_rank=args.lora_rank, - train_batch_size=args.train_batch_size, - buffer_limit=16, - experience_batch_size=args.experience_batch_size, - max_epochs=args.max_epochs, - # kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - 
debug=args.debug, - eval_performance=True, - ) - - # configure Experience Maker - experience_holder_ref = ExperienceMakerHolder.options(name="maker1", num_gpus=1, max_concurrency=2).remote( - detached_trainer_name_list=["trainer1"], - strategy=args.maker_strategy, - env_info=env_info_maker, - experience_batch_size=args.experience_batch_size, - kl_coef=0.1, - # kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - debug=args.debug, - eval_performance=True, - ) - - # a 'jump wire' to set quantized initial_model and reward_model - - # trainer send its actor and critic to experience holders. - # ray.get(trainer_ref.initialize_remote_makers.remote()) - # configure sampler - dataset = pd.read_csv(args.prompt_path)['prompt'] - - def tokenize_fn(texts): - # MUST padding to max length to ensure inputs of all ranks have the same length - # Different length may lead to hang when using gemini, as different generation steps - batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True) - return {k: v.cuda() for k, v in batch.items()} - - trainer_done_ref = trainer_ref.fit.remote(num_episodes=args.num_episodes, - max_timesteps=args.max_timesteps, - update_timesteps=args.update_timesteps) - num_exp_per_maker = args.num_episodes * args.max_timesteps // args.update_timesteps * \ - args.max_epochs + 3 # +3 for fault tolerance - maker_done_ref = experience_holder_ref.workingloop.remote(dataset, tokenize_fn, times=num_exp_per_maker) - - ray.get([trainer_done_ref, maker_done_ref]) - - # save model checkpoint after fitting - trainer_ref.strategy_save_actor.remote(args.save_path, only_rank0=True) - # save optimizer checkpoint on all ranks - if args.need_optim_ckpt: - trainer_ref.strategy_save_actor_optim.remote('actor_optim_checkpoint_prompts_%d.pt' % - (torch.cuda.current_device()), - only_rank0=False) - - -if __name__ == '__main__': - parser = 
argparse.ArgumentParser() - parser.add_argument('prompt_path') - parser.add_argument('--trainer_strategy', - choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'], - default='naive') - parser.add_argument('--maker_strategy', - choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'], - default='naive') - parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama', 'roberta']) - parser.add_argument('--pretrain', type=str, default=None) - parser.add_argument('--save_path', type=str, default='actor_checkpoint_prompts.pt') - parser.add_argument('--need_optim_ckpt', type=bool, default=False) - parser.add_argument('--num_episodes', type=int, default=10) - parser.add_argument('--max_timesteps', type=int, default=10) - parser.add_argument('--update_timesteps', type=int, default=10) - parser.add_argument('--max_epochs', type=int, default=5) - parser.add_argument('--train_batch_size', type=int, default=8) - parser.add_argument('--experience_batch_size', type=int, default=8) - parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank") - - parser.add_argument('--debug', action='store_true') - args = parser.parse_args() - ray.init(namespace=os.environ["RAY_NAMESPACE"]) - main(args) diff --git a/applications/Chat/examples/ray/1m2t.py b/applications/Chat/examples/ray/1m2t.py deleted file mode 100644 index 1a35beb6221a..000000000000 --- a/applications/Chat/examples/ray/1m2t.py +++ /dev/null @@ -1,203 +0,0 @@ -import argparse -import os -import socket -from copy import deepcopy - -import pandas as pd -import ray -import torch -from coati.experience_maker import NaiveExperienceMaker -from coati.ray.detached_trainer_ppo import DetachedPPOTrainer -from coati.ray.experience_maker_holder import ExperienceMakerHolder -from coati.trainer import PPOTrainer -from coati.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy -from torch.optim import Adam -from transformers import 
AutoTokenizer, BloomTokenizerFast -from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer - -from colossalai.nn.optimizer import HybridAdam - - -def get_free_port(): - with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: - s.bind(('', 0)) - return s.getsockname()[1] - - -def get_local_ip(): - with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: - s.connect(('8.8.8.8', 80)) - return s.getsockname()[0] - - -def main(args): - master_addr = str(get_local_ip()) - # trainer_env_info - trainer_port = str(get_free_port()) - env_info_trainer_1 = { - 'local_rank': '0', - 'rank': '0', - 'world_size': '2', - 'master_port': trainer_port, - 'master_addr': master_addr - } - env_info_trainer_2 = { - 'local_rank': '0', - 'rank': '1', - 'world_size': '2', - 'master_port': trainer_port, - 'master_addr': master_addr - } - # maker_env_info - maker_port = str(get_free_port()) - env_info_maker_1 = { - 'local_rank': '0', - 'rank': '0', - 'world_size': '2', - 'master_port': maker_port, - 'master_addr': master_addr - } - print([env_info_trainer_1, env_info_trainer_2, env_info_maker_1]) - ray.init(dashboard_port=1145) - # configure tokenizer - if args.model == 'gpt2': - tokenizer = GPT2Tokenizer.from_pretrained('gpt2') - tokenizer.pad_token = tokenizer.eos_token - elif args.model == 'bloom': - tokenizer = BloomTokenizerFast.from_pretrained(args.pretrain) - tokenizer.pad_token = tokenizer.eos_token - elif args.model == 'opt': - tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") - else: - raise ValueError(f'Unsupported model "{args.model}"') - - # configure Trainer - trainer_1_ref = DetachedPPOTrainer.options(name="trainer1", - namespace=os.environ["RAY_NAMESPACE"], - num_gpus=1, - max_concurrency=2).remote( - experience_maker_holder_name_list=["maker1"], - strategy=args.trainer_strategy, - model=args.model, - env_info=env_info_trainer_1, - pretrained=args.pretrain, - lora_rank=args.lora_rank, - train_batch_size=args.train_batch_size, - buffer_limit=16, 
- experience_batch_size=args.experience_batch_size, - max_epochs=args.max_epochs, - # kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - debug=args.debug, - ) - - trainer_2_ref = DetachedPPOTrainer.options(name="trainer2", - namespace=os.environ["RAY_NAMESPACE"], - num_gpus=1, - max_concurrency=2).remote( - experience_maker_holder_name_list=["maker1"], - strategy=args.trainer_strategy, - model=args.model, - env_info=env_info_trainer_2, - pretrained=args.pretrain, - lora_rank=args.lora_rank, - train_batch_size=args.train_batch_size, - buffer_limit=16, - experience_batch_size=args.experience_batch_size, - max_epochs=args.max_epochs, - # kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - debug=args.debug, - ) - - # configure Experience Maker - experience_holder_1_ref = ExperienceMakerHolder.options(name="maker1", - namespace=os.environ["RAY_NAMESPACE"], - num_gpus=1, - max_concurrency=2).remote( - detached_trainer_name_list=["trainer1", "trainer2"], - strategy=args.maker_strategy, - env_info=env_info_maker_1, - experience_batch_size=args.experience_batch_size, - kl_coef=0.1, - # kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - debug=args.debug, - ) - - # trainer send its actor and critic to experience holders. 
- # TODO: balance duty - ray.get(trainer_1_ref.initialize_remote_makers.remote()) - - # configure sampler - dataset = pd.read_csv(args.prompt_path)['prompt'] - - def tokenize_fn(texts): - # MUST padding to max length to ensure inputs of all ranks have the same length - # Different length may lead to hang when using gemini, as different generation steps - batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True) - return {k: v.cuda() for k, v in batch.items()} - - trainer_1_done_ref = trainer_1_ref.fit.remote(num_episodes=args.num_episodes, - max_timesteps=args.max_timesteps, - update_timesteps=args.update_timesteps) - trainer_2_done_ref = trainer_2_ref.fit.remote(num_episodes=args.num_episodes, - max_timesteps=args.max_timesteps, - update_timesteps=args.update_timesteps) - num_exp_per_maker = args.num_episodes * args.max_timesteps // args.update_timesteps * \ - args.max_epochs * 2 + 3 # +3 for fault tolerance - maker_1_done_ref = experience_holder_1_ref.workingloop.remote(dataset, tokenize_fn, times=num_exp_per_maker) - - ray.get([trainer_1_done_ref, trainer_2_done_ref, maker_1_done_ref]) - # save model checkpoint after fitting - trainer_1_ref.strategy_save_actor.remote(args.save_path, only_rank0=True) - trainer_2_ref.strategy_save_actor.remote(args.save_path, only_rank0=True) - # save optimizer checkpoint on all ranks - if args.need_optim_ckpt: - trainer_1_ref.strategy_save_actor_optim.remote('actor_optim_checkpoint_prompts_%d.pt' % - (torch.cuda.current_device()), - only_rank0=False) - trainer_2_ref.strategy_save_actor_optim.remote('actor_optim_checkpoint_prompts_%d.pt' % - (torch.cuda.current_device()), - only_rank0=False) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('prompt_path') - parser.add_argument('--trainer_strategy', - choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'], - default='naive') - parser.add_argument('--maker_strategy', - choices=['naive', 
'ddp', 'colossalai_gemini', 'colossalai_zero2'], - default='naive') - parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt']) - parser.add_argument('--pretrain', type=str, default=None) - parser.add_argument('--save_path', type=str, default='actor_checkpoint_prompts.pt') - parser.add_argument('--need_optim_ckpt', type=bool, default=False) - parser.add_argument('--num_episodes', type=int, default=10) - parser.add_argument('--max_timesteps', type=int, default=10) - parser.add_argument('--update_timesteps', type=int, default=10) - parser.add_argument('--max_epochs', type=int, default=5) - parser.add_argument('--train_batch_size', type=int, default=8) - parser.add_argument('--experience_batch_size', type=int, default=8) - parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank") - - parser.add_argument('--debug', action='store_true') - args = parser.parse_args() - main(args) diff --git a/applications/Chat/examples/ray/1m2t.sh b/applications/Chat/examples/ray/1m2t.sh deleted file mode 100644 index 9608526ea7e7..000000000000 --- a/applications/Chat/examples/ray/1m2t.sh +++ /dev/null @@ -1,23 +0,0 @@ -set_n_least_used_CUDA_VISIBLE_DEVICES() { - local n=${1:-"9999"} - echo "GPU Memory Usage:" - local FIRST_N_GPU_IDS=$(nvidia-smi --query-gpu=memory.used --format=csv \ - | tail -n +2 \ - | nl -v 0 \ - | tee /dev/tty \ - | sort -g -k 2 \ - | awk '{print $1}' \ - | head -n $n) - export CUDA_VISIBLE_DEVICES=$(echo $FIRST_N_GPU_IDS | sed 's/ /,/g') - echo "Now CUDA_VISIBLE_DEVICES is set to:" - echo "CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES" -} - -set_n_least_used_CUDA_VISIBLE_DEVICES 2 - -export RAY_NAMESPACE="admin" - -python 1m2t.py "/path/to/prompts.csv" --model gpt2 \ - --maker_strategy naive --trainer_strategy ddp --lora_rank 2 \ - --num_episodes 10 --max_timesteps 10 --update_timesteps 10 \ - --max_epochs 10 #--debug diff --git a/applications/Chat/examples/ray/1mmt_dummy.py 
b/applications/Chat/examples/ray/1mmt_dummy.py index d2e820680114..eba5213a83d3 100644 --- a/applications/Chat/examples/ray/1mmt_dummy.py +++ b/applications/Chat/examples/ray/1mmt_dummy.py @@ -61,17 +61,17 @@ def main(args): def model_fn(): actor_cfg = AutoConfig.from_pretrained(args.pretrain) critic_cfg = AutoConfig.from_pretrained(args.critic_pretrain) - actor = get_actor_from_args(args.model, config=actor_cfg).half().cuda() - critic = get_critic_from_args(args.critic_model, config=critic_cfg).half().cuda() - reward_model = get_reward_model_from_args(args.critic_model, config=critic_cfg).half().cuda() + actor = get_actor_from_args(args.model, config=actor_cfg).requires_grad_(False).half().cuda() + critic = get_critic_from_args(args.critic_model, config=critic_cfg).requires_grad_(False).half().cuda() + reward_model = get_reward_model_from_args(args.critic_model, config=critic_cfg).requires_grad_(False).half().cuda() if args.initial_model_quant_ckpt is not None and args.model == 'llama': # quantize initial model with low_resource_init(), no_init_weights(): initial_model = get_actor_from_args(args.model, config=actor_cfg) initial_model.model = llama_load_quant(initial_model.model, args.initial_model_quant_ckpt, args.quant_bits, - args.quant_group_size).cuda() + args.quant_group_size).cuda().requires_grad_(False) else: - initial_model = get_actor_from_args(args.model, config=actor_cfg).half().cuda() + initial_model = get_actor_from_args(args.model, config=actor_cfg).requires_grad_(False).half().cuda() return actor, critic, reward_model, initial_model # configure Experience Maker diff --git a/applications/Chat/examples/ray/1mmt_prompt.py b/applications/Chat/examples/ray/1mmt_prompt.py index 5baf96eaa508..bd7224aae749 100644 --- a/applications/Chat/examples/ray/1mmt_prompt.py +++ b/applications/Chat/examples/ray/1mmt_prompt.py @@ -6,6 +6,7 @@ import pandas as pd import ray import torch +from coati.quant import llama_load_quant, low_resource_init from 
coati.ray.detached_trainer_ppo import DetachedPPOTrainer from coati.ray.experience_maker_holder import ExperienceMakerHolder from coati.ray.utils import ( @@ -17,6 +18,8 @@ ) from torch.utils.data import DataLoader +from transformers import AutoConfig +from transformers.modeling_utils import no_init_weights def get_free_port(): with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: @@ -74,10 +77,18 @@ def trainer_model_fn(): ] def model_fn(): - actor = get_actor_from_args(args.model, args.pretrain).half().cuda() - critic = get_critic_from_args(args.model, args.critic_pretrain).half().cuda() - reward_model = get_reward_model_from_args(args.model, args.critic_pretrain).half().cuda() - initial_model = get_actor_from_args(args.model, args.pretrain).half().cuda() + actor = get_actor_from_args(args.model, args.pretrain).requires_grad_(False).half().cuda() + critic = get_critic_from_args(args.model, args.critic_pretrain).requires_grad_(False).half().cuda() + reward_model = get_reward_model_from_args(args.model, args.critic_pretrain).requires_grad_(False).half().cuda() + if args.initial_model_quant_ckpt is not None and args.model == 'llama': + # quantize initial model + actor_cfg = AutoConfig.from_pretrained(args.pretrain) + with low_resource_init(), no_init_weights(): + initial_model = get_actor_from_args(args.model, config=actor_cfg) + initial_model.model = llama_load_quant(initial_model.model, args.initial_model_quant_ckpt, args.quant_bits, + args.quant_group_size).cuda().requires_grad_(False) + else: + initial_model = get_actor_from_args(args.model, args.pretrain).requires_grad_(False).half().cuda() return actor, critic, reward_model, initial_model # configure Experience Maker @@ -118,7 +129,6 @@ def model_fn(): dataset_size = args.experience_batch_size * 4 - from torch.utils.data import DataLoader def build_dataloader(): def tokenize_fn(texts): @@ -145,10 +155,14 @@ def tokenize_fn(texts): parser.add_argument('--prompt_path', type=str, default=None) 
parser.add_argument('--num_trainers', type=int, default=1) parser.add_argument('--trainer_strategy', - choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'], + choices=[ + 'naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2', 'colossalai_gemini_cpu', + 'colossalai_zero2_cpu' + ], default='naive') parser.add_argument('--maker_strategy', choices=['naive'], default='naive') - parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt']) + parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama']) + parser.add_argument('--critic_model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama']) parser.add_argument('--pretrain', type=str, default=None) parser.add_argument('--critic_pretrain', type=str, default=None) parser.add_argument('--experience_steps', type=int, default=4) @@ -158,6 +172,9 @@ def tokenize_fn(texts): parser.add_argument('--train_batch_size', type=int, default=8) parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank") + parser.add_argument('--initial_model_quant_ckpt', type=str, default=None) + parser.add_argument('--quant_bits', type=int, default=4) + parser.add_argument('--quant_group_size', type=int, default=128) parser.add_argument('--debug', action='store_true') args = parser.parse_args() ray.init(namespace=os.environ["RAY_NAMESPACE"]) diff --git a/applications/Chat/examples/ray/2m1t.py b/applications/Chat/examples/ray/2m1t.py deleted file mode 100644 index bed6246ed0d7..000000000000 --- a/applications/Chat/examples/ray/2m1t.py +++ /dev/null @@ -1,141 +0,0 @@ -import argparse -import os -import socket -from copy import deepcopy - -import pandas as pd -import ray -import torch -from coati.experience_maker import NaiveExperienceMaker -from coati.ray.detached_trainer_ppo import DetachedPPOTrainer -from coati.ray.experience_maker_holder import ExperienceMakerHolder -from coati.trainer import PPOTrainer -from coati.trainer.strategies import 
ColossalAIStrategy, DDPStrategy, NaiveStrategy -from torch.optim import Adam -from transformers import AutoTokenizer, BloomTokenizerFast -from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer - -from colossalai.nn.optimizer import HybridAdam - - -def main(args): - # configure tokenizer - if args.model == 'gpt2': - tokenizer = GPT2Tokenizer.from_pretrained('gpt2') - tokenizer.pad_token = tokenizer.eos_token - elif args.model == 'bloom': - tokenizer = BloomTokenizerFast.from_pretrained(args.pretrain) - tokenizer.pad_token = tokenizer.eos_token - elif args.model == 'opt': - tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") - else: - raise ValueError(f'Unsupported model "{args.model}"') - - # configure Trainer - trainer_ref = DetachedPPOTrainer.options(name="trainer1", num_gpus=1, max_concurrency=2).remote( - experience_maker_holder_name_list=["maker1", "maker2"], - strategy=args.trainer_strategy, - model=args.model, - pretrained=args.pretrain, - lora_rank=args.lora_rank, - train_batch_size=args.train_batch_size, - buffer_limit=16, - experience_batch_size=args.experience_batch_size, - max_epochs=args.max_epochs, - # kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - debug=args.debug, - ) - - # configure Experience Maker - experience_holder_1_ref = ExperienceMakerHolder.options(name="maker1", num_gpus=1, max_concurrency=2).remote( - detached_trainer_name_list=["trainer1"], - strategy=args.maker_strategy, - experience_batch_size=args.experience_batch_size, - kl_coef=0.1, - # kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - debug=args.debug, - ) - - experience_holder_2_ref = ExperienceMakerHolder.options(name="maker2", num_gpus=1, max_concurrency=2).remote( - detached_trainer_name_list=["trainer1"], - strategy=args.maker_strategy, - 
experience_batch_size=args.experience_batch_size, - kl_coef=0.1, - # kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - debug=args.debug, - ) - - # trainer send its actor and critic to experience holders. - ray.get(trainer_ref.initialize_remote_makers.remote()) - - # configure sampler - dataset = pd.read_csv(args.prompt_path)['prompt'] - - def tokenize_fn(texts): - # MUST padding to max length to ensure inputs of all ranks have the same length - # Different length may lead to hang when using gemini, as different generation steps - batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True) - return {k: v.cuda() for k, v in batch.items()} - - trainer_done_ref = trainer_ref.fit.remote(num_episodes=args.num_episodes, - max_timesteps=args.max_timesteps, - update_timesteps=args.update_timesteps) - num_exp_per_maker = args.num_episodes * args.max_timesteps // args.update_timesteps * \ - args.max_epochs // 2 + 3 # +3 for fault tolerance - maker_1_done_ref = experience_holder_1_ref.workingloop.remote(dataset, tokenize_fn, times=num_exp_per_maker) - maker_2_done_ref = experience_holder_2_ref.workingloop.remote(dataset, tokenize_fn, times=num_exp_per_maker) - - ray.get([trainer_done_ref, maker_1_done_ref, maker_2_done_ref]) - - # save model checkpoint after fitting - trainer_ref.strategy_save_actor.remote(args.save_path, only_rank0=True) - # save optimizer checkpoint on all ranks - if args.need_optim_ckpt: - trainer_ref.strategy_save_actor_optim.remote('actor_optim_checkpoint_prompts_%d.pt' % - (torch.cuda.current_device()), - only_rank0=False) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('prompt_path') - parser.add_argument('--trainer_strategy', - choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'], - default='naive') - parser.add_argument('--maker_strategy', - 
choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'], - default='naive') - parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt']) - parser.add_argument('--pretrain', type=str, default=None) - parser.add_argument('--save_path', type=str, default='actor_checkpoint_prompts.pt') - parser.add_argument('--need_optim_ckpt', type=bool, default=False) - parser.add_argument('--num_episodes', type=int, default=10) - parser.add_argument('--max_timesteps', type=int, default=10) - parser.add_argument('--update_timesteps', type=int, default=10) - parser.add_argument('--max_epochs', type=int, default=5) - parser.add_argument('--train_batch_size', type=int, default=8) - parser.add_argument('--experience_batch_size', type=int, default=8) - parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank") - - parser.add_argument('--debug', action='store_true') - args = parser.parse_args() - ray.init(namespace=os.environ["RAY_NAMESPACE"]) - main(args) diff --git a/applications/Chat/examples/ray/2m1t.sh b/applications/Chat/examples/ray/2m1t.sh deleted file mode 100644 index a207d4118d60..000000000000 --- a/applications/Chat/examples/ray/2m1t.sh +++ /dev/null @@ -1,23 +0,0 @@ -set_n_least_used_CUDA_VISIBLE_DEVICES() { - local n=${1:-"9999"} - echo "GPU Memory Usage:" - local FIRST_N_GPU_IDS=$(nvidia-smi --query-gpu=memory.used --format=csv \ - | tail -n +2 \ - | nl -v 0 \ - | tee /dev/tty \ - | sort -g -k 2 \ - | awk '{print $1}' \ - | head -n $n) - export CUDA_VISIBLE_DEVICES=$(echo $FIRST_N_GPU_IDS | sed 's/ /,/g') - echo "Now CUDA_VISIBLE_DEVICES is set to:" - echo "CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES" -} - -set_n_least_used_CUDA_VISIBLE_DEVICES 3 - -export RAY_NAMESPACE="admin" - -python 2m1t.py "/path/to/prompts.csv" \ - --trainer_strategy naive --maker_strategy naive --lora_rank 2 --pretrain "facebook/opt-350m" --model 'opt' \ - --num_episodes 10 --max_timesteps 10 --update_timesteps 10 \ - --max_epochs 
10 # --debug diff --git a/applications/Chat/examples/ray/2m2t.py b/applications/Chat/examples/ray/2m2t.py deleted file mode 100644 index 05440032ce9f..000000000000 --- a/applications/Chat/examples/ray/2m2t.py +++ /dev/null @@ -1,230 +0,0 @@ -import argparse -import os -import socket -from copy import deepcopy - -import pandas as pd -import ray -import torch -from coati.experience_maker import NaiveExperienceMaker -from coati.ray.detached_trainer_ppo import DetachedPPOTrainer -from coati.ray.experience_maker_holder import ExperienceMakerHolder -from coati.trainer import PPOTrainer -from coati.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy -from torch.optim import Adam -from transformers import AutoTokenizer, BloomTokenizerFast -from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer - -from colossalai.nn.optimizer import HybridAdam - - -def get_free_port(): - with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: - s.bind(('', 0)) - return s.getsockname()[1] - - -def get_local_ip(): - with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: - s.connect(('8.8.8.8', 80)) - return s.getsockname()[0] - - -def main(args): - master_addr = str(get_local_ip()) - # trainer_env_info - trainer_port = str(get_free_port()) - env_info_trainer_1 = { - 'local_rank': '0', - 'rank': '0', - 'world_size': '2', - 'master_port': trainer_port, - 'master_addr': master_addr - } - env_info_trainer_2 = { - 'local_rank': '0', - 'rank': '1', - 'world_size': '2', - 'master_port': trainer_port, - 'master_addr': master_addr - } - # maker_env_info - maker_port = str(get_free_port()) - env_info_maker_1 = { - 'local_rank': '0', - 'rank': '0', - 'world_size': '2', - 'master_port': maker_port, - 'master_addr': master_addr - } - env_info_maker_2 = { - 'local_rank': '0', - 'rank': '1', - 'world_size': '2', - 'master_port': maker_port, - 'master_addr': master_addr - } - print([env_info_trainer_1, env_info_trainer_2, env_info_maker_1, env_info_maker_2]) - 
ray.init() - # configure tokenizer - if args.model == 'gpt2': - tokenizer = GPT2Tokenizer.from_pretrained('gpt2') - tokenizer.pad_token = tokenizer.eos_token - elif args.model == 'bloom': - tokenizer = BloomTokenizerFast.from_pretrained(args.pretrain) - tokenizer.pad_token = tokenizer.eos_token - elif args.model == 'opt': - tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") - else: - raise ValueError(f'Unsupported model "{args.model}"') - - # configure Trainer - trainer_1_ref = DetachedPPOTrainer.options(name="trainer1", - namespace=os.environ["RAY_NAMESPACE"], - num_gpus=1, - max_concurrency=2).remote( - experience_maker_holder_name_list=["maker1", "maker2"], - strategy=args.trainer_strategy, - model=args.model, - env_info=env_info_trainer_1, - pretrained=args.pretrain, - lora_rank=args.lora_rank, - train_batch_size=args.train_batch_size, - buffer_limit=16, - experience_batch_size=args.experience_batch_size, - max_epochs=args.max_epochs, - # kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - debug=args.debug, - ) - - trainer_2_ref = DetachedPPOTrainer.options(name="trainer2", - namespace=os.environ["RAY_NAMESPACE"], - num_gpus=1, - max_concurrency=2).remote( - experience_maker_holder_name_list=["maker1", "maker2"], - strategy=args.trainer_strategy, - model=args.model, - env_info=env_info_trainer_2, - pretrained=args.pretrain, - lora_rank=args.lora_rank, - train_batch_size=args.train_batch_size, - buffer_limit=16, - experience_batch_size=args.experience_batch_size, - max_epochs=args.max_epochs, - # kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - debug=args.debug, - ) - - # configure Experience Maker - experience_holder_1_ref = ExperienceMakerHolder.options(name="maker1", - namespace=os.environ["RAY_NAMESPACE"], - num_gpus=1, - 
max_concurrency=2).remote( - detached_trainer_name_list=["trainer1", "trainer2"], - strategy=args.maker_strategy, - env_info=env_info_maker_1, - experience_batch_size=args.experience_batch_size, - kl_coef=0.1, - # kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - debug=args.debug, - ) - - experience_holder_2_ref = ExperienceMakerHolder.options(name="maker2", - namespace=os.environ["RAY_NAMESPACE"], - num_gpus=1, - max_concurrency=2).remote( - detached_trainer_name_list=["trainer1", "trainer2"], - strategy=args.maker_strategy, - env_info=env_info_maker_2, - experience_batch_size=args.experience_batch_size, - kl_coef=0.1, - # kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - debug=args.debug, - ) - - # trainer send its actor and critic to experience holders. - # TODO: balance duty - ray.get(trainer_1_ref.initialize_remote_makers.remote()) - - # configure sampler - dataset = pd.read_csv(args.prompt_path)['prompt'] - - def tokenize_fn(texts): - # MUST padding to max length to ensure inputs of all ranks have the same length - # Different length may lead to hang when using gemini, as different generation steps - batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True) - return {k: v.cuda() for k, v in batch.items()} - - trainer_1_done_ref = trainer_1_ref.fit.remote(num_episodes=args.num_episodes, - max_timesteps=args.max_timesteps, - update_timesteps=args.update_timesteps) - trainer_2_done_ref = trainer_2_ref.fit.remote(num_episodes=args.num_episodes, - max_timesteps=args.max_timesteps, - update_timesteps=args.update_timesteps) - num_exp_per_maker = args.num_episodes * args.max_timesteps // args.update_timesteps * \ - args.max_epochs + 3 # +3 for fault tolerance - maker_1_done_ref = 
experience_holder_1_ref.workingloop.remote(dataset, tokenize_fn, times=num_exp_per_maker) - maker_2_done_ref = experience_holder_2_ref.workingloop.remote(dataset, tokenize_fn, times=num_exp_per_maker) - - ray.get([trainer_1_done_ref, trainer_2_done_ref, maker_1_done_ref, maker_2_done_ref]) - # save model checkpoint after fitting - trainer_1_ref.strategy_save_actor.remote(args.save_path, only_rank0=True) - trainer_2_ref.strategy_save_actor.remote(args.save_path, only_rank0=True) - # save optimizer checkpoint on all ranks - if args.need_optim_ckpt: - trainer_1_ref.strategy_save_actor_optim.remote('actor_optim_checkpoint_prompts_%d.pt' % - (torch.cuda.current_device()), - only_rank0=False) - trainer_2_ref.strategy_save_actor_optim.remote('actor_optim_checkpoint_prompts_%d.pt' % - (torch.cuda.current_device()), - only_rank0=False) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('prompt_path') - parser.add_argument('--trainer_strategy', - choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'], - default='naive') - parser.add_argument('--maker_strategy', - choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'], - default='naive') - parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt']) - parser.add_argument('--pretrain', type=str, default=None) - parser.add_argument('--save_path', type=str, default='actor_checkpoint_prompts.pt') - parser.add_argument('--need_optim_ckpt', type=bool, default=False) - parser.add_argument('--num_episodes', type=int, default=10) - parser.add_argument('--max_timesteps', type=int, default=10) - parser.add_argument('--update_timesteps', type=int, default=10) - parser.add_argument('--max_epochs', type=int, default=5) - parser.add_argument('--train_batch_size', type=int, default=8) - parser.add_argument('--experience_batch_size', type=int, default=8) - parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank") - - 
parser.add_argument('--debug', action='store_true') - args = parser.parse_args() - main(args) diff --git a/applications/Chat/examples/ray/2m2t.sh b/applications/Chat/examples/ray/2m2t.sh deleted file mode 100644 index bd8ca84a58fb..000000000000 --- a/applications/Chat/examples/ray/2m2t.sh +++ /dev/null @@ -1,23 +0,0 @@ -set_n_least_used_CUDA_VISIBLE_DEVICES() { - local n=${1:-"9999"} - echo "GPU Memory Usage:" - local FIRST_N_GPU_IDS=$(nvidia-smi --query-gpu=memory.used --format=csv \ - | tail -n +2 \ - | nl -v 0 \ - | tee /dev/tty \ - | sort -g -k 2 \ - | awk '{print $1}' \ - | head -n $n) - export CUDA_VISIBLE_DEVICES=$(echo $FIRST_N_GPU_IDS | sed 's/ /,/g') - echo "Now CUDA_VISIBLE_DEVICES is set to:" - echo "CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES" -} - -set_n_least_used_CUDA_VISIBLE_DEVICES 2 - -export RAY_NAMESPACE="admin" - -python 2m2t.py "path/to/prompts.csv" \ - --maker_strategy naive --trainer_strategy colossalai_zero2 --lora_rank 2 \ - --num_episodes 10 --max_timesteps 10 --update_timesteps 10 \ - --max_epochs 10 --debug diff --git a/applications/Chat/examples/ray/mmmt_dummy.py b/applications/Chat/examples/ray/mmmt_dummy.py index 767fe37030f6..082f4851777e 100644 --- a/applications/Chat/examples/ray/mmmt_dummy.py +++ b/applications/Chat/examples/ray/mmmt_dummy.py @@ -61,17 +61,17 @@ def main(args): def model_fn(): actor_cfg = AutoConfig.from_pretrained(args.pretrain) critic_cfg = AutoConfig.from_pretrained(args.critic_pretrain) - actor = get_actor_from_args(args.model, config=actor_cfg).half().cuda() - critic = get_critic_from_args(args.critic_model, config=critic_cfg).half().cuda() - reward_model = get_reward_model_from_args(args.critic_model, config=critic_cfg).half().cuda() + actor = get_actor_from_args(args.model, config=actor_cfg).requires_grad_(False).half().cuda() + critic = get_critic_from_args(args.critic_model, config=critic_cfg).requires_grad_(False).half().cuda() + reward_model = get_reward_model_from_args(args.critic_model, 
config=critic_cfg).requires_grad_(False).half().cuda() if args.initial_model_quant_ckpt is not None and args.model == 'llama': # quantize initial model with low_resource_init(), no_init_weights(): initial_model = get_actor_from_args(args.model, config=actor_cfg) initial_model.model = llama_load_quant(initial_model.model, args.initial_model_quant_ckpt, args.quant_bits, - args.quant_group_size).cuda() + args.quant_group_size).cuda().requires_grad_(False) else: - initial_model = get_actor_from_args(args.model, config=actor_cfg).half().cuda() + initial_model = get_actor_from_args(args.model, config=actor_cfg).requires_grad_(False).half().cuda() return actor, critic, reward_model, initial_model # configure Experience Maker diff --git a/applications/Chat/examples/ray/mmmt_prompt.py b/applications/Chat/examples/ray/mmmt_prompt.py new file mode 100644 index 000000000000..d2398d451c7b --- /dev/null +++ b/applications/Chat/examples/ray/mmmt_prompt.py @@ -0,0 +1,191 @@ +import argparse +import os +import socket +from functools import partial + +import ray +import torch +import pandas as pd +from coati.quant import llama_load_quant, low_resource_init +from coati.ray.detached_trainer_ppo import DetachedPPOTrainer +from coati.ray.experience_maker_holder import ExperienceMakerHolder +from coati.ray.utils import ( + get_actor_from_args, + get_critic_from_args, + get_receivers_per_sender, + get_reward_model_from_args, + get_strategy_from_args, +) +from torch.utils.data import DataLoader +from transformers import AutoConfig, AutoTokenizer +from transformers.modeling_utils import no_init_weights + + +def get_free_port(): + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind(('', 0)) + return s.getsockname()[1] + + +def get_local_ip(): + with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: + s.connect(('8.8.8.8', 80)) + return s.getsockname()[0] + + +def main(args): + master_addr = str(get_local_ip()) + # trainer_env_info + trainer_port = str(get_free_port()) 
+ env_info_trainers = [{ + 'local_rank': '0', + 'rank': str(rank), + 'world_size': str(args.num_trainers), + 'master_port': trainer_port, + 'master_addr': master_addr + } for rank in range(args.num_trainers)] + + # maker_env_info + maker_port = str(get_free_port()) + env_info_makers = [{ + 'local_rank': '0', + 'rank': str(rank), + 'world_size': str(args.num_makers), + 'master_port': maker_port, + 'master_addr': master_addr + } for rank in range(args.num_makers)] + + # configure tokenizer + tokenizer = AutoTokenizer.from_pretrained(args.pretrain) + tokenizer.pad_token = tokenizer.eos_token + + def model_fn(): + actor = get_actor_from_args(args.model, args.pretrain).requires_grad_(False).half().cuda() + critic = get_critic_from_args(args.model, args.critic_pretrain).requires_grad_(False).half().cuda() + reward_model = get_reward_model_from_args(args.model, args.critic_pretrain).requires_grad_(False).half().cuda() + if args.initial_model_quant_ckpt is not None and args.model == 'llama': + # quantize initial model + actor_cfg = AutoConfig.from_pretrained(args.pretrain) + with low_resource_init(), no_init_weights(): + initial_model = get_actor_from_args(args.model, config=actor_cfg) + initial_model.model = llama_load_quant(initial_model.model, args.initial_model_quant_ckpt, args.quant_bits, + args.quant_group_size).cuda().requires_grad_(False) + else: + initial_model = get_actor_from_args(args.model, args.pretrain).requires_grad_(False).half().cuda() + return actor, critic, reward_model, initial_model + + # configure Experience Maker + experience_holder_refs = [ + ExperienceMakerHolder.options(name=f"maker{i}", num_gpus=1, max_concurrency=2).remote( + detached_trainer_name_list=[ + f'trainer{x}' + for x in get_receivers_per_sender(i, args.num_makers, args.num_trainers, allow_idle_sender=False) + ], + strategy_fn=partial(get_strategy_from_args, args.maker_strategy), + model_fn=model_fn, + env_info=env_info_maker, + kl_coef=0.1, + debug=args.debug, + # 
sync_models_from_trainers=True, + # generation kwargs: + max_length=512, + do_sample=True, + temperature=1.0, + top_k=50, + pad_token_id=tokenizer.pad_token_id, + eos_token_id=tokenizer.eos_token_id, + eval_performance=True, + use_cache=True, + ) + for i, env_info_maker in enumerate(env_info_makers) + ] + + def trainer_model_fn(): + actor = get_actor_from_args(args.model, args.pretrain).half().cuda() + critic = get_critic_from_args(args.model, args.critic_pretrain).half().cuda() + return actor, critic + + # configure Trainer + trainer_refs = [ + DetachedPPOTrainer.options(name=f"trainer{i}", num_gpus=1, max_concurrency=2).remote( + experience_maker_holder_name_list=[ + f"maker{x}" + for x in get_receivers_per_sender(i, args.num_trainers, args.num_makers, allow_idle_sender=True) + ], + strategy_fn=partial(get_strategy_from_args, args.trainer_strategy), + model_fn=trainer_model_fn, + env_info=env_info_trainer, + train_batch_size=args.train_batch_size, + buffer_limit=16, + eval_performance=True, + debug=args.debug, + ) + for i, env_info_trainer in enumerate(env_info_trainers) + ] + + dataset_size = args.experience_batch_size * 4 + + def build_dataloader(): + def tokenize_fn(texts): + batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True) + return {k: v.cuda() for k, v in batch.items()} + + dataset = pd.read_csv(args.prompt_path)['prompt'] + dataloader = DataLoader(dataset=dataset, + batch_size=dataset_size, + shuffle=True, + collate_fn=tokenize_fn + ) + return dataloader + + # uncomment this function if sync_models_from_trainers is True + # ray.get([ + # trainer_ref.sync_models_to_remote_makers.remote() + # for trainer_ref in trainer_refs + # ]) + + wait_tasks = [] + + for experience_holder_ref in experience_holder_refs: + wait_tasks.append( + experience_holder_ref.workingloop.remote(build_dataloader, + num_steps=args.experience_steps)) + + total_steps = args.experience_batch_size * args.experience_steps * \ + 
args.num_makers // (args.num_trainers * args.train_batch_size) + for trainer_ref in trainer_refs: + wait_tasks.append(trainer_ref.fit.remote(total_steps, args.update_steps, args.train_epochs)) + + ray.get(wait_tasks) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--prompt_path', type=str, default=None) + parser.add_argument('--num_makers', type=int, default=1) + parser.add_argument('--num_trainers', type=int, default=1) + parser.add_argument('--trainer_strategy', + choices=[ + 'naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2', 'colossalai_gemini_cpu', + 'colossalai_zero2_cpu' + ], + default='naive') + parser.add_argument('--maker_strategy', choices=['naive'], default='naive') + parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama']) + parser.add_argument('--critic_model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama']) + parser.add_argument('--pretrain', type=str, default=None) + parser.add_argument('--critic_pretrain', type=str, default=None) + parser.add_argument('--experience_steps', type=int, default=4) + parser.add_argument('--experience_batch_size', type=int, default=8) + parser.add_argument('--train_epochs', type=int, default=1) + parser.add_argument('--update_steps', type=int, default=2) + parser.add_argument('--train_batch_size', type=int, default=8) + parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank") + + parser.add_argument('--initial_model_quant_ckpt', type=str, default=None) + parser.add_argument('--quant_bits', type=int, default=4) + parser.add_argument('--quant_group_size', type=int, default=128) + parser.add_argument('--debug', action='store_true') + args = parser.parse_args() + ray.init(namespace=os.environ["RAY_NAMESPACE"]) + main(args) From e3dc3151a77a9b66d32de44962ce84fd4f44ad9b Mon Sep 17 00:00:00 2001 From: csric <59389055+CsRic@users.noreply.github.com> Date: Fri, 28 Apr 2023 15:46:54 +0800 Subject: 
[PATCH 12/26] state_dict sending adapts to new unwrap function (#20) * prompt example * prompt load csv data * remove legacy try * maker models require_grad set to False * working on zero redundancy update * mmmt_prompt example; naive strategy requires_grad state_dict & sharding; maker model requires_no_grad. * remove legacy examples * remove legacy examples * remove replay buffer tp state. bad design * opt benchmark * better script * nothing * [chat] strategy refactor unwrap model * [chat] strategy refactor save model * [chat] add docstr * [chat] refactor trainer save model * [chat] fix strategy typing * [chat] refactor trainer save model * [chat] update readme * [chat] fix unit test * working on lora reconstruction * state_dict sending adapts to new unwrap function * remove comments --------- Co-authored-by: csric Co-authored-by: ver217 --- applications/Chat/coati/models/generation.py | 1 + .../Chat/coati/ray/detached_trainer_ppo.py | 27 ++-------- applications/Chat/coati/ray/utils.py | 50 ++++++++++++++----- .../Chat/coati/trainer/strategies/base.py | 8 +-- .../coati/trainer/strategies/colossalai.py | 24 ++++----- .../Chat/coati/trainer/strategies/naive.py | 26 ++++++---- applications/Chat/examples/ray/.gitignore | 1 + applications/Chat/examples/ray/benchmark.sh | 39 +++++++++++++++ applications/Chat/examples/ray/mmmt_prompt.py | 4 +- 9 files changed, 115 insertions(+), 65 deletions(-) create mode 100644 applications/Chat/examples/ray/.gitignore create mode 100644 applications/Chat/examples/ray/benchmark.sh diff --git a/applications/Chat/coati/models/generation.py b/applications/Chat/coati/models/generation.py index 961f2aec677d..f57c9458a271 100644 --- a/applications/Chat/coati/models/generation.py +++ b/applications/Chat/coati/models/generation.py @@ -77,6 +77,7 @@ def sample(model: nn.Module, input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) if update_model_kwargs_fn is not None: model_kwargs = update_model_kwargs_fn(outputs, model_kwargs) + # 
if eos_token was found in one sentence, set sentence to finished if eos_token_id is not None: unfinished_sequences = unfinished_sequences.mul((next_tokens != eos_token_id).long()) diff --git a/applications/Chat/coati/ray/detached_trainer_ppo.py b/applications/Chat/coati/ray/detached_trainer_ppo.py index d30158019d65..d3dfc6e93a46 100644 --- a/applications/Chat/coati/ray/detached_trainer_ppo.py +++ b/applications/Chat/coati/ray/detached_trainer_ppo.py @@ -120,13 +120,14 @@ def _update_remote_makers(self, fully_update: bool = False, **config): ray.get(tasks) # sending loop tasks = [] - for state_dict_shard in self._get_model_state_dict_shard(self.strategy._unwrap_model(self.actor), **config): + + for state_dict_shard in self._get_model_state_dict_shard(self.actor, **config): for target_holder in self.target_holder_list: tasks.append( target_holder.update_experience_maker.remote(new_actor_state_dict=state_dict_shard, fully_update=fully_update)) # sending loop - for state_dict_shard in self._get_model_state_dict_shard(self.strategy._unwrap_critic(self.critic), **config): + for state_dict_shard in self._get_model_state_dict_shard(self.critic, **config): for target_holder in self.target_holder_list: tasks.append( target_holder.update_experience_maker.remote(new_critic_state_dict=state_dict_shard, @@ -176,28 +177,6 @@ def strategy_save_actor_optim(self, path: str, only_rank0: bool = False) -> None def strategy_save_critic_optim(self, path: str, only_rank0: bool = False) -> None: self.strategy.save_optimizer(self.critic_optim, path, only_rank0) - def _get_unwrapped_actor(self): - if False: - pass - elif isinstance(self.strategy, ColossalAIStrategy): - ret = Actor(self.strategy._unwrap_model(self.actor)) - return ret - elif isinstance(self.strategy, DDPStrategy): - return Actor(self.strategy._unwrap_actor(self.actor)) - elif isinstance(self.strategy, NaiveStrategy): - return self.actor - - def _get_unwrapped_critic(self): - if False: - pass - elif isinstance(self.strategy, 
ColossalAIStrategy): - ret = self.strategy._unwrap_model(self.critic) - return ret - elif isinstance(self.strategy, DDPStrategy): - return self.critic.module - elif isinstance(self.strategy, NaiveStrategy): - return self.critic - def _get_model_state_dict_shard(self, model: torch.nn.Module, **config): # try: # self.strategy.merge_lora_weight(model) diff --git a/applications/Chat/coati/ray/utils.py b/applications/Chat/coati/ray/utils.py index 6cd7c564cc92..48f33e70c632 100644 --- a/applications/Chat/coati/ray/utils.py +++ b/applications/Chat/coati/ray/utils.py @@ -120,18 +120,6 @@ def set_dist_env(env_info: Dict[str, str]): os.environ['MASTER_ADDR'] = env_info['master_addr'] -def state_dict_to(state_dict: Dict[str, Any], - dtype: torch.dtype = torch.float16, - device: torch.device = torch.device('cpu')): - ''' - keep state_dict intact - ''' - new_state_dict = {} - for k, v in state_dict.items(): - new_state_dict[k] = v.to(dtype=dtype, device=device) - return new_state_dict - - def get_model_numel(model: nn.Module) -> int: numel = sum(p.numel() for p in model.parameters()) return numel @@ -150,3 +138,41 @@ def get_receivers_per_sender(sender_idx: int, num_senders: int, num_receivers: i # a receiver may have more than one sender target_receivers.append(sender_idx % num_receivers) return target_receivers + + +def state_dict_to(state_dict: Dict[str, Any], + dtype: torch.dtype = torch.float16, + device: torch.device = torch.device('cpu')): + ''' + keep state_dict intact + ''' + new_state_dict = OrderedDict() + for k, v in state_dict.items(): + new_state_dict[k] = v.to(dtype=dtype, device=device) + return new_state_dict + + +def state_dict_filter_lora(state_dict: Dict[str, Any], keep_non_lora = False): + ''' + if keep_non_lora, also return non_lora state_dict + ''' + state_dict_lora = OrderedDict() + state_dict_non_lora = OrderedDict() + for k, v in state_dict: + if 'lora_A' in k or 'lora_B' in k: + state_dict_lora[k] = v + elif keep_non_lora: + state_dict_non_lora[k] = v 
+ if keep_non_lora: + return state_dict_lora, state_dict_non_lora + else: + return state_dict_lora + + +def state_dict_lora_reconstruct(state_dict_lora: Dict[str, Any]): + ''' + xxx.lora_A, xxx.lora_B -->> xxx.weight + ''' + state_dict_reconstruct = OrderedDict() + + \ No newline at end of file diff --git a/applications/Chat/coati/trainer/strategies/base.py b/applications/Chat/coati/trainer/strategies/base.py index c10bc2d185a8..bd30422022ae 100644 --- a/applications/Chat/coati/trainer/strategies/base.py +++ b/applications/Chat/coati/trainer/strategies/base.py @@ -104,10 +104,6 @@ def unwrap_model(model: nn.Module) -> nn.Module: """ return get_base_model(model) - @staticmethod - def _unwrap_critic(critic: Critic) -> nn.Module: - return Strategy._unwrap_model(critic) - @abstractmethod def save_model(self, model: nn.Module, path: str, only_rank0: bool = True) -> None: pass @@ -134,3 +130,7 @@ def save_pretrained(self, only_rank0: bool = True, tokenizer: Optional[PreTrainedTokenizerBase] = None) -> None: pass + + @abstractmethod + def get_model_state_dict_shard(self, model: nn.Module, **config): + pass \ No newline at end of file diff --git a/applications/Chat/coati/trainer/strategies/colossalai.py b/applications/Chat/coati/trainer/strategies/colossalai.py index acee42d7b5b3..88268b677eb2 100644 --- a/applications/Chat/coati/trainer/strategies/colossalai.py +++ b/applications/Chat/coati/trainer/strategies/colossalai.py @@ -171,18 +171,6 @@ def save_optimizer(self, optimizer: Optimizer, path: str, only_rank0: bool = Fal f'Optimizer states are sharded when using ColossalAIStrategy. 
Only rank0 is not supported.') torch.save(optimizer.state_dict(), path) - def get_model_state_dict_shard(self, model: nn.Module, **config): - if self.stage != 3: - yield from super().get_model_state_dict_shard(model, **config) - else: - # unwrapped_model = self._unwrap_model(model) - # for module in unwrapped_model.modules(): - # if isinstance(module, LoraLinear): - # module.merge_weights = True - # module.eval() - model: ZeroDDP = model - yield from model.state_dict_shard(max_shard_size=1024, only_rank_0=False) - def unwrap_model(self, model: nn.Module) -> nn.Module: base_model: Union[nn.Module, ZeroDDP] = get_base_model(model) if self.stage == 3: @@ -198,3 +186,15 @@ def save_pretrained(self, if self.stage == 3: raise RuntimeError('ColossalAI strategy with stage-3 does not support save_pretrained() now') super().save_pretrained(model, path, only_rank0, tokenizer) + + def get_model_state_dict_shard(self, model: nn.Module, **config): + if self.stage != 3: + yield from super().get_model_state_dict_shard(model, **config) + else: + # unwrapped_model = self._unwrap_model(model) + # for module in unwrapped_model.modules(): + # if isinstance(module, LoraLinear): + # module.merge_weights = True + # module.eval() + base_model: ZeroDDP = get_base_model(model) + yield from base_model.state_dict_shard(max_shard_size=1024, only_rank_0=False) diff --git a/applications/Chat/coati/trainer/strategies/naive.py b/applications/Chat/coati/trainer/strategies/naive.py index 5d3da8ee3478..972deebeaa0d 100644 --- a/applications/Chat/coati/trainer/strategies/naive.py +++ b/applications/Chat/coati/trainer/strategies/naive.py @@ -9,6 +9,9 @@ import torch.optim as optim from coati.models.base import get_base_model from coati.replay_buffer import ReplayBuffer +from coati.models.base import RewardModel +from coati.models.lora import LoraLinear +from coati.replay_buffer import ReplayBuffer from torch.optim import Optimizer from torch.utils.data import DataLoader from transformers.modeling_utils 
import PreTrainedModel @@ -71,8 +74,20 @@ def load_optimizer(self, optimizer: Optimizer, path: str, map_location: Any = No state_dict = torch.load(path, map_location=map_location) optimizer.load_state_dict(state_dict) + def save_pretrained(self, + model: nn.Module, + path: str, + only_rank0: bool = True, + tokenizer: Optional[PreTrainedTokenizerBase] = None) -> None: + unwrapped_model = self.unwrap_model(model) + assert isinstance(unwrapped_model, PreTrainedModel) + unwrapped_model.save_pretrained(path) + if tokenizer is not None: + tokenizer.save_pretrained(path) + def get_model_state_dict_shard(self, model: nn.Module, **config): # TODO: implement sharding on naive strategy + model = self.unwrap_model(model) if 'requires_grad_only' in config and config['requires_grad_only'] == True: state_dict = get_grad_required_state_dict(model) else: @@ -111,14 +126,3 @@ def _try_init_dist(self, force: bool = False) -> None: except Exception as e: if force: raise e - - def save_pretrained(self, - model: nn.Module, - path: str, - only_rank0: bool = True, - tokenizer: Optional[PreTrainedTokenizerBase] = None) -> None: - unwrapped_model = self.unwrap_model(model) - assert isinstance(unwrapped_model, PreTrainedModel) - unwrapped_model.save_pretrained(path) - if tokenizer is not None: - tokenizer.save_pretrained(path) diff --git a/applications/Chat/examples/ray/.gitignore b/applications/Chat/examples/ray/.gitignore new file mode 100644 index 000000000000..4cf8dd15619e --- /dev/null +++ b/applications/Chat/examples/ray/.gitignore @@ -0,0 +1 @@ +logs/* \ No newline at end of file diff --git a/applications/Chat/examples/ray/benchmark.sh b/applications/Chat/examples/ray/benchmark.sh new file mode 100644 index 000000000000..3852684007b7 --- /dev/null +++ b/applications/Chat/examples/ray/benchmark.sh @@ -0,0 +1,39 @@ + +PROMPT_PATH=/home/lccsr/data3/awesome-chatgpt-prompts/prompts.csv + +num_trainers=4 +num_makers=4 + +# "facebook/opt-2.7b" +for pretrain in "facebook/opt-1.3b" 
"facebook/opt-6.7b" "facebook/opt-13b" +do + + for experience_batch_size in 16 32 64 + do + for train_batch_size in 16 32 64 + do + for update_steps in 8 32 128 + do + # set a big enough experience_steps for twice maker-update + experience_steps=$((2*num_trainers*train_batch_size*update_steps/num_makers/experience_batch_size)) + + config_string=${num_trainers}_${num_makers}_pretrain_${pretrain##*/}_experience_batch_size_${experience_batch_size}_train_batch_size_${train_batch_size}_update_steps_${update_steps}_experience_steps_${experience_steps} + echo running: ${config_string} + + nohup python mmmt_prompt.py \ + --prompt_path $PROMPT_PATH \ + --trainer_strategy colossalai_gemini --maker_strategy naive \ + --model 'opt' \ + --pretrain $pretrain \ + --critic_pretrain "facebook/opt-350m" \ + --num_trainers $num_trainers \ + --num_makers $num_makers \ + --experience_steps $experience_steps \ + --experience_batch_size $experience_batch_size \ + --update_steps $update_steps \ + --train_batch_size $train_batch_size \ + --debug > logs/output_${config_string}.txt 2>&1 + done + done + done +done \ No newline at end of file diff --git a/applications/Chat/examples/ray/mmmt_prompt.py b/applications/Chat/examples/ray/mmmt_prompt.py index d2398d451c7b..6f43d8950758 100644 --- a/applications/Chat/examples/ray/mmmt_prompt.py +++ b/applications/Chat/examples/ray/mmmt_prompt.py @@ -101,8 +101,8 @@ def model_fn(): ] def trainer_model_fn(): - actor = get_actor_from_args(args.model, args.pretrain).half().cuda() - critic = get_critic_from_args(args.model, args.critic_pretrain).half().cuda() + actor = get_actor_from_args(args.model, args.pretrain, lora_rank=args.lora_rank).half().cuda() + critic = get_critic_from_args(args.model, args.critic_pretrain, lora_rank=args.lora_rank).half().cuda() return actor, critic # configure Trainer From 8588fe3a6bc826a2044e38649bcaf0f16cb71753 Mon Sep 17 00:00:00 2001 From: csric <59389055+CsRic@users.noreply.github.com> Date: Sat, 6 May 2023 14:39:12 
+0800 Subject: [PATCH 13/26] [chat-ray] add readme (#21) * add readme * transparent graph * add note background --------- Co-authored-by: csric --- applications/Chat/coati/ray/README.md | 160 ++++++++++++++++++ applications/Chat/coati/ray/assets/2m1t.png | Bin 0 -> 54081 bytes applications/Chat/coati/ray/assets/2m2t.png | Bin 0 -> 171753 bytes .../Chat/coati/ray/assets/2m2t_quantize.png | Bin 0 -> 134715 bytes .../Chat/coati/ray/assets/basic_structure.png | Bin 0 -> 41408 bytes .../Chat/coati/ray/assets/tp_ddp_hybrid.png | Bin 0 -> 111006 bytes applications/Chat/coati/ray/utils.py | 1 + 7 files changed, 161 insertions(+) create mode 100644 applications/Chat/coati/ray/README.md create mode 100644 applications/Chat/coati/ray/assets/2m1t.png create mode 100644 applications/Chat/coati/ray/assets/2m2t.png create mode 100644 applications/Chat/coati/ray/assets/2m2t_quantize.png create mode 100644 applications/Chat/coati/ray/assets/basic_structure.png create mode 100644 applications/Chat/coati/ray/assets/tp_ddp_hybrid.png diff --git a/applications/Chat/coati/ray/README.md b/applications/Chat/coati/ray/README.md new file mode 100644 index 000000000000..f9133b049446 --- /dev/null +++ b/applications/Chat/coati/ray/README.md @@ -0,0 +1,160 @@ +# Distributed PPO Training on Stage 3 + +## Detach Experience Makers and Trainers + + We can completely separate the trainers and makers. + +

+ +

+ +- The experience maker performs inference, produces experience, and remotely delivers it to the trainer (1). +- The trainer consumes experience to train models, and periodically transmits new model parameters to the maker (2.1, 2.2). +- Using an experience buffer to overlap transmission and computing. + +In this manner, each node will work continuously without model idle time, and different optimization strategies can be applied for inference and training to meet the needs of speed or storage. It is also helpful for scalability. + +`DetachedPPOTrainer` and `ExperienceMakerHolder` are Ray Actors (distinguished from Actor Model), representing Trainer and Experience Maker on the graph above, respectively. + +[More about Ray Core](https://docs.ray.io/en/latest/ray-core/walkthrough.html) + +## Usage + +See examples at `ColossalAI/application/Chat/examples/ray` + +### Setup Makers + +- define makers' environment variables : + + ```python + env_info_makers = [{ + 'local_rank': '0', + 'rank': str(rank), + 'world_size': str(num_makers), + 'master_port': maker_port, + 'master_addr': master_addr + } for rank in range(num_makers)] + + ``` +- define maker models : + ```python + def model_fn(): + actor = get_actor_from_args(...) + critic = get_critic_from_args(...) + reward_model = get_reward_model_from_args(...) + initial_model = get_actor_from_args(...) + return actor, critic, reward_model, initial_model + + ``` +- set experience_holder_refs : + + ```python + experience_holder_refs = [ + ExperienceMakerHolder.options( + name=f"maker_{i}", + num_gpus=1, + max_concurrency=2 + ).remote( + detached_trainer_name_list=[f"trainer_{x}" for x in target_trainers(...)], + model_fn=model_fn, + ...) + for i, env_info_maker in enumerate(env_info_makers) + ] + ``` + The names in the `detached_trainer_name_list` refer to the target trainers that the maker should send experience to. + We set a trainer's name the same as a maker, by `.options(name="str")`. See below. 
+ +### Setup Trainers + +- define trainers' environment variables : + ```python + env_info_trainers = [{ + 'local_rank': '0', + 'rank': str(rank), + 'world_size': str(num_trainers), + 'master_port': trainer_port, + 'master_addr': master_addr + } for rank in range(num_trainers)] + ``` +- define trainer models : + + ```python + def trainer_model_fn(): + actor = get_actor_from_args(...) + critic = get_critic_from_args(...) + return actor, critic + ``` +- set trainer_refs : + ```python + trainer_refs = [ + DetachedPPOTrainer.options( + name=f"trainer{i}", + num_gpus=1, + max_concurrency=2 + ).remote( + experience_maker_holder_name_list=[f"maker{x}" for x in target_makers(...)], + model_fn = trainer_model_fn(), + ...) + for i, env_info_trainer in enumerate(env_info_trainers) + ] + ``` + The names in `experience_maker_holder_name_list` refer to the target makers that the trainer should send updated models to. + By setting `detached_trainer_name_list` and `experience_maker_holder_name_list`, we can customize the transmission graph. + +### Launch Jobs +- define data_loader : + ```python + def data_loader_fn(): + return = torch.utils.data.DataLoader(dataset=dataset) + + ``` +- launch makers : + ```python + wait_tasks = [] + for experience_holder_ref in experience_holder_refs: + wait_tasks.append( + experience_holder_ref.workingloop.remote(data_loader_fn(), + num_steps=experience_steps)) + + ``` + +- launch trainers : + ```python + for trainer_ref in trainer_refs: + wait_tasks.append(trainer_ref.fit.remote(total_steps, update_steps, train_epochs)) + ``` + +- wait for done : + ```python + ray.get(wait_tasks) + ``` + +## Flexible Structure + +We can deploy different strategies to makers and trainers. Here are some notions. + +### 2 Makers 1 Trainer +

+ +

+ +### 2 Makers 2 Trainer2 +

+ +

+ +### Maker Inference Quantization +

+ +

+ +### Tensor Parallel + +

+ +

+ +## TODO + +- [ ] Support LoRA +- [ ] Support TP & PP diff --git a/applications/Chat/coati/ray/assets/2m1t.png b/applications/Chat/coati/ray/assets/2m1t.png new file mode 100644 index 0000000000000000000000000000000000000000..9281943570d3686a190f1e03f1b8884ca90b3f97 GIT binary patch literal 54081 zcmd>m1z45ox;7vvAxNWuq;z*F-6FAAbV)ZzcPdHM20s>+N z>S=IBJcWf1{12iNL`EE;^bP3@0)oh#vy`T@t-G0}jVS^Rm&DP(Xt<=nzfCL+%^eMG zX*k&}O=!3zX*fCEJRPL0_>`5+l-(qqWq6F8*;%B)X>f&|p{?oBHOhwOrYsz=vxg6{ zb(e88ykT+8-o(@fTxa5bbQK35>^9h)CLYIU*f|dG;$$~>gum~^2~|ta;}>wSAKhr~ zVrgRPbbRyCDQA0o8)wTKzdmVfZ)ay}?EI?>4ILfr-F|(UnZ3>NDv#fK11#iMH^EM* z8XMaDcvi#G#M$ELT7I76&x2hpYien30e>?G`|&TfhVYAzPdHf^n%KJ?-G6xMP;!p; z_TW~jL#0TV+Q6g-m&?(8(*NSu@89oeYIpqde}4lP{5zaI;EOXg0c$-zw0Cs2us652 zGqjQV<*c}cv#kyIWDXiGaZ`6oXU!A;)&l=#XXQEmMG~fRcK88I+lL2s&W;|io4y~M zxC{2%FSi{Y!S6FMbh0poDGK&tCuc`{Yf}k(8+%7^hK7@iOM+j3AIL@g$R2JO{vv{7 zsW{oYI2s>oko(vKoedq$O^+o7S079OhtK3Zyb*58ruMd`;NyTU98GNuoh@BYUc||6 zcx+SV->*A%12IQOL)cyL3vSq3+BrMHzVi6V%CHmQf@A;Sf`12W-QY5g$uyk2Hb7|NX7-04Zv0&f zybdlf%LO{%GBaZb|A8yNpW-!#{REzNbTjyrqx+A}AF0jh*LJ92Xar&h*GY>rw6Qd| z1ClWY7H$f3FYapU=xhmmQ|$N$TT2rYm@SJtnmSo}8X6tiHO$nG1rLPEBTmC33C{dz zDsX>*J=FAAA;;@K4kpJB1h*c#&auqCi}br1*;&~+c=?Xr@W%|s)*Y^4xXIX?nK=O~ zg$2!HEBJ5dk?Yvxf2Buoj{Stp%^mGsZu}?}xCQ)ap}@s^Y})(+tOCa(a{DEYa>B2& zI2l9XUICZMNk99QRQ`=Ab8#K5?Y|*{@6!A4%izQkIRuX5?q9z|?*Dp;K*@F{-y^A; zg{8Bp>J7ug(C7v-HgJp+5gz8F(uTH{HZYyZn%cOU!n_S!1N-4b{FRnI^!J}S6ev@chLxw6%9V4jZu4buum;9@{$`I-mF* zluM>3k4;T1Pagg{4#6Jqqr-kT^#5AoCnUpcl=AmXkuu}Z}w9$@*4A-8kzlP#0U@Xe}@?Pe=tT>Y3N@nLo+io&VONw zyhgk{ynm4le-kC)hku6*1%8krjF(>;qLu*jqe$R3zf+VO+VU_fWB>n4%XJ>B*tA7Eo{~jKXPyPPl`DazZ zFTwdw$I|b?^T+7<(=7B?Bq45i8T-e846LmA8IXbNM-lKFM{4?gxaOasz5kK2j_bIv zf@>eH)e`}W10KXqR&{Vd@MnrTIO0F)ZkozJge<_e02T@6K0ot-6E1gj=SiC3UqXa` zS>}Fs?qh*)AIs;2Jm5?J%f)eAlb;AHCyAfqcyVIAe<#!P>u~n($sM@VPqI63J3Gnl z!1e#vu{*yPb#ng>NcNuw-rVqP19v@mje5e=IN{joq^)tlPyQ+2`>O%f?~Pu^vHNF* z|97qY*AVHr@P|v{q{#kb7?k@*#Pma~`XwL3P~d+Nul{xn`o{&#cU%m>ZS;561|0A- 
z@~2GnM{M;EL8yORj&R19{aBFj9CE^N;qHIZ)!`xHPl@qIZ1qopsDECLaQpl_1bb!QWcsJY z2ro)bnB#v6;Pd`SX8t3Zr+-|A@bdRN(mV?Fe}@czq(lD+&C@?E!oSlDf22o$+B`i@ zMkl%)|BUwAk*%HV9{mC7f4A7*Nsz*k{-5DS;G>Kuk`R256ISE^xcAd3&|?DrhbhqC zG*5w={y%G;{sDC#r%rfN3$A^*R!;=1|5)>s7hb^sl02pU+UDuMgb4q#%>7=%9t-4e zY@YHSL!lF4<$ou`0hjtoh67%}pJX`T`v3c3H{ZV}lYYKI0qm-{0UK362A8lc0^+~M z9REX&?O$zSuz?M?iyInS!)OCL6Zj;fiK&^P%i(za51jD#Jr4gCS03+|0efnGJ*4}~ zgH7#Bz@C7^eJ92?hE7gkg79Z8Qh4$_v2Oxy2q%pLp7#HK>aKEcB1qm%w*i%KeLA`WJfmFB_AzKyZM~u)&5} ze%WmVJAeu3@R#qKl#X^Y!4ALQ^?Nrd{crSecp5$F;ata{@2~Ul!>s;CcSiBTr!9`H z_?WKyR~w_glP^EL9PFU^ll!86tcl=N?7yps|JA-Ikmr712@F%he|TTie=DB-l3soU zH*g>S^U;i*m4lu47(f3r!q;(R{ky{#EW7{l@O60C|K;!n?>?UlUvTXD4+~%1e=vUi z=K4P$zZqo_keLlX1!pbACJOve|CO+_XNOz#m25S3MurF^}( z^QPfd=i}FJ%qlw;_ouv;V&+F37neF{53Xz`3o*V#L83tT3TFx8-wQE?J>`L!|{LHSZ(%E^FQAGdqQdjeuB(OaXB)2B-Z+ z9gwgKo>IdeB#MZP9OXHUt)e;TV{O$>HB^_BuR<+;NevMz5DmPZkPacr%Hc}_Htd~b zwCUAOG7|RB<+%NxQz%$2XlM2pg}5$WQWHh+)%cu?h=_fIAL(1u)D6~_Krsa5SlN#m z3;Vr!A)w1=;xePl<$#^bV zjVCceN#?;Nvy3)$t3WTN7ns*j45(u>1J$E3wZ(wovx*}NdWHyuWyt)&S@Jjm~W7eu`HDTQFknXWZIOA7rooFHqc_a&w_CD}3de$Ub=o%Utwj0n!2xW=l8RDY) z`-h<6hd@hE3;9vd^AygKGzK6nNlUZZ;PM%E)ca@Xf=@$Vy*3?)vR)v%u9YEQw-vwq zSy&~(1E%Z-1)!idq`0!GF;-p-$-)Qe#+umfq6mR%52}zP3t#Ty;#Q>Bg(hpaNZ<0y zX79a@1zvao%|bDy=WQ4pu3p{t7_*W{_^dA%XUkiP24r#Std@LeA1f`@Gm>EXFrbLX zyNK36xumfOSgUTzEynLdDX@tvzD5tdFW8K7$U0EICB`mJfe;l+f(SDLd{G1{HAF>! 
zG_0%X*Pv_33Bg}OurNe3Fz(2zB^ZlmW{TVmLrp7Ue{%RG570!j6ECpeK7)w;Pyn`2 zCoKzTIV~?0sjo_c0qhcBkKoVW6ixK3*8Qp%?8^y;C1V3KT7 zJTLSv`;;-e7O-Q}aV{V8FHH%l4ARJl7a|}^z?OFIl_ILWeg4JKDpJH?X!|2p2ipD9 zZa22qkVa0^)RiK{7$P0NagYFU-h{C$1-n3fLoTUR3h%WXS6m-=zW|>PWtf`b@QbXM znwSEe&OP~NuN2N-X=yVv@;v^iht^0LD&4(C)Gy5k*Qso69KbVN2l0CIygWUj@;whP zGPcoWB12Sp!sRD2A(l{q_cQx`ifO5~rd%9X;0p%c84H9j)A09lzo=Q6oOHsdQg zscvX^?lbuuaeOsO-+&88F(*O{XxYQ}wyfGcC1CT(3dH@Akb+4HA~msy0EVM$T$K=p z_u8BuRZA7tuG}xy4GR=pUn?fqaPI5BSugc!Y3p=#evhtNKrm+ry@s|^InuGFupgoz z<>D6T#*GS7P<+&)qsyaPU0Ja*H#grQz{iiB8>w~sz&GZ-dY7faKfF>#v_xHKd0dZ` zm38y-l`AnLA3l_RnVLGdA06%JAN=h=tFnqw^DI10Qe4dz6cXAjh2*q7ymjl=TS6A? zY~h35RlBast9LaTZ(|lnhZBDpA4hyiaUBOqKtRBGZ`(!CY4Yse#<2Z+uZ{WF0YBl@IE)QQ9Ti6>{(2zt5U|S2D+K)klAjdc`X` zFE4LJ{Y)5XqNuAYukGkFtvctKo<0mBR{Z-+8g|_<0S-1y97hdLEyL&R5PSZZdM z&sHziTy`NMirL{ z>-WgYM=GG8!1b3^8?B>I%f4Tx^<+|mI6QZuEsm*9$ZfIsSzH4Z_C3+-1EqT0Caw3l z&CRD0m^4b)XfkVYYch@(m4XLb6e+V*cIDU2&4c}kcmvmi`6pdSu~o>L$Vkn2=lO43 z|MVtB$SEc!#(*Z%xH;0F?D*vfNRyYY%VrBtO-@$Yz6eB_9dEotbmehx@@YdG?1%UP z6YcR+cQ0_=URhbG!RhY0pGdTN?CuDt=V_G_WqsbffA6sgbe-k7-mkHhK(0%1-3__Y zZX|K6NP{i!<;#~ul+cAYb(+{P4am~^FZjOBXMXqaQq<6g&Q6xaqO#|V19e_*=3!bl zLqr?Uk)Q7@3e65z*(>Vn>!adatO`U+z&#XqLmRMoQ`0lk5A@Sy*Vi^TMXuNmao6Ke zOD8yo8Dw>TeOI_b-$0c_^>K1inlUazf#CD~a+BB5cOP(>=hD#7@K8}*UTEj{w!nm0 z8m|(}J6OG^*?c+^dFsz$U|8>MO{KLnMz^)Lrjz@A3&0{5R%nSN&kG}BgE%^JK5A${ zL2^X>N_0^~M5Iusij~KHGgdFUiCt;Z?4(8_D{M4;UY1N`Pr#`&7Q>acePsqB#FyRKD=d3+JIby4@}ur2B=Ap< z)_IM^dSE-l<0#M6_232-?AKTgLZPprg`P&#&bKe^H?MYEf-%STkgyhJ_ z-6cD7Q8b+yxu>_+a-_zk+<6A#xx3tA`L0;&Rpb=U#!$5roecD|Dgk( zGYvAs{Its@Q+8pr+_JZyPBHl_SMw{xQ>T>`6|sPqzyH$zu6Zz^k|I44A&s^h*zL;K zujy>hzrCcF1;%kz&|~Gxb*dpNs0{b%!vOeD4&=c$rz8rVi|mpvXJ&^n|jn}w?5d9j|7D^L-n#zFap8SM-s+z>>+HQ)K!u7&z9Xfvggy-9{OjiAcaW6K;e0NyL zOI$w!ab7&jD}7w9B+>f6WSFQ{nwXpOn)vwQW(AqoJjcy<%ZPg)?B)j_ogSu5)|IZF zA!_A&h@$qjKthqVS*q@=;`K9_eJItj4?II4cBIss@{!6Fxs74!E^}{1+pY=9Kd;!n zrxAzk=op1RU3rSJCqA9EHNAM)A=c3auX?8{yKv#%h52XKUm$JoEY%0yJiyfZz{<=# 
zZzV4;Z!2NDEPND4Z1F&%NIig(k&!*%v-|LMWqFyQUr^X8G%Y;!-n|ACnw-jO{ME_a zTLsX%eT+7pN8&6s9*Z)N>TTH9(2sxB$$qVvJKLGd(5F4|z-9H-S%}BBN=0Q_oRuyF zf~!`f9?r?hc`K_xz80dYy7eWjI5{~Py-kOM`AAvpG>A5vNzu8M%@B}v3GfLC1xbkQ z+V8WJyDn&NkKZAew@rND9=n*l6+O$nWN#t+tW4r#&+M7b=Y&t6KhMM{33KpEfS_wsWj{9Bd|iV3s9HTNSP_K-B}rctjMbNLv>O|SPAvy~fs{VWpj>{A6%gp{7%6G#`3Sm_@pzK%wzVyFoMR~%ug&qBsqx*bQ6A~)hDs1g z7E+44lfz=j{7nS(yx1w3*Hrx@BPrh)F5B7JVHcRKQA-wHehQJa($^^3J<@L=Dgr+~mMPl*s!h_7_{;VCM$R0#^m#Il6_aiy({4yyPZcO?gj}4Rb(m=DP(Ac?aLIsv(GULpX(hcnuK&V6ygip27Dzp zcr+}WbB`_wCu~e5MubrR^QmP%=A>)B!jqBm)X}r<^{*Zp9fCF#n5ze7TDCj{_9882 zPea=~FgGcT&(cp=9Gt!Hc533wWOw;i(R9l5k=C(+Bw8uftVr6kM6WK@w|shXV9kb> z8PmiZHfVUti*R^xK#_odWAg&>zF{nd{?_c3ZxY(PeB#sg1sGKQkZg`~u=qAebeN^n zVDc;`;a$q*8%?3w9h`r(EfuI{pjhbohOx@+)q-z?t8a}1N#}V!%~mPX7w;+8UT4Zq zuWc1s%p?aJ;V3UG_;Q3YqHC!PR(jM3RM@=J49;dv-3xeukEH6$OaZwjhi42+ZT_FJ zVWw2JAfIjZxnB`cR$j5uwLaA=GAqme?o+neChN&F61P|mo{A`FZ=WuEq5t zKCL^?G9m0@g@R6{me$I%DT6@O|KS$LVTc~215#t^Dg#Rp2yD}+*JmTdnQWa7c2MEo zYkcw2x>b(FcQs@ycL+&U$KTPqItm&Lo$&c+h;y822hkC0u?w~x6}6t0jy=6fpDTW8 zeSBEOARFpcyC1Ys5+)@lFiX5T?2yR0tw+q~q|%K8&6&s+&mL+kLmWgq22gF(*NLd& z9?edsSfx3IzAU(7f0;1e+frk7wToGLz_0e|;G~kg9A0ReVzM9|baT}@m26R5tiQ}} z-}qq-IS2^k?f^Bw3_Lvzp>ikA&3rIiR*h`vv6jh<6QtcN$I*+DPObWRfa&2iCP?(j zkW@E{pSq-IHe=T+j2l4%@#5D${&1x1^3Qx6rW*(uf z7V@t{*A3`lW%9F71EP&61HkfepF+;K!h&INcqr73sx7X{aqXx+L_pl3y6za|X*VW> z|Djl6G8-!5vny)^s-sPG=GE4S*x*wQyu1oJEW>WA)v9@5Pd+sfr*G-Vmp$eC# zF1U!ZJoO4Xz@@0DnBwE(W6<&Xwa1smk=i(2U0rkxboAu*c($~~@o@)A8XD6o?UGR` z$@)Yq$lX(iYEu@3nc)%^N40FY2yfam#&6MX{bp@CB`;azYh41POXukh;E6qJz4aQ?9W>oYd^DfPd>)9CMuq0yBf0mk@yi10&M_bKB9C%o^ zo|gh{Nv}(2Y2z5xramUT&M;wakkzow79~Gn6BRVLa*6mFN6pN` z6=t2p#DdNifG!-rL8Yx$sqz-%&`{I4VdgWsXGj>;*8a@Jie=>5W%=j*+V|oOE>fAF zn;)_)C@O&DsH6_Ss`39$=s#Kgq#+~x;WmX}+|<83tLFb}rp*k*fju1b4*U$#Gx zc}<3W3g#a$r+SH&a4mbKMk#qrv8A-ByX2XIoD?IvxvAr-CcTG#*oq6F#L*>Cs)qYTon zu)1KXVHsYgUnfsVNeQb@&&x;_UE^gx4d;wdaM_b&M>Yn`ov9bl`wKqA7J8hCDSDEG z6mPE=vx2coPr1OUCx2NkV6>uO__IC1g>v66tr}OMuteT`#fnotuSW>B3Tj5%I@!{h 
z_#o3?-X}qygc7YU^@%=~=%k0}0Mh7SqlMg$9{_XR$5$XEWFo%FsopDcEo zk@799e-xs197`i z)6I5!nZFKKJBh5wn%{3gfoeURF4f)Wb$6CbU3kUF;0B9MrNYi?mxv5*cYO1Pl-FUg z5O4=@w2Vuo?A5X(-&!Cm3&2TIRP5s`4Www?wIOkNehND6@Z5VnwhEa3!u)7!(c zt(HxJ6*9GSLE>WK8l~I7%3)|}>#V<8doU!*c7<4O;z}FC2Y@l$_@>BghkWi*-MM|9 z`^{OZ3aJXKeyq`F9#6!fA~)vWt9C>H=-Capv1@X2a@^kEZj6s%5`R#d;rH5L zUSAwZy3e8`4JeYnVZX_a2i&G@v6&o28fB>u?%l(dmTWo8dL9!FdYe%bbhFVmTti-AjA;xR=`cs*ag&hA}We=ONlvPq|5MGL2U(vY;l6dIL)|gSh zgS~<TOICdRp4snORvu<1ZT<3$7*c2J-h0h|tl=SoGHW9b|F< z)Z()Qu*3s7N%9NYY@>i9Kg1C_)Sw_{j}(I?hiXYHi@wZSNn*Zm=_iBzQdeX&inn)m zT1vH^(0+;2(C=pAbkU#*RV2*y4higUY|MJK|HP~_Q_y7=Rs4S0QvE^dsMk{6)9mbQ zWsg319t0#w?V0a6mm^*lGenzTTuO0OHphVqQ^1-#k!HxKLOcjP1W;J#@^z|8FGaN# z;a7dz8XM9%*rqDj)~|J6rUI_As|fy|+Dw0WGO*PXPmW@Qr0D5|WIede8evnJa#_n= zS35waX-@KsQue2ViAS1hmfTm9_4=HQ=oDmITU&2*N8Fm7%wBl}eOA|H`wcg%VAp&VjP z*xaYC+qw~(06~D}iDcml={Y#U35G^#&s7smM-dtD@@v`Iu?kc_ophAIENCx~&7q0b zEVf#a&Dvt+HBulCpL~uL9UarQz|ZC~^(I9;E#APlTGn5FfsLL$E>t{=Z~_-C@=_8B z36AhhgDG1ks_m~YkX#JXzI|L55L~$G>CL6BWI1 zHb~pPp8NoIQOZEaz}b`ynmXYWZs&WlKFqE!2OoXvd~3I#Np~)sWbe+4q*E|Qm=A>o zw<*Q%HBELX)EFaAQ#VLjDi1q~Z;qLuV9AP`(N}vRxi5JQWy-AUjl7bQ+xpsC>?Mdt zkskn|#xzi*(UsncFGa608z!YZ)W6RxytUq=c;=%(SDJ_tEaG*vwl-wctk@;p@w_7~ z9T}2>{rv5#MkD?sZ=(jUBSvh;y+#R{P~nY{b+O`N6O<`YCy18k65^Q>abVP*TUzq7 zm}rZ$^;(-&xdd@>f>bX&_t~*no$M6n^Vup02ngV|8Dy8Eashk>jB0NLv1s9o3LMA6 z3Mw)H7!s5Kx)pe(K&95yzejLI_gyfGtY`7v^Sm~LcTL;x6EJB;S0(m%og){%Y!3*$ zWcv%IZ#+mn=I^xzU4t_AuMGx&O^xV4v_69Z|&Q64BSj?wCa^4tw(j*&6 zDiV16?gjDcY7cxKo>0YIk3hm=ebE;#)uR%paNp-!eUfxV&a3oBd0!o5e2Gi7P~{0{J# z)Qc^)hpfvU?8WRaRE_n#&5%%{1sp!tjtQ2qm@IS+pFm{`2ont}0;&BBVlAw%Fi}0Q zD8)!^Pei8waDJp(mqx-DLYRr%W~bzu;j;iY z&PNq0-J6`8e4AWgJQFuCNVVy3u03aP_>$W)m<+jh*5mV;V{cJEJ_tVm$gd5$|&F28R4G4IWGPxu4;QL zw4RtPSW9Mbk*LX{Jx>CJ(Hg!~dfvP)J$aT?E0$m-iQ&D=&3Dhq0Ky>`a%1Flxy`su zp-Pcrh*QG0S=D3N35(e>@J*hcYtuWQMo9-UrNXAOz-O^AFw9(Vx*N%H%;$@uAG+T4-RZ+ZtVzEOb?zT^@_<4V{kTw)epI*l{N!P<$ji5f)^XXUFz-B{IG{yE?|A z*F1?-F`GCvJ2Xnwfn-ei()!o^E76YHo$D&f{<0G}b>mZ0RV(1d6C`|gHji^s0mF?l 
zI|BM{l#**XuTM9No<_SR&9Zxm*QjIXh-vii#1$QnUC}ikpo(*F5}sYq3Bl*!5>M|E zC}4)%oI~lQb0WeR-h1!%Bxu%;!>u2ua_51RoD}4l)EHy=&3iFc#j;YDDyAibUjm|h z1#q}*>bJr=*CsaB`*sT^vFIRI3m3;W7UIjVZ*3d3D9D9puXIC)7?7CVw}^$My_0J_ zM3B)#vO~k>r{?E(x1M>dCb!(ZpgI}Eru+0>b0oQerl#gMS{z&YRncipGo){y)y$ly zQIU8h4vDw8kua$An;@0p`Vp}wgm1Frx{GefGKN8xT{tc_bW;^{NN*-XOW&CX6f()X z_)R|de$HrM)L*YT;y1rZ-Ir&rGhWy8MvteZMa+;YE`D%uY|O8szWyK!ko`q{c}nE3 z+d-=ZuQP#z`U?5J zlTd$!y;uJxl2oCEz8i=HazQ~sn80qw`!XcXM7EH5-TlUq4;b*+m>3B-8r@hLOM7D4 zPFV5e$?AJhCq)KZ6xD5^&4FQ-!)i#9;2LpV5oiZ=wFC|=pWojE^oG1#mRl`1hs7z$LNQ4T_~?~74UCLgepA2JI7X5W zdI%sEnDjTj@A+a?k+_jIDjTV`D1XM!Epx!YModu=^`qQ+hIUCtnJamg@I|r==?WiA z*PF6D!KjBoqagzoHkq-=TP-_5v$(y(B;O&Qe*Z;RObohs|GnXUVxEh2yLf6Fh0-bH zUMajwB1%<|)qGW12}g)nKJjE;#uW$i{K80*^;zXEWv6kUV5nTe+i66Y2GqZ6V2~NO zRrO9YPdcIUAFaG@4SIW!gWjQ@j zez?=0!kuf9lFoSxBDdAnLPi?o`?kBS`qFbFRlKp~c+le;`I^bu?$o2iVtv%2vYMEh z8jvngA87CF#B6PEUsW#ZeV5*v9_;0-)eayji#x5i;w$vtEK(Rvea=Hs*EV!i5xK9;?{B z4^CZi_U|9(SnqAGGtkoqdwYAg_V@SSjz80D8zAcID-0UM5WtXtd5(w(vMY(OTs?Nh zXS>|IDU@LI5*=L`W#&awJq$@Jt=Ac8*FfK`!gBM4JN69|q?>hhb#w#8TD{iOZ!<>0 zoJ$^<)ez!5SF9I+jEje7Irpxl+^8wE+$o&C z5;AFsC=oe%no=UK?JSqeEOZI$q9A|I&?9vc+J}AzzKH@(W<#%I=v2XfV|=ua!TpU9 zcV zWu%PMfIpbVR##7N(|5UvXkvS9<}=0BX3mS@4WRF40b0(RE32zQ_b)*yAr}`)8$3~f z2%q8rn1e5$9(CtV%My_DdZ4IU{+;XhU!y)$_5r;_fz0jL=OupNZ5hp?m6G6X_n)EY zh&4E%e%sngNfLCix?-~|6w!6jC+FF^h|{EkFSe+U9-z}AeBJn`x51nGmQ8he3wPR4 zu}1DvHK^?ZH7~8dH&1)*8Q~6EX$wOYVJD_TdVm=P8??ZtL!MSwtB8ss^q1(wC-T`V zf|g|tn7hHNXPxq#E2(-6`i(VORkMI($5LCX&;o1Wof9lNAMui*=(=0ETrIRn-L|p6 zNP{W1A{4N2et9rT3C2P&GvEEj+z*(bz}fyH8PF_`V!1@xNvXNii2N<=&BAci%|8HBfD~VtluHWPmuOqjc4udD^Q&Qb+pDdc9f=v@ntnfX{8cdFGraiFF59W5_COZh;UbcLYz|D4BI_C&2F6t(=jpe|%Q%J6QdVF>g8{HKjgO?2La`{)FCl4^ z>F4m;4!s54+p~!%QC8SE#2{O?Ui!GO;Nt_}omjK*EcfNhx~)PJEc?Jw+RBke#2UDf zK_piesbAvgXW36I-DRdW@hgtA}nERED^e9LViKmALaSClq@ z6;M!npTH}nrBuc|rY_1WDiZp3wX+_acZ*xUF!xYNWUr5295%Zm1ZGzd z-du|#l>@F^tlZ6Gh$Lee=kSQ)lPwk&Q#@^RtYL{|3HHF7;dfkS9rW5F^}=APijt=h 
zn@BYbx*zoVbuY7mPPqkTuxW(minV+V6*YB``~V6D;dyS*y`!M2wNoDAbML>I)L4pm z-NB6RBdET80IjEY`|JfO57zw=&#e{H?fso4noThY?1!fVH1s`!0&m_756uTNPSLbV zsn4hd1geWnqH1i1dBGgdg}t?2rFWWnqXJj)3XCKl`5o-zaSo(=2ems6J+iHr)5Gx74OPqV#gDq9|-CAB zp&?V3a$!1?e)f=cJiTCHBBx@=5#1i=4Cb|1aRW)kCNG(lu6(I@>^@L=&FdO~F$P4o zrcqi$o;^K1x9pdwNV)}Tx=kyCR3*!)>*Sw$r26f95DR4c2p)4PW?Iy7iTD3%6 zWFl6wp$oMjbk@xfuo--qX>qBmapS{NhtHH5bm-!~RIfp+DNK?>Mn%ipff#AHC7zes zcD!}tp{BWThg`Xo&hDJfxiCAQ;cqojTKf+c_}pfLIQWdk#gVS8v{J{WrYeF#&(pQy z*~F421DK&9eG+$q)yrtLpWJTkjj1orOn*~aXCKvcWf-(`>~rkaSI2_E*I^(ceM#4T zC6e$88JS-u?xuU5VoJ2``oZ<~1D@AZaSJ8{9paHrs&O~sVq*#~mY8I`5&`R02=;Yc z%q?}tMIo>H{8>oT|B+~4L^GgQiSWFJR3r!+hHxZQiXBF&pWs$KtH`SEP%==omaCkZ zd>*>Gny8r&8W`&&q8aP^^~D*fD5$N0C5D_zwxi^Ot>9rW@c1_RU6$9m>A5`as5aqh z#|d-?Zt*shD8``Zy-Eh$0_y?Xx>c<1xiX3Dp|aYNM~`0{w?w1}`t5sbFvQd^H??;_wHFS zY(8L`n)@Y{=id>@O+asLtxjPWx<}-O7M!{LE^E#&XDa)#{T2oxxghg;plqCcoA5Bf z@ZyvOqhz6v;E;ULr&GI|iz6f*LAkeCODCt&8^0}o#^h<*p-7iS0<0h2tXDR4#|gUx zs4PC5uwa)Z!pZ2Ve<8@Y>L`Y?*`e(G;;k5y)OX6m0$;sMi*|%GIU~K1m{{14rYaZ` zC@}gGy(-(Z;Oi3wB~vQCHPwgYP?Wsz*+AN;4YB@BrW#+Fst)$bZZexrg_T;yMS~$J zqf9Why5>zUqmKWId30@J@O~wk-V{@H%>kf`DOnRfC7<8trGu|UEPE)=Anq~axpR>n zVB**P0%PK*Pk5zh<72(IhZs$5AD%*XM!O+~e@391_SM@*mLDoz7lyQ?!=R~aPr8Kh z&|UZ$H8R9eiaav2lcA$zQ!F;6&j%txA8UTLpq%P=jG!|hg%DQN3mB+X)fh0BGO|Q7 zN}D&$`aYsEzz^D%WDKj1F8o$uFi_RsHZGvs|?hQu9Q5u<2oCtXTACNM=+;x3<>lAP6!g0@RP z1~N;mC3|omr{R+%MNqGQmd|08jF!d3z`%nvr21@V<&>Us4e-B&w6n2*7g~Le?6Sqf zI-96AE=fsA)NZJ+ID&n_CnQ7~?2l|zyU0WlnbJ~HaqlmI2WCA9i{S4VWBri-MM@U8 zAX%oXzL(zzg9vqy6Y))vSqc3_EP6~Z^!7TKL&h+73N^n=EFWB9+VD{CJ@V5A|N5L| zIm}(nK`?8rdjAD>!Q-#*z0HH6Np2ZEL=;qc$E1PDD%kP$fy6 z#TCgy+&ZMB4*7;cDm5?D*)?$Iko2)*C|nD0jOe3iMvvDHbncIneMBEt_6>9%_KDdF}1OGN(RMZomMHf zPH*O{?3cW58W{_GGTaPBsg`r*n?cRd9hD{PZ_Kxvwu&JsO^_{ot#KPstl=eWqsLj& z-T;4N7}~m5ua#c-jDVqZvi4loD`8uv_Tl~ z>)H)v&zjP{wOgRuJ?Tbz@_qn6-gp}_p&q7Cvwkkh%3u>!-B=1>2lZQJ=>@}*Qe|Oa zi&Z{F%=J(oFw<1wPj0`v%jn_3mm@Ex$HJcEb1jOjKIL(bBIKjf*aI6w>;hVekN{HH z2Cl3z#La=omkb}ym{9=~K>Kj0G1Xv6=|u*~y_?#n-z8^x3RIH!&!s_ck8alj$bFXP 
zw4*b4bA<01(d`VYcJo!{7^6kMvLH_tv85Z^!A4P}BJNNs(>h-4dTF1(|fQio5 zt}Zvk9#1O)~LaymIWG7i1(c}NQQA=9H3!*5z%u6F*|5O7^p zI7fyI3s7FB*dO!qT&CZSgQmRvLr&&E0??jvl?Ip)2`fl8?#%l_D$tTH^de%>Ub6w^ z{6xviX3wcFUzFY5`S-Tx%GN-&u6pw{W~h2Yq_fZGaK33UMUPKbq1EHEF|U;<>?63m z`U$kpP4ECpCY;1TL(2fu_4rSwK>gpO&&B>U`iuemXf1ZZ7};PZ~opiUPdxZK8o&HS!BD{ObW;{g}ScrJ;Y zsfyd@W=zU$_k|&zPt)9CBZ?`4I2hL=Dw~^{(Dv-do?io7c@!SW(sXedy+SQi$vSOn zVuD|n#{}`X`}VE!3+V-myr;P!6Qh+GR69yNjG->DDH8T7x7Ty^VQk0c!ql5)7v=->1i=rlNv17W?Y`{KT$Xg+WzpSamAzYF>Y03wnxy z=lfIc1licwF!R_9DnGoMc4bIst0q&&t7K>fd&SOecseCE_Lc4`c&$ciFuHA{m8K;t zQvMn0aA$Lg4LAE)|G^uf zrMHuvNnw)aa^2D@*~066smR!d7u{dFNX9%dA< z36KMbnDEAV*oUbT6-f5wsANgc0Ub=R>+IK1MFd%aA)qyB>58g>_x;JI8>D!=mkGC% z?4L8(Le{y~1ZXN8S>1`F(5sl0qCQf*tG=+e=mK1Fw-%3G2Axf42g)R^K zE~&s{dhNp6=h5@sglS;xbSfNd{h~<|aGGSH8lqhK9DX=3tV{*M5XJ}6ci06_G7n_< z>}Viscfr8#qWUajjB2mtt5@`i!glOD`8w@Ll6RBD(Wv6QeSFG%Ry+B41f8d^C$IAk zY3saNcKwzu7jp~Dk5VzBzar9i&lps%b}d)Eo9*c0XW6Z2D_Iluwnwt?>Bt+TDll0Q zh`?ffsp68M?$B;TV$AkOjH}*`j*gflb#;c7A%LYHdnL+|EDMV?n}In}v+u(+7(Cba z`d)k$+dGS{6b#+tLliNQr+GdxF~PG7kX_0K2rbB(A5$M~;}21(bu4@dq<%C0sQ zjb%6i>ie82TbK~jhVdzrZvTOkSA~;k^|>&~eKz~etDrfQ@X0}Qs7af&u0I~&Bh!fw z1E6NDSm+iO#OUAKv~LFl(1?KF{_Z>=zjs1fo|2}5k#W*}LUtmAV5n16Kk+~hl}qEh z5veBzHJ&$fZ`JFpabfiMHq!N`-mF*7d#JuP6Ue}A-F;xuzg^N-?BKKQ*{@r8K{M`U z9t*bqXC~L-Jp2_CE~8~3tCg0Q*32Jy4+o3mIuw<0qM-VwqM{8sphMn2`~FE@%zF*n z3AA$1^h&;jE=OpWDQo(O0bPpsK@c;B=$kzgjzS+W@EfaITVwEHmWXLG9H$^N#mE7F zn4+Sl9&nzpVZb|uJ79G#H?z`Wz;>F+UEmn6#=s^Q-&7P1K3bLlh9E`>xL_;&trOAL7#m_Z9%$BO=K|VYj8I+R9qdu z(l*sjktKDDp`F8`JFzcq}!AYT3IeNb+=FL8F|8OvrV< z0c_XXp&wBnJcG2MczxmwiJ(xt6Q;&gQ5pj?)a(6@=Yp-zVk+>l8wfm>>!|%Unj@L* zy%xcCIeEP$cNOGiyV3>gc`|OZU91SkB)#ZkEV(>Y-?4i=_~8WkW(%5F`K(>9br>|_ zxuYOL6;W2shdjj+QL*e! 
zMQ?OHfs0B3kB8o#J{`cl?v zt%uy~fq|DY#~4)Sx)p}99TQ(PdX#6Hy966snE}`wHoFaW>3JrOJV`)6E|&cW(m?A) zVPyk+1^6t0x4XW&`a3R3=jPDd>sCRypgNf#ZA9v?N_4BDOZ6$voG@!rn7s5=qrkWHlm!66?yg~H_$fp``qVK*; zOk8QjacdXOq5G_(J+avvYimuP zIAc81+H^J6i_IY4n+~E<*S25odR^o(RFN_4>htoVtu1rHthxFT8k<$RoNTlvB|60v zyuzI=9;I_`8R@eFA_p9rBj=*Dckg2^udEDhfek)Zw2H~o#*b<*;05@jpmwAP@%aEG z7W?4A7A|rjivRFeeC&ez4x@>3Nq%-8R-z)r){C^v#5;^Qkd~)#>96p_=zABHXie^J zM;e!}H5f8ZeG=b9M{jNHkE8N(&LQNn)>Oz&G&Mv&54991sF5XxmT0@^fR2h!{q}4T z0w3R((+nR|1JF28fk0Hop8McMk_tpS0wAUlJY9Tu{B1uXDu}zBiuydhs%AT>UG9`; z`Z=Gm2w+U*H`|HG+jq_z_E@sf(O`UvEfZF%Ob;1b$QG|i6rX*Up;dNcHvsHP$JMBj zCAhCq>$-sX;K2jE`@mq{gMPO818ZqF?1$(9;Us*cBm#~m`e4(ft3K$OxD=fakOT7* zYq*-i8%&*7?W@jQ6r%asICP5J+1DVZMDcEF#QfU`&k7sXyTh)dR2eJlb7J-?E=xOS zMs%!d$4oF(2Ss@)A>`~{926AS9RO*|)cJsm?jt~f8t3H`P9bfh?Se{ccX~J*G&^$W zX#XE!Zvqcx`@W4MWvlFxeP70wqU`%Nc4Lqwq7+kz?7O5aNtQt+#8_s8QueJ)LXs_| zP%_q#edj$#&-eTM{{Q!V|Nqa&)2D~r_dVBj-Pd_u$9WvbDRNgRZ;ywD!VOZ{cu@5d zsLy0}Qd`EMXD6p(S3ShPjFi70pJ0xgBV5E%x@sy`pWw@{3L!p7T4}u(+NHaD^2FG6+Q-HICu1o8;cNR ze*lDw!aKjYMbd7msj1NueqUtL3Sbb6(r|X>hpK=D27_p6ZS4iI?j&d)@57`@K>i=F zw6-?F%gft*CpMf?he1pNxcpdbmiG=HD)E==l~Q7CUr%1qEd{<5Osx6T{I?2&en(q- zFRyw3xw$zrpse*lxpAdAVBT>9P!`2Ya9QU?1`q~Jiso9EJOb+SBB;?T;J>+=x;n@A zA3mt0V6m*)I&06T;Ph)u@i@7g%0=|1pe;&7Uweq3#3Ptp>p-9ibj*wd4Q4YlF8%%NsK6aoyiZ=ypdL`;>dWE4@?VXV zGRz@{%2d*o=3Okhw%alm-8*CiS?q6VIs|rA;)Q}>^{=llo_IPWLoW;|+drbM zOTwQ-Fh4ChP#$*@smrf?j^hgbQxV%YAK+QACU{F-$Mg651v!jCjcpu{>MoVX(C!hF z#7rcN>7_?TBR?{vop8<Gy<}^I6zJ#bwwyhf_4jo79?sE{GZhn>bUvw*K3CNV1L|!TJJ5JEuKSPRF~jDX zp$MwDZe?KI{i@hkDm)WSD$iCLk8YHN?$J_8VXGL0RWZ)B8n%+zrXUxa|^ zxTj@Jc0rC@%`5%pZR?(Lf)8P?&ZWOk0ITb;ytpBEy@!jxqL_Qbq0#eTP;h$Dn`cy$ z=HE4Y?lxfbk5>w&e&0(a(oLHBFjcyJzTp`DSYmeDJ0w}-QdG_PHkGIjQHzdAY({;m zoA$wS+854C!9P`J-3nG6c7j)AI1(nN7v6 zJ;jc~&df)(cJgx4{BtBI zB$LmsvbCeF^D&;+1oy}J*Ug3X#s`{}#qOz>bQyc5`zhQn^`QxN1%uk%-GIxg9O^1~ zc$Y68EgOs|M7Sl`Rv|Awt`8pq^1Q4#`padR^+xD*f1Whp1Nh2>BlzA` zYlkPRiRC^^1=Wcmm$ZRzk@Uz77FU6%D7z}8eE($9E<$rR!50+xSeb< ziq@oTgC(}k$3@wJgo8q#pfs;#5Z)$=75)- 
zXr`MOHW&BsM~O^$p|V*Zjn{Q=^wZ{?S6eMxVT=M>bkjhdcrNi7X*bcJkNQ+R`G`Zn$t0>bS1@&W@Aku+db&=0d0TB! zrqgQ|)Y4*2yKQFQRJ3z;*^U*lU7A*|1fg=k0X2s~o}OY}eK&mY^Icqc$ynrzcfjzg$fjM~@~c`{+<&F`!b zUtxZ%X;!UfQeUx7dB78|Cl8NQ-Tp6SDEBwzy;Qr?{w}73G6hz;;t$EG?H0(Ux1r{2 zk6p@$-4~kazHNp=VXJ+|_Jxn~(EE$9mRuY~6RbbVNZ|t$uG>_WEa@_~b=wRLtPpP) z0_{)nh0Cx<2b&qI)v{3=65esIwT?KP-6>J*K@8@Sn(9cM^H)zts^2TU$*0CFU-ge^ zu?qB)dx=OMhaI(n(tJoJXMSgTcTb?GkT5q8V2YTyG;KX{MgMrvjM9uJ8~0shp)7iY z^{?lCgSFYC*3F)3;$B@dde)UCTP?j5h6%X%3zH?I17&kEQ@<_4VCApIcblnsN4C0g z4rW(8pHD}iz80o@mSOY9`qhuv_3so(7k#Rxl0l7pEDX_z+%}8$xbxI|KsH$FiM0ID zRr&jXXp?YB(i;7tMxK?5*fXVmRDG?!{p|!Uy&q9~qvXkwl8^e8N2zs}TAR^dM{aWr z;78xDcf44&%-Luc;j>cK0*U-0D3^`fX^PxHn%YN@XS$T`6 zkr*wWPmm(PoCDr>r{4{=iut%191otEr}+1`NjrXgHuEt?HAa;W`Y%%i`a?@zz87W` zL*n9RJN?3^-%m!%O|P83W_+ARau;z6!U6W#+M6NJCEj8>@Yg`$Y&^v70t5TYJlX=- z(vz*J#Sr>8IR9njF+4p0c#8jzx2SYJUEH*umA=25exu##jDv>+tk_6c+(4S^u1>C|l*8dse890uI@%d}jcz4anmFTW2 zDnMy$)5-z+OrRhAfB5GwCqmxo-5iNfV3K|gxV1c|o$g@=>tw_k0xf99cm7pWW@0sf z{_iKt`13%g63ro?0c|zWjK!iaJHY*Qh zFUE!E0nI$iY*uS!LUMrP1~`G#<{$m|d20%O3%Zdz3G7R?wxX|oyufT1eppq_S@2o% zJ#a)>@Lt9v{b%2jdhAbyWcs>>mJ!&kW|uC~Jy;(yMV0jT)p>G1SZOx`Ak`h;Cg*=P zB3|)N>WPT|((30HJ`Kt^sTd`wXCr&Zr3?Gggc|o&S2tu(S5bjXRM7@lYT8cHDt^g9 zI;kC>z9QnWTkGASuXL!t{L+0I*@PDAw>d_+%YDYd3#C<;?puc1VIL;Y5m{O8-7b;P zU7a+P!hC+}CiFkZ@@HpOrJTCEM_i0p2m%a$x$?6@fW3dOU}quaCdS%;Gs$rsNvv3m zKJsWtMfJ4*WIrMAaaG_*74=)st!OQ$up5}uPdt6zc)4Euw0Pi|?^us;duN9z)?BV2 zFBrSyKKnA9jTOI1>F()ywDZ1Fx?|Ank;R3jeA`q#y2A=Z{(V%vZbQ>8a!M|TIg8xSU;IwNtF_XogLFXcjd}{P2KFn(^q;87lOheL-@e0ASjd$#Nh0r=vWU{te!Ew+h23%V z^pui=S9LWg4|m@~DjR5@2n&t8q&jzzcMLomK`>Ust$p~#MIwD*(0dXns~Hx_c! 
z(RdbzYH}%ig@>3vg5@Yzp`(}pzEC87fiy2<~Hj^QB&WNP0fu6?q@7U+~h`QJEuBbiMXdQ@#>{sZ2em+}0djbx|$v z@imEFVkyCadptN`iYGZzG0Z*3XU6`oQ9_L^|s|1AF>K#{8wykF9ZiD@^aV$q7VP;6=6%lV4nnX2VDd3Ef6?& zJQx2}l7BFSPkyOo=6$p_)Z0kJa6WLm_;&{Z^Njz;55FZCgr&#YL1DVmqNIPt=lb;n zV2P#DhpBw7q5{>a0_JN_mGZrQy~8^op!wI>n6DZfYH!7c^XG+~r=lq2BfSOYZ`p(Q z%Mw-lWr=iM%rSNK<~1-~QIZD-vaQ;QY>5#tfEw6bnXxoCXV_R@zc&*-G10DlbC}Qk zWYqR{sG*5T!TNfjQotM+w`#E59#H<8LN=G(yW&|T=8&eQj@mlPFZZ1uS^gHefL2cR zl}&g0Je-N!fZewYa^>OxU`F8Kguf^TPDTeQ&QA932@$z3+MQG3&nn;#eDy0sGb% zZ7Uf9mG2+w-|-(cAW9t|RhaglN<;?(bX#=^pspl=EfjjPJO6eAYyKJPvvL1Be--=T z<%`pLFlnd9PkfUll}1S6h01nHpfVY_zg5!b>L|;HknT6&eK z&_)54Y>e`I|ITQrH_nSZiOp}4sp2!EpFDQK`|`y1{KWbs3!tT6s89dmSb$0g~ zEswkP|4*9h;iIvK_|6+qvdK6agRj;5_O`YuW1Ua|GHHfSMr8dvHL@Q_%v0m@mAB75 z|K&vZJ+GZOgMRk%>Z*JT-l$q!RvbG#)ARNs-6^br$FJ$>x-~!|#RdRsN=qOT+wo|=)~w#6Bn zkS>eADdXdqOD}fZ!rO->@|!cW%65F^;U>g^E%e zJ;jOHB8yj7Zn^isyXK@M@j{N2?O4=|$zOUL{4-cd{tCN6WyuDgz7@UcN6p5RdxoMP zzvt>>&?jp(h~lns=(16$xrUT?scn6*{5X3rBSRm6x4bvE>3bJ`wN8z;B=}Bzh(8s#jk!n7Gt?OWpnTe ztw8^4(;uaKHNaJO3FR2-?5zItyFI6@p?yPS3Nq}bM-Se^amySp8<2?X-EE)CVdKYS!r^C5a3V>8v3`qg(%4l7Pz z@%G{7&70v0`u&07tKZ=T$ZovSU#W?+V^tTaJ`{B_0S#Pmqbr}E&`87C4KV6F80{Wl zsjPDuawuHm%hqx|GOs+Dwe5?CPu~q(@8Way{tbR+P_dMKa408#6BNSdOifJWo1S++ ze;i#3qtsj)*gpRIra`4*#;fzSl%`Rvjlgx9-ySlktdnUb8ssGeU@EcBgMckAfRY@+4r{&M=M(?t3hOd zmIP20NmP?k34c>Ub@kc`s5L2%Zx#3c`E!kbKCOD)zUr#`H5718j2IMxmy4gSbZroz z5};?&Hg!NXcsbV0$f(5mKgMdmsDT#)TVZZZjRL{;d7*cHzC5+qotMDm6%!J=S?1L2 z`RSxb@?Zg2PdH^Mc)u10*ZY-L9uq<0e8O7O#4RU*++A7Kn8{0dk?~8l7cYr9TP!H+ z%NmD<9eNhHzH=upzGu)5iyxk#o`x#R6o|YszEd61ge=c7h{#HN^yq0sUY!ro@Bvt; z-Gv%k+2^OnuOkvjzDz`V4^rC|_a5Z8$DI93FAbWov%UT^^V>z( zL`WwTR#D&$g|7_-ydL?^8?nu}&BO4+jmwG4*}4a;I<&V7@<@tzb1TB5&n~MZepW<+ zQ`m}Cv`DPL%2%NLg-iIAA|ei``%R~f5%#F;cgq>O z)nX(MTsj*E9D`EI;oTyrW3{+2f1F6=Fs=yjts9*Ozf3LiGftA8>+By+UH-_D&=B8k zJ!>!xoObb3r&7Z=r$m*!M|GaP7fostzpxz)%+Q;%ieY!Qu187p#;gOsnk6C4hsE!0 zETdf5rZ47GZq%oA57w>0dM87BUaAO%aytxTt{6%*2{_QCtDY-(83J66;_#i#;?WEW 
zIu91z$OwEEs6m>x7mbkq0P;&mfE(tiIJYX?QMY{%>>cAhkVhMokvn()nzY{)a10Z` z9wb({cHH!cuz0a3F7qK#*TvZQ4xbp@#rnlimjE_Ks5vJsO(+({lMl$ZmQb2tr3z0M za3d1)c43b@HtijFbv9wbH44fID9umPgU;V)ot;X9N^*h>N*Nh-NE>fnLczX^qcb0bDy@t2GLIHtc^;4Y22t|+s(>~1hDuash=v2;5OjtqB;TWw+eRF>S+@Fz<1zoBqqV^ z?x%DX?*7CqEvVAdT-e?9khv<}X;{9tygBj-GxCdrPBtsk=mHmmp3W=+EC2!!9&`3n-I<`o96TTfFr zax#{oI6_@0cCxa|^H@TruP#D;Jtn`fPQa?#pv3n1C9UN&!7^c1NB$JTUh2i)rkB8e z68J|AKE(Kp$12Az__&Khnmh(kx^=D?Wd)h-V+xO7BC8a?ZLUI-iSuL{~xRhp~YZB|5OyKO6luEHnF_1#W zru!?A`Ji}rRj>sjI0Kv#`0I_0k1N&JrDvxgD(mi5uQQvK7AQ#_vRg?7k(85C)m_9P z6}|g=ABujQ=JfEUv98?kF?J$+$0WZhjllQEK&Co2H|Gdy+jCFqP8fUoL9Dk9h-&0G zpJw1nt#EE)xAeSN3PB2`s;U|jAJVi;gfcF^P7$B=ygkoK@;w6iGOwPT8>kjzHN0PxKevU|^d`WHYyhd-C7 zT9jyc;ZEhj+1Uix0A@RKXzdbl&CPYIqgg(0KoOo0>U+$qw<}XNy5?gf-2;UpG|PPl zg#t5Ne2a~XUvo{rjP>sfF#r09+?l^C)uBjqWx}I!0=>ao3EcR|-)l;}@zVTg(fFs8 zHW>HH9{!)M`MXdZ*SDnU6*EL-^)s`ENCT zYHMg{NSOO2Kq>Qg0VHTOi#K3RAiIA3`VrGrzH7H8?`xt~dvh}}3cj(aWO1LL@GmZp z30ho}`I1zEead7jfIaq7kq{cY1L6FlUXM+pNZq`t56scsp(ovG zJda1?NchQP5`F>{Y?bJ~EjTV^t6zNH7pT35UbBy2y2^P0I=jjeMe1mb>=0tuv zgnJzbAUH6D9c@Eb{fwx8nqv0aw%U_h%C_Pr@NY#`5)+m}iH>@})!wzZ4RTdB(s}A{ zC2LgX7sn{|bW6gT_itd2$8)}Mpfnah>Hb=O88N&$LF-#4IQ8l0-s~fqKo+ad&1O_u zY$iWc4kNVF3A(dJKb?^h5 zkd-}(4?np6SYC*=E7FKY19dK*GdhTg+<*}I^#%@OL@Q*)ss7>?ZuIyyarmZ@_k|N? 
z0za%$f|u8ArjF4lSN8VI11wHo4S&5Xxoj8-q@Bjat&0e`k|5muhS8~`o20mVA^}I zF)xYR(KjxXQr^`*r=+4c-<6})du%rq?9a_Cq#k@46n>6(yeJi)qvW*7;q=w1!K|BJ zpRr@Uxb1pD`9gG+()B}AZ?c@Qdcv&De_k*};xNLos~8&477j@x5QMS$`SL27xdY@m zeC7@g#y|pWUee#jZ)Rk@ms!dv$vq~+={`&Ke4dbwh+)O{sprhU&U+~5H_Q}V>10OR zd?rn#;9}1Q8bP7^WAo2KkB07grTA8SsCaucVj~9CvD02ku|?1Qn9-}qaIxJc$Jxtr z{5K`Ou|(Q{P-1X_M2Rh>gqnR>w&L|dJ4j z6aD zkQ);QiyPbAuVz_0& zdBnlA`OkL97+l>ol^HLQ~JElmj$SehslrEVbNqGkhF zG?kvKPY0PWB8G@SRuPcJytvQOPRSct(;^z4%-)Q-te}EO?e@NhcrS%d%v}-upgLs`L0Y`Z30JH z9Aq76V)EAS-lC-C6#!xk!lqY(M_1ug29D0c!NB=6vO?IRevOV6g8&u{8^>;T0S-~} zI=$aC+aAEDHr7S>0TPS0Ul6-4lSE3IX-+1+*=c*W)qb6cl@%ND=f|swMlF=0d-|3s zc3!dN6~3PLO1jaM)3f;A?Ve_~0k1-Ajz>!R#*yZ6quV(?M|o3C*OmruL{k(Tuo{`) z>YkelHz1U6E4@+yP65L3<;ck1I7o_0)(3xzUrA}#0|RAoocgY0bSHVz{@YwL8O z#0Dn|$GOW$*W_RGoUpAK)+T{d8PerF)^0AT6^JGB)x5GUI(S2TmbxtyGZWJ!C}8s; zfEvzXQxF{@NpH5|f{^MbPog*y=6`)jTe!n{<}P>{V@~l9(+|!)JSZm>0yn|j+$f}svUf|yk zMvB4J4D738sb+=X^!?`YQFE+5Dmps4AS@@;_FF2(1SPeGL#)Lw`ZNz3B9huXs1CF9 zaaXw@rLLF}P-R5biqES!#4ii!wXMymT}Y?K=0WgB!6+4lt0M+je2s=!FMKeCkjQIi zFB;jq6L>PqB+}`3^VP2vhZ;lH{8!Djmc7hSuD13S7inbndO#f+JMyTFDn2cx&FJ@M zAuF{|;hedQj7=i*5-}OA$hL8fsO$mw*atMdeJ*`w4o z31rRwl8TBOHQjDkU5+0R>OW`uOji28Ph9Cop)iyr`X+|*0oB2Sv$jp4LGl~S2@VRU zZ&5(%^dsyY`0TV-0&cacvwTXZzhJ#Xon5GM`$L-4zZnt(rOVFFPIdVD&)Zgf1g)_H znhH^5bkn(-(d(%NN{klimHg&NKZ-!9b1+q(1R;ZhjPEne4`21{!)F*`kI>Q%rlq8y zju`EE<|gq!GK|bv7d6g&$=Y|>g9btM;~G-6YSs$#C78jg4AqHeUVit{Yotz3bazX7 zXvFt~)zQOi@5Gh-Osf+P`zqC|%k#KBRlaLlB$(QUn{HB87xkaF3evtJ5Dc8A?fZb< z$k%)S(whW1)eZ;%9k{r-vYdc)D0nE^8T=F?)Qf*JS3`|CP&8^hxN$cDBn0}f_sX#s z?KYljYmzG&9d!Y$e=-rkjB$>{ciErQ*Rd*G()fwBt^y3dyqMgmrqk%$z1ndnC7pM@ z_Tnlp7>6v2ZfkEY7np`CH%FBbd2#PQ=6rip#VJ&o<5-@91_kVvW8lw&d(epCkjnax z=(qDjzmocYt^Fa6kCX69H=x&@Q3w6@D7HBIbvf=ck9tY))AeDVY6c4vQ3y(NZ#+BP~JHztLZO7H0X!OJJB^gwU99YPUA$9%v5LKbBC|-I?-DExWzGi1&xh zeau1Y{L_BCMO2(iyFYsdc2(^s>)+@p1;}U7dfAYx(@|(*y(}814+S^GWb)C00jjtGzBS# z9Gi9BT*Tm zdGw#czS4e;rERsr;5(+3~ANch7C}3K$$u0CZFD`xs{Fi z+J#;E6aTDgXIZxL$-)$9LQCl#;QoF!3 
z*P1<)yKoD7b4-93?{Mo3UhW3ETjkQn8)d%VWMo1{?dz?+-?aynQYB5p7&F-S`{uek z4@Z;Dt=VhHbM*nTR`dkTl{fZ*!Y3}9PT%=?f?F}*7Dvo#5!(^LEX6dN)v)zG;kv8u z^ZAGD2Os5}60#~E*sFm^oK6htOdHbh`2MoIhyx&hGrjzUsQA?=|7mKSqC%5yMypQK z*qq$5gL8jvjjvIX-!__Vkh)NOp+oIUv|ZHiKRo@N>Y+MAJei5A#jHPCL(uBO4LPXc z*s5=XG%15h0s;avP}u6fqbIlmzGYQc#T&`h5s3KDW#^0zMQa8Nv+mVGc?0zXyk!f( zftx{w2<3|pm@V+EdT`P;KFAXUi=*1;XI~y-&I~>9j5S3nk51@wK7ZGm)Z54SPtTjQ zgvd9-ew!I@ir3zRuCv?dQa{=9Qd$WM^wxclcZ9JZB2718RQZdmg(ET%dG5qJofe}2 zVIhag?u~EW-rkm=Gi2d2LeRPTzVLH)Y=o{VuC5k#gS=j;@kvQVU%s40nwC~hfCX&G zp`%=GbJxLY=*+2;Cj;FvRYNqtZqYKvn%aIs74-_cioR_&e|+>4$vg4__4!(-2DB#X zUL48V_7guxEeLJ5oyG2$NA>yJpyMnJfiy))eAB)2JNs(eocBCpQW* z-;Yf|h>i1gH?%!MLsQ!T@xdIpUN1ocB*QQ2t)}J^?Q<_Hk^t;UP z5*d@_n?A#^lQR38ZCGd1A(y<`_H0-wA#r=dz>;K;h0BSbo(9l=o#F8zW43cJjN!yau*=Yn3dTBbwUoE{mbGhqr%G)-5W9Es7O% znH1um_lnQkME9%TzBM)U?k<&oI*yQ;w!vJ4{yuZh{!g6c(O0piFccaZ0+Gb<1s=-s z)IQtPOWkkpGj~!v$jLDKbzAfk=R&#VlC_i&vT@U%+%K>$+E&lj@zlh{Pswle@e_2a zp?WA+&Do7@vgz(E-k#oC=OO(U0RdGe7Fb^Af~R%%##nW0&op$;%!)clSDy$se;WCE zOlE~14iKXG-RqIhTebFJ1(Lpg9HnrS!5>~}JUDOhXlvTMsM#-FsIn!xbL#ulbvc(% zu7igTG1#0RD0dWH@EWYZcNq}m`K1sDoNe5N$@wn&+!Kx9!W?Wg`nu?+e&RzzUP+Up zYNkSq#kUry75UY2vxm(eS*_Wu*}j~)y_zqm&Mo=;p!tUG%x9qD%<9NoVNoQ{sbc{t&xed?Aj0a8t>mq;^en-2*T%-FJ103Iypnv zu~TuUDn#g=gKlLgnd?{M2ah!ADQ43jib-#hnvXnYzWv$8fl@~|p&0o#Q{*cjVtl%A6;a6mpfdHu0a7Di~LZy|qq zzRUXSko1oyyaxk}5R8=~c?ZCJtPo&o<X?(yXW%!r}O> zQ3Kr5$80)Em1=V6#hTm4Q8eulI(gnK7bG!3x6-Jq>_wm_P&&3Y|SOc`>t!47p_eqO)Pcj2V4kf_0AkQu@V6aDVUv}kmW8=#S^h;G^$hm=K=uqZJejC7LUiqBW%pUqxsd12k*EcA)D-3MXl`%xxs_bRLOZJbKFS_R zw6PSgrhfWoQQl?wVus4j4K#{adsHNk2)H25H3>1VnV699OU+61V83t@p&-p}_tCe-Evt{D$rs2Eqe+@@Ce3^OTC2UYvq1ayQBpO-3(Dn1Ezs3U?TQ@7_21tdVmX? 
z?GNEe>i=w~jAv2AxP_^`%V2GvQ|mWl{Tz1To5M|E-!2s{bDU|6Y`gI+nDD z8;bDZkG4??_4BiL@3+8Q=OUL}OUUpsAe>j!6#gF|2O0G`f}AUhNoMID`jro05a|pC zgwx?}``xdapqhek9OuX1AW}jhlpjLO_TRu0iY2|o9V$XrPqP5^5{4fGX!AFoMMjEI z!U=V$>dxc4Qd{P^5mpxCld|3{tWHJ$ql9~Q~o*Vs|k$e zFgyJ7Ny#Xrg#b4A9|>#CHI8A~zT6`6SB~_4)vaK-hoe+Qd@Cn>|Y5EuJ*R}RlFM!3-NNDUi+@e?Nc=X(%`;d^tERJPvwsge zaAu`dAC3+l*Ww{^wH0j54CmXK*Gd==0-NtJ>~B3asW3r9$dS!2rm1M2yBWJ{kG9go zWwUXwN3V=_4tFvh+_1^7+tll;8fNy5=%kv9AOADuB=K?56Ltu6$dbMJ{C35Go)*qP4=8*h+K1PUw5vA@R%dYveN2ffTSRMK99G{_QZc zev-V%8Wnb2a$Nc}{NT%mrtf}$#88}_3y}BT`lX4QJ@uN~(Ev(zG02v*{UCk-7Gn=t z7t(|uZ%UEi*&+hDjk0>|ej7*!6Zn^y(bd*y+~DP#$_@9RJV2KkFTUTb>JVIY;Q#WA zaAiIy4u+<{FNpTuMgCogXezP$ z;qL$ROE88wU9Xu`We90S({-=A3=!s)vY4Lb4N_hdCH^^~J6R@qOhK70IF;;Z-~BKM#NC zzFB-jG#N~k3L(9(OMNekWX4GX_fI?*GeQyLM_)!AC@46W-)#VAIn+w;`4a}LZ=qYt z1LXi|afxyt?$cA!*|S2%j17iRC)@Z0>;rtuf>#$>Z%3g_j7PdurDz)}e*%H!Qru;v z)hS-HG<3L83mi~mtC7XEe0Yr5PoDj?5W2q>Zo{)3%^?}j(bT8IHphm7zNVssY%?<6)?#YHw)~MoL`*^GpVrUW=pbeCd2J#w zya0_5wx|__KF*73sG!;rJ7MgW<(w*b`Sbrrqr6! zZoj>VJaY7C)0Lzrl5ZOZz6|nws~G=K^luHW$J;TzkLmhHrDIfK$2_R6!Rn&#*Kv&U{YaRP=OJSyWhz5+M%TZ2Vwey0=DL%nI6d zh_bTwz_Ox=RR`>s7Fdu(?FdrG^Rx*7|I=rR7*gz{9cn;f2$3Oue_K8mBZMt_RrJ{` zN6ln!cdfbQA@)&h)=Ic}(t)3Xt z*)e^bz|A{NLru-#=~G2<0~c&ekUQAUiXJ}n3hP-}w84*ma4%nW|98MM|4yU`5-UM~ zzE!0!8s_Fy2}kp9+*QdYo7W@~cfdY5Pm2|OoD6~I*%Ldzf1u5chsU&X_L(wyAdJl? zsuH$6O~XZtx7!S*A00e&fczl2T1(gM&+ET@I_jxdv1k>;{V4x%A8%?<)6>dkb)c;WoQDV z7S)T1U(-{`getRMUQ^y*lw$ii2IQ8c9D{-}5=WbM_~yP(fvFI@Or|&OUzPFWSt)N{ zYSM&Uf-~ryNv5W}ZPiH2yb0Uz8aDkaLCbf1@TfCJ6z)Uf{MNU_tr<~Gg=`A$>v zypf&`MJuNv|Gq}<56hnTuSmH5mZWC~Y z7H92(`x%CH4xjgN`cA)QGn{ZdE#SlAUH;7-kPbYqsi`SX)Vpx3`+{!h-|c-6GVscU z>v~tz6y$@zlqU`>iC`y4Tgty)e))TckEA2z#3|`{YDv5qz|zytp=0!? 
z#nY=}Tdqi=$5F8G&IK&n0<=ute#<*+BN>HktgOOFf@CNp6Uj}|9S?y@0x4D6!-q!h z?(V*G-xVQeOA@LF5SX7$y{u|(u+j(<@ehlnASB44DKLv94zRD5W@TZ?Y`!dx|E>?L z>%L20o;r_jjVS_IQD<`#!jDkfm?5GSBgNCf)qefFG*^B_-b~v@nZa)h3yTdZdY;hi z*eWZW>fqmC4S%Au$czxsPJINDu~1egB^6=1p;KKd+6_-SI#wITy0qROlko#n+Iv7O z5cZ&pO-#hz#JYMxyDYFgmL3w7S%XfLXX~8AD=YXAMC_X4U3S^gGO0s`ND~8t+_mA5 zB*4nWXbMp%*RLNwd=O$o-jxDsND8vU`|L%iNa2BqjB@xqntbrB{)MDXiYsAPBz) zR;mSNY8!7Q9~p}IRHhQq>3skX^g=rN!Aif(x8g*5zol!-3KU-pnl6`kMZY@VG+$EQ zbaI}a*aY=v_p zOe6*>5`8$+%?d6Krl8BFE6)1(rORwEFn3%43XUxbs3KC7Ks=JOF_N0NEWtyJl694A zlEw$5Z&ERK>n(&|W&DdqMds5$&Yd5JnKw%ZrkDapOXLY;zk?#NlkI@1}-)C z9wGNyr?S}g8(&};P)5Yh2z+Q=^Ak)F(`{Gb7m`S9HQUo4*5!`pm_Ef@qdelCm7+H) z;3J<6UYiS3gKegaN~N8e@$ddX4*Ss2$jKklA5z*cUxQAQZE2t{Sbs~7D8RLcC~v%E zoc}o%mRlu+C3=8O(zqd^K@_|6`gZm@w3+!?@HYM1Q=!GZ31T>)^>e+8xG>ug6xPam zcnbrBl9N1=Ya=_%{Sa8Up_FRMmnVF5H%L3+?~8YXiL}4RSr{S~5_U4A>_6v%-epvG z`bTV93WC=(MK`6zBpt1GO(;R&_Dg{diuWPvwt83qx~&zlMO(yz@Up)2qLlVuZQkv- z^VhFn%+_w-ylLK*tvatXRDOpF+t))0_^Q74t;4VAwz;>LZq5DO@ilTAC=HQ_c@x;d z{!1Of4-}?naL%o1QGoR%U{a02KVKi*rKI|;yYiar!hXOc)h}M6P~t*D7j?lM(9z+U zi|*42;{(YzBK;}A;i~BwB==^tw6uq=%pXzx`$`5f?Z=VQdLgWK-^@xYbl2cq+R^}i zpA5r~wP6uaQQk8gi6)`WO__rG@n6H8LP0@znAzD16;~W&ZvH77ggQ1e->S<4Ed&J^ zuJMuYp12L}^F%xF9t{!L{2FyGU*hk%lb)Xxcr0M51QS=lafsB--a4u{K0`rgjGaQV z7#u)H&C;iJbZqZVu{Qqmd>s4F_pCz5im5+0H`fI0j{2W2%Q!AFB%h{ec&qOGnzAfc zjZU-w9n(KWDvg)jl)sUPqydNWfrxG({k#X=Bc%;E=yuEIsxw>@kNSU;HqMR)h=>2R zBY~MGP!}ho@q%hAPO#YygUdZ(j&9zNRRVH2u6DTXE9KxPfe8#%-jY;C|~#ChK)pMlwspB zQ1&PSEIo_CPI6neHl~USu%;5a z-5hSU4;_+KNHhX%QQ{zbUreJY|DkZ9y}Y>ztB@k~fxnx~3~5&j{9n^-Tf#MIvUTW{ zi&l%Y)P(O|`uS39!7MG0A_4GCP{2`Z;f5t$(~~?_F<1tM1wJhRnxSCG0Up@{4aI>c zUY&zjPn?_|8WJn2BrN#yM2gYESy~Qx0!-k{18Q?u0~-u?58t)X3J)xOlCSdVcdYxe z*J?(wS0=8T=IMSgWzi|MT}zMO3o%#cJPaKTW#HH`2iORFdcvDuV)Cv8o|zeJKQFet z`MCK0#QoC7=?7=0ED}+gHD|6t)xOz>k9>fXUJ4Jcf~NN3suWi~ddeaQIVb~|Hg?Pa zjzR8}*wOV+VWpPB&QD_Z=641EU8m9qKDh=M3k@1-s&D^qQa|!vRObjN%c_e9sr;Be zCxO$^0%CNW{{rWrpr^MRuLf^c?~#1`b@JB*oK8Wk-oPQM4J`_w_0bTt9W=njy8Er; 
z;$ShK8>WdS@-q&xg1ZFZnJzovwB@VKD~}(6Hi8*gW`LHK))PuA0E)T@ON*dezmI_0_)KuO}yUcq*`Qt99eC;9RT!>#bs!zIgL7|Ez$HNekqX zZs39_cLg=o+%10Y{%NX5zJJ+uvhqiZ!pbJq51p3pP5#aG3Y*YrcYCV9{(uc11o$|9 zurN+Od1StS&euyXOMdKqDeTXv_R^bWQLxySJEnyU8_U=?hD4;Zn8B zJAf_CzYqWW$W*%=5D*mNs4xY(I()hKWM;-rh9e<=*cm5jW%S9-pM3Ad?vIQ8iYhj0 za_^q3B%f`aF(b8}r}fw^-2?{~_#_RL&SS?r{G zRXeE1^M^*`3`_1^vwU{7pLF?KYGnmni2c8*f6<4IOy?k51z*Eru|1w@``@K3@{pan z=FBXEs%c-=AGpG*|I*_-=kWac@=Nk3U<6&`xU=;R@Pa3o~sMGS-YG?>GKq=s=u9oCtvAeb+^Ac_?$wY z9Vjh6$OCqbY(S@2DKf0^uL pVE0D@*jp1l&koA%JLVtw&mNrmY?GO4g9HN*c)I$ztaD0e0sx-_E^Pn+ literal 0 HcmV?d00001 diff --git a/applications/Chat/coati/ray/assets/2m2t.png b/applications/Chat/coati/ray/assets/2m2t.png new file mode 100644 index 0000000000000000000000000000000000000000..94ca1c361985197a56966342cb4a340983b46240 GIT binary patch literal 171753 zcmeEP2|QF?8!v67LhGA0%d1ieGuAJWv86(mQWQ-L#x~Y$SrU~*3MpAa+NG5iOK+=^ zr6@%xktEsI#CPt@y>RdKDX z+L$p6HDkt%O<|k}j;uI3jUD{oSZg%}+?dPh^UKDJZJlN#x5dWH-hgOI7{h~=p?u=O z%7M@NM7*ID-i!w$K-A~K%JN_^y293*&4|j{7xTlZA5@Z3c9hijTOm+AVV@GS%D)w7%WysOk4~I21j`z3w);xV6j+} zY_0U*A;bs>!@$8?84_SwVFY1mKq4St6+`|h0%rori~t@F42KoL6mLV^g+2)*fQMma z*l`{}IXqP0gPA>fENPFcptum_d3NDVZQ;xEU_?y8li@7D2Sexsa=?HDMi(QX*YWZq zmbMULzzJ9b0|D?KIJ4sr<+tEPC|3hdLiv5yYl6e5`#{sP8zeU1cY=A0MGrpSlxS!U zXGF9is9WI4 zYRe9oQ!rCd+83J0@^~|%DKutl38uRU(DVc6Kz~5#MtOO%QlLG{5XpC!UeB`$vBNM@ z_>k`?DPju@J?<5<%4eIZ}n<1dT6ZDDbuU$10 zbO9PuR=oEzN6A!Bj21E}s;^Ta{n_+`ojKI>lh(5Vrj@L2P?q5L;7K$!MWN8ZfFPo$ z2P8seyY)rIcLKolRaUUJMQ zkRp0wgq;R-5+tJN2Skvtc}59RU0$PKrD$Mafaw8_B0EKdMfyvMh<5xNa0Jc*eU!2U zUMu94qYQRe&1X(Bhn!;&fp@4$_)zaXYg!&cr;_fwPVGP~wHBpmkNU$LVFZMzaEpX5 zKZ|`JCx=$-gN%?epdu&()k7!*&6Pnnet*Zw!@^EAGXBW8qLjZNqS??s9ugw-&5R-* zeP=K%w`C)h;sVqNpnRYyL~9nHrXA&%U81HQgN7d6-i{d#i$n;1NR&j7m+yapzz!n{ zoM@T`)*7fc9!OrMt5W*~6@$nfO~wd{!!ErInv6jhnt>-{Fao^@)aW0M!x|TX8?^$) zAn_C$Z-U6dzRke?4zhZy#b6yyD-`G)mR^Udh%8WZv|}36L1@%b6qU~Ew9Qa#_zS%? 
z;F0Mch*-2MOc9%ak~mF2K@c%0eS#@+H;O$B8tV+j(vYeuCO2xwSbl*qCL?@tX~)`!Tt@s zI-x&egRkEiaU{!y_`b+gLZ%2B0*LKHLjXCiZ&L{lY=(TW(II(su+fp^?GkJt!u$)t zhTw3C?S4nxFunU1#Em3=dqDT_t$4t~7P1TOh5q6|P&-Np$!7sRjKGSAj-rIVC-Vn; z#RCRI?XOWeXY{97D7S>IECxv&qO2_aN**v6TAo%n_p}EIbV{!~69QEQk&E7bSsH_S zN&1yMU@+87F4Zvqo)6yll7~*ocb8s|+;2yT0}Pdp?IY!9pGVhOGX6$peO(DIPS;QJ)1#BlM;l1&5(#j|WA`!yl+i zB$`6PSL)OS-K2;_z$hYfFqAy>U5En-MJ#po4b74V_%u`~HCRg?enxp!e95tqIT8-sx0FVJEL_)++y3NdOEV9cv!2{T*fXN6kaWxOA6pkCS#_YaX!FVH6c^ek;Hq_|WwCMNF0Hq`Wc;rZgRe z{s~;M)B)FE30!|)l7F-20ZScl4VuWc?~+6?iUK8~PSF{|en}c57eN`1&cyE!3KD)_ z>WGv(VupdNc|ejkD6w}5(*{k=!*9qH79@>mw?ljOKYaT)_UeQ_T=Osjqp#l>@j+4Z zAOxqQhJLhxNWM$7foO`Z(FW{D{Do*kXxIdIzaehe-S`XQ7KRmE59p?E%>z{D3>pMj zIcx=sl;PeGKk4#d4KQjW4Rz^&It!TMt*yc8NvOsS9F_+wrNB|>ilB3b1=J_Oz?5JQ zpWS6471RnrOpic&f!|IsVIg2)u(&QQB`8jOmsKKgDAT2J1UzfHP9@mv_utr(!#nU| zcn7d131JrDwh5q!5svEbWW8v7bvy%bk3CMRNH!=9V zz6Ka!>MXyk>_+fLP`3n(fEGbbg(_NvWH{i>CNRhp>3ivgz)ArTIO`1$13dc%MXL#n z@Xu9muXRCZmlyK9h9)`a1=HEO1^NQLKQztA(|-%}h0+?PLmMD}>-<@ljxSww0ldfw ztq3UB)~D7M1ap0~uIRB>Oh3(kL2;M``OEWP7~y^p;88LdJxzZ>Bw0rDbTCExI{T@a z3M3Elvz$gxEo{Iv>Rl4!FKG~@o~Jug_uD)uDBcnf8^rBuFv35t5dK3QLUt5>TZeSG z7d_~ZE>oPUQoo~XR6nVj-uVzhutn(VL%;_InGO-^>Jw1;5Z#qXxAXf!i%`>au%x=V zvvhVEAeDvcX7ZDsIvw+)@6H6I3PeR<%>t`wm{9v0?gSXk&gOLBOP3yhh?j_T4;!hU6123%o7+Hr|0u4mfHc`<1lEk|AJn*z zW}iWjD1yb-(PvOd7>3elpFuD-KM^Wg%vc#@#C!RJu0Mvm-gOQ?$|dQa3&PU@9k=!S zJ=XDUGL*()v`p7yl;!YhDoPvhVw6^w7<9u3NOh|Z`PPZu3=qwBb7*#nws2^yHlTST zWj#jk@rZ^O`ZHM(AYJ!De7Z-8O%G3_W!}*g*{s>T<%rSll*vh8@-#$O=`238DnLXd^-k6NsxYL?J<) zMfjU#dV?JkG#tTUhzV*=_^+*%8+3r^=2;+LQI=yE0s?tU6iG7h>;4R(auqn|-LbVx zXAj=Nj+Bw1I%u@I>ryp~bPI-UD=Nt}m=-q<9xQeEBGk?Z%~cRkFbooS9Ysw;%s~}| zlaDAxgRrqbD{6rIno{k{gP?6hwMQE~;D!Baq>2;4GJ;W*&HTe|*MmCP5)`2Jqv*hc z4m`Vpw4Fi56K0`M!-?2B^kM>}9BPQhNgW97qM9I8i?U&zYw;kzaXHZtLXHht2*KI(Q3!$jK1DnBJA`1RM9ap&uF43|-44x&`vc!V{Fb7?y!Nbrzh8mKiYyh|(Xw-=i=Tgpi)oePo8r(i!;JZOfR_NQKi%`UhxPCY1imY0Q8lmE9JrL;Ag0jQG0agu)M&QBix6PghcMAk~tru#EJi3cF 
zybj&S#thkb3fkpn3t0J%>{hY0b6cBU>|KFuc!T=YPxr#IBAF0Sn+|krS3w!6pLXh| znN5b{gm7FV<85vW36B>9G<@OfOPIP+)XvWUs18p zpYF>&xRt|iCCVX<@Qa2Tf4C#~pV$ft?EY+Wpw~ZiTk`-L3uL1yGy;&_+q*2IK~5a1 z4KaQzvE2&<4BB?J^u-I+KFJgVhJp{xUSUJMo2?*q+Ion!v!yRA=(hd);5IANspp~A z(RN^AfoKnux)_EnOld5K>fA~k_P9`C%24k{NnZ#dkb#3)K>^s6LqUk9#i8&gL$*65 zjTKK_nw#Qm4GaiY6yuA+QYWL|3m3)!)xiyHvrG`w7bb{O^??I_5f>4~;zht;DEAvg zUM!3Xq;~)s6v!+@d$9{Z!dBKl*C>SJ<@v&i}gt6=7e9MlFkV~%Co_7G6vaq3>8=m%iTpW)OJH2itrd5VC-$_^q`NwN9bOoqF`wew7Ll8 zF;NjReP0Gan?j?jX+yGjh;o9JJ}N3gtR1?f2q7besA>|W_J65Hvy)yM-fJ!?$nJN# z4h0~@dqM*UIW|-QMC~n)0_YE_)_+3r!wLy;f`&PssD15GJoPW0;81Ra;E7hn1mwS< z9OF>~2juY^|7b5IMDR4S)KxEP2S(xb8`uIUq75&kc7K?gzg`qBjN++3@id~;l~^y} zH6(*~swEU7B6=75t>T#B9lcYaJsQ1}9l73aN`h1Z{EW;SDdvVw02)#DMI26njp9j+ zk4|_RQPI12FOm(T`l&zM;Sm|Vi;reMI(nGWS6z*a=v{m?88X0l8d=^|FLEQpB6=Sg zTak#5rY1VHg*X}7qtW~CMen1@2YLf&WZ9Qqu)`xI+M<&}8d=f%C_PPY{nQOlBT8Kf zB8`ei*+wxw`s%BZ5xxrwjMCQwq_7Y`jWC7P3p#s92Jqc#=SN2TE+{ZcA9tt=K>;$h zM?BPEEZms08$bq(*5)Q*_!3=}0|x02hjK24 z<#wWo6NS?LKUt&Riw2~YMzjFh_iMA6OS=2Bc zFh&k?Mltr2O&*0{YPbhIM&Ip3$$OBR+gLel1y3ljQW!K62U}xXImklB1?nI~`38AP z&^K$KRBWWJtVnj?fSxJd+M1|`9F_#}z#CBo>J9ZCgZN9flIl1zC2J@EGZ&X606{KLwCp};$U z1Bk)5AQ8=NtRczv@0~hs2|hrri8ey_0r!O+7E#3EMxrw4d_a-O|J}#H!X$NnKL%C? 
zyc$LHeHSzNVyGPf|LltJRb&XzjQzvyI?e3KKgS3V^}$RB14y%nJ2KR$+=;C0MlvYn z!T)Lz)Kn-lyPNjgfZs^~&xSLyF+-FJ-jrx)4yhfWk_cAdZxG!;)We%f!+$U%>gz-M z!Q!k4)TSZ-k?kBcawzhbxr5#oZrPcgFXHg(Dp*>{Mt>5zaP$&>`F zFtGe0mbN6y4n$Z319HUz>JVjfB5<|dZCV7W%pqs{h3Q&%12&Zx5EFx~3k-Sq+h5a1 zoj(1j^;g<}7vmF#leyj8Sao=BxIeW1#ztm%v@9w}Crl2e!Vuyxm&U|F!8hNX@?xog(I*A`Lhg>Tdt(^%x`D0iZ5JfL(%a zDvr+98uB(!xtbA=VMGp_0J}CpV08!3a6_vw@@gDrkLz0p8XWiiCPD@CG>&M4NSZYJnSI_dsrg`QiEZs^GJi#$WmIpSgJVcOOaF9zZb=z z()Jy-5XfSM;cW+zGW=2ZoJUz$j=Zqv?6yAWFaj|H4MYZT?}Rw ze>QM*w>EIV{kKCd3W!I;cCuOR?Vg5W)XONTIJ;2*{hqthE$=n5ED3euV-!yU(!U3w zZd86ZJdLP;U5t7y<|v-%il>pKt_A~ZJ-q#=ZgCaLA4f7C=u8@NY=~w-#0CRpJ)-b5 zWYrZpHD^VFYCnIvh6GOGDOlkxjFd?F(7Fz)^U3Gv6Y(GwY6kroD}#LLTwo$DZB0Vg$}3_9pPIe6W`F5s86t_JPv%K4T)rGL$v665H3{dav~mDTG#b71ClAc zzy`jU1>O+3BgK(`4yc25Fw{piiTXB1lpBa4+yQj~b$lqS!8h(!MfKZ-qDX1zRxpa4OGY{?K4~u#A+PJ`^=i*93ov^w z6s4f>w<|np3I`76yDZJ5bs~l=#@^$x8^Suic8g4UUE%r1ixufp&p=Z|lyT`otWk>a zEkHHCA*Evf*Zn_wD(MFU&_hXo$6?fcy46t(`Z9l70;Lb+V8a9I12Mt~iw&C|SXJ~I zlzpEL9rFTwfTqR>UgJj#0llCa2YHgATXG0-3Q#c(23cLzC8$A890E}dt%2(Wr`Pv^ zqiOERkm>ux>}ID><0U9g@mmCj(|G+BX9l@Kc1A=Sg1QA>4|0O-fM*Ymq1KM~oZ0!^ z^71aVmpwwQ-v=DA5=f8%TL`d03B}5z0D(aghg|@HtlS);6Zp4Su3i9QP&D8jIQXF8 zL9^fCQ0lNtOKQ}l)gW%LOX~&528v!<1F;Z@TS=`mkW~X{m4UblLsVl>*GUfM?z(>) z6NrC@QWV1w6V#QB>hc;C1B&8fI;#bGVMw~SwK6ck&~9s`FS1iaSOmBwa99qS*AQT< z4##q zbWT>nn?U&m#KlHwjiJ!RNid+}Q|$)M%i>5CVnn5-iVqDT=1ADU56hl@E>;X_Tn4lzESzOWen z8{W}0E7bJsP^07r78Z#1K&gviNXjFW4~jGC+*=CvxKL(isPk;}g%ARn4ub=`awrJV zv^YqJJ46i*r5v;i@79I39n2fv(NvY>;HlreSQr&FsQeC(1MS5Q@eDA;#P%Z@+o_aB z>DbPqd75+!eRbSO*zk^)Qoz83EXo)TXU>iSqF#`8i1PpCq5(m2SkmuWbvUthn32T` z(*a}TAZHX~l*MHJ0h*=7SP_-z&yl;>gw7FS|x}vMjpp{SQ;6jHe8IGl{VIE@HS2_(m ztT_;C4=WBtkD!b{EmRC~G8{`yh7Yym>wuzyMvi1SqJp}{cSx;>qAw&6%`mbQ14>KBx*nrF+!9CbtP|?~j-?-P_ z(((PUGKzw`exI7Ky+9T`p%E|~S4Ig@ldFA0z@TN{>5CVtLUoE1QShM|3JrB`UWl58 z9b()(ePJ;&S4Ig@)38I0oF7si&w?l1Z6rDf%b3ae&j)Nl`a zjJ_+Q$jhnBd%Zc7HBc%x($IcoU;&PvDc;(es7G#d3l7T@O~EE_1d9PX+^1 
zf<1gTu6xg9F+Bq9p2<7KgoS`hhQ)PRhDBkDe%b^M364m%R^%NadNRkz)^5L6kmOYT z-&2r%zjN0jdN34t2LMN;q`-niG`F#aB-_7t>U!7C4TWHbMHF$kk*EwVb@(9$-X z6~UV5h^H)bgA@sjbFlNFFpfu9796466S_dRcI@!}jPQ@aX>hg9FV{U#w0M6Eu$g_w z=wbw5tO~3Y5P=J_;7J2h%Dr~B`u=3-sfC_Sh&CY^7+4c*$dc`kDKzAej+h^~n<@|l zx{9FJX1sG>$E^VD5PYF7tiRghaiuVHVuv9|zI6T!`jae#9q5W(%sAZkm3$1g;pDrT z>r)$gg1J80%-dLzOb9Y0QxcetU|UgaN6Xf~Zo28yO6RZ5wn1XKg^`3WqJ${gu|K9e zTDzb>qcsgecp!x-^nHqvZMUgYmPpCv&&oG@+BXzjt#|dBkjfl#wqKa6`ulESyDf8t z;SM$l6%ZTUB$P)$n(Q_LmDB5g1L)_%$Kmx%p!zg(eZ<`B6AbXSuGkI9~_N7|B88l7X%Nm8t%3X8S{6HfnfJgn@*20 zY>WZVaAcwYHZ3<&BI!e1H&q8P@;UlMyrC7|4Ei(t8<0o?B)8m!&qla0 zV>rer%1Ue4>wFDmvY)x>^*)tJxY-Q<+Ps;fy=E8BdBB}IF9CU^(|Ll+PsXvbK&B} zi^+})4RMXz3$5>_=bX(*Byexz7{h@6=b~`$lO6YCH|a6s*P#9};ud~Z^-gQ+=2+iUuC^R#67sgd>NUhF~~vc==96nDIY9_{@M z2G$zM|9q9fGqs*r)Aq=_xvnvJM%r;sFFUO}0Y)#lBKeazv^^)^ify@WDcFD;zi@YX z%$4?}nsu|UfX6uQKW4tKc!ipgMs-$U%cPP?wr6%9ysBDo%`8gSA(Z(>NW<$wGkvG< z_Bgg5g>kB0&Q<>t+p*fd-TDXyquTOr*0%lA*rqO0u5)cyh&i_MNhw<+kIB1mvpC6S zhq#v_+68F=s<9HECw-ePGk%X7_ZUu+nt%Lp$+HcfJ1(twO@qZg# zU+4PzwxUC&s-*J`XN@9L|EVkw#*Ss>h-Bc5J@7Df$CbCU*8j`FxFKZAbS_Db6+Vi= zAz<{MPRX{)eV+Wrlt=5#q1_G#pYGfD0m!>mdd>8j6-CXC-)`f!aTH(OKmpPrE+Cqj z#JX_|96RPRG*4prlqJK$7W%*X(vWY-i_X5@^7#Jo155Wbm0h1$oPVPtzxqq6)PdL1 z;s7l_7}unfKbg+A2VjbEtOV<_HB)?ucX(K(%o?Udw&wCiO%J!w{$~QXkIj-XoUyCa z%%;YLw7mWdq;}#$%&cq(qKmUG8Y=~wp;Zf-}%cu2e+$M}xxo`QO=dyV#)LmOIIp|?4-WXyj?616*>Jp4pY zNDIuTU>f>g0bb}za~Hd`d=*ubPinpJbe|_Ala7DK50M22E>Ah=CdGCjT*L(T?A$}e zHX(yUv&LV-jlmT>Id&+#;k5u)FiwO$6jKxE`s{&Mc-w2KrH9WP^?4k9$dPd7@P4mv z@GQN@GxoH{PQ062B@oni-^6U|RASb%H>T0^Wo;fDQ^~2}Z4dD=4XibrsodPmQ_Gu^ zvlTTmlb27ySQ>e>`f7YHMqCud)Rm-RG|oK#D3CIa$G1oO<%! 
zntvF`Z?fjdct5SZkH#`20k~JLYM8LvT0=5r^Ov12lAbC2xuyaBL9U4mk=2U@SofjS z%i@0l0;WeTjjCz(FiKE-?vC#ei<=vfX-&$>heFTJNr2x8PTn_`td`y%8>43Ojtd6{ zkR3c+Y{aI*szs{Jc#)H4i>*Gss+E~adH#<~@3YxIo`_Ol^@FZs#P{ytv;7G{LE%|* zZdWhHOxedkzTC!t!9`X~W35^+cD3j3Ra}ml=JD~*Qd2MbhlXzFT(V@z3V!}rkIFOq z!^Z7dvvFf)nxCKFrOeDsZ)s`iWU=|aX0mmlLj@0taq{1W0 zq<5!e*6+z0-@5tzo7_C?t7XNj@+unIPMkQgV4jeXMU2e;Y0)QzO=fS4H{X6i>*g{! zIXP7ei!1S^ZVlr?)|i{Gj6Qo--Pm~9g4K=zyDD-6zt`vJ>aMo=ywU#cHG(L(`6Jmn z2RFU@b>PnCY;J}3*Hwv+ZQ=_*c@HzLN+X5k(4j-~H4`1{56scsEqKi&=d#AtopPyb z{*%2V{rc`7&!iYyat1e(kV{tobKcxkPgoIpmZEvds|G9GD7IEx* zA|&m`=)!#Q9zVE5bM)IOi*8);J+^`=$nY$w|AKQJGR)}r#`ceuj9C4kjBoA z(N8J4s2O|t>GS7bFDjfg8aIB;y%6vq&xvymty=)z%r=1`=Eea((vJ8%sfu^Et)GLp zc(6Wj*({?&0=nL8@83NfJ8t5(@2#~N89+KKWo=S+sz0<@!;uLdVM5E~D{opq#IG>e zxZvzZ<&^lz%*@)NwqLRjM9A#_@`f*%VQk@y<5QQmX-rziU|kH5jn$ix&edDgOx6}dOd3Y;pVU&m-?Jz$aA(0b?CMN(l> z&g;wD@3NnLYZP(d4#y4&a5HFT*@CgjuEr598Pah{)`815Z4UXL?S(^URrK?3TQA|; zcFS+`r7hA-OiT|A(?rd31HNRRmjP$JToL#1?7pL$dG6^>%m|lKNp7uSt|&-6*>owN zPdjd7tWKiQ`+Iw`6T}Tu&LuZ#xUmZxZl4Ao3h&N#X)?dU?(3 zFPgl?lj4Y3249ZxX>+|&wtNvO_prH6@A07{4<%NzR`HCTT^*a;_CL$E_ikgyX|V8c za&qcH68AES`10%=(0V)m1%l%yM)Ajj$y&p%l`0C1AvZU-tM+vRrA^A7s@6P<7q4@u z3Q^9wv=uA7Np<%41o<5~m$$10Pc6;J2o_$yNh;(*R*ZIhHkjJR7nVf|FuJ)#fKiz8 z$R|Bv!x)#;R5!ldgcZfBM9tqd?REa%aQ@)q#`2uQ^QTRU(YiVLcIE8t9BmSO>55-p zL``~Iu}kZkzSo8G4<9`ud_Mp6EC0{ST^-^FKZ$3Dna; zFjStib`k)fH^*I0viA4bZDC=dY;0_N{@|?TlW(SrSBI>-uJ>e`Y?0@I2tR%%3y&x{ z@7Zb>e2mg1NH|D4N>y*lJ8>*2M66}vSmqgSh1afF3JRBw=ef8Ts=r-Xjf+O$w0S zmDwv)fb41(zb3e^D1I_^p2!N~<3n@*}Sac6}L9Puqm-!aKiz}$b)gk929D)<^=;CUexHFZno>L^MeG3+P2*5hAQ#n zKJ8y!J-0UbN5jdyYunyGdz2VZohN_in9Ff8qM|YHPa0S(aX9K%gdI0Zp7Z@edjV~ zQ?f%_Z(j;eI zz9C#P)OWr(m(9Vnu+>iI05t8z0<uIUSmR7HbV%!;B~tMse|wu_iFE?+49ksQ??amsJq(c^|};dVTM z-wvF-bSHF8OjWY;N8Y887qkU4PHq7vMLB5YcB{LpCsSWv+FA>g`{omCN)9^gb(_m% z|A2JDGVGrji~?ZDF6}L1eB7ol!rl{E|sTMWR~vYNnO7 zHLs9Roq>*yj$-Bn;8n4VP5g1p!C>DJH#ZYV#m|*vd!Kf>8T@C?RDHX8(*+I7lbikT zlqwxus=bG6d45Hl(#)kBI6k(j6pLLw63K0%e*1!0(zc40TLCR|w?BQOBDg?uW!$p% z4Gs-ow?GpWm8~ 
zPY$02uEi|x_n*(WXq1N;9+^@3AMQg)v#zWC$#0i(=7?8^Ri!FN&EQQur(AhsrwJ+d zhSy@1unpRArj~ik9tqmAUok@G@e0cQTsnF&{* zzW&rgM*BcFKfgKk+Byq(*(Z%%0N_zJ=0&6uBXe_R=EjJr?yNPrn*C12G#LP1w8@&za#a|8LMjoK7BtyI;;@ z71#gE3?`}{Q5uMl`9lXCKTI*`Z|5Tf}kGT!SJ&g$kTdT(Qh zvh{~#{*Q_u7haj|{PI@t8SoI9lS8gS4HB*YldTX$IFn766_UXKAB;?c&4E(kmbL@#wvyVy%~bzDQk5bql-n*3)Q*qD=#9P2_2Y ziy=H7tqqk8iubqAl{U2G_qG$^^++i9Ox#(~;x6hN{y&+IGs5|sM7i28zytcPQ7+cYI?;4m-PV}I|8qJ_yt9k zm97*sdi9<3ZnmxFK}Xr*_HFOCz7-e3KgNA&w3%~odRW;|Rl`rNkF+U)`b^H%E zH%$OzrwJw>xRtu07p+Nk=6^LQ1E_@Lj$wvuxtU`3{6> ztAkh`X0!VS^EX$i2Mg^!6)-O&GQq6Xc>-I#7k2i`gdBxg2Tw0oHz_tsSkXYJF8tEw zbbjkL8D1_a!)J`~W&d+@DJBcxi{b*aJ#HVs44b-UT6X~i_-O_!pD_3Tl(NRy$J;hd zwZU*=VM`IW?o)0Zzn3O)?N?N$H>C3luH(#5-L0JXp~fmZv^@I)8#j}KrtbKJAJx+n zEsOSjHd}wnnYC&e6igN+z7hj7A|hH!u6O(r!BB4L;apWk z6smr;=cP%?5&ncLC9QVRjG6~+R>lZ(v2ZThB0A&w?D12esCJAg&;))!Vqne+G5|?k zwtBzy9shTW&>LfCvo+_<*?aN8U5xOVEM2Xc2ji^kT=E*7h^?s^SyjbvCp=M^9mLJT z$=|>!%L}5hSK$3OajWZ&#p$fZ5~hL+w{FxvBjCz%uz?19fdGel+0LR9CC-@O6w7GuUW;Zi0Q`9GFZ zVoK&&x9rL@X`rVHXy_?<=H@j(Kb_;{ES6K8uvr{ZeDkOj-#^ccI2WB$VV@(+T+U5paDnKOF*0*`i^nx9evCZh;Wvk8)|L`TJCmri^Nyw5p1V1uh}*IQD6@P% zV6nbV<%~_^0FT%d5!XJ$-tghU{p$lJD?d+7b9(j72D<)C0h*6nQ8>Q9*|vTX&$_^` zYK>vRgykB1Dx4oU+g3-6neUu_bN-#s3 zrt9lxdOlwsa3WAHBHBH=ZA*3rFV=%yqGqpVi>3(sXCS5tQ#ko)Nc}rM2vm1^>;73@ry$0>)+xz|REn3>u$pMjN*0yEv_9IW5J;o4v?=)6Af*Q;uG1I)K2wt^A{ahqnwbmEiIG1^!eG{ao#P9 z?Xqmemb{!VWLLYHcXDBE(qf@cANfBU=q$W>X}kAgskQ^zL8@#H+m{$bJ_dl+pTf){ zXU7@d=_;DxO$$zFJ#G$9vE0cmr||lW)uu(Y%1dUO*Qi zs#6x9@0)YrvrPPqn(IIjw6L50cg3|ts^*<;o!QKo6Z`JD9&?f5Pq){KuTD{I;xx<4 zR8RXBm}|D$IWB-J*?5ia9qwyjicP!0S#0ZeVDY=@ncGuzZ{G@9Ie*)_o56LKdE%32 zF1Fa6@m(YCe!flfy(tIH2;vo=Y^}rJGze|lZ6OkPx#3D)(v+Dsjj{>~$@6B+PdR>Hmm32{8*$nKLj8+bD7_|j=BPA7!a+9>2}OJ#hp zQhD#H&@Lm*8F;Q{`?#`g&&E8vdvu-ML#181Y&`!mwC9P2xpQ4m0}&5ScFOb`XD_2N z5d0Zso|49WwAy6U*4hlBvmGww;#`gyANOt0U;{AL159>mmT62GXYqEq522+ef`U|} zrCq-X@Au1zn@(~11Q~SLYOWnm{v@`hL@!uPR%3uAc%tfj~<nQQ_RKXt4|1E>HBF7&LpH>X^W1>51z~SB6IFl{TuH+N^fGVOi<^ 
zw%z2TYD<0m`k9(i`;0exMOFzgI+R9R@|FKDRN|!a_i5n~t1bpcCEtu)p5U0Uw76L# zII+;ReRUa|mt7dE!l%mQb{CfucITQa&Wo_t++V-x9EV2&>*8d-)0rvtEJ9ed{h1d zXL0>?aMWeR3$+*HZRXS@>xiu>XSsBGmrvcQWy?2Cxm8xma$eO_VJSgV(Xr*j;yoC% z{7=mv?yRoU$C(-bqg`NjQfGRN%9BTr_?oJpEaJ|ewnlG{)3@78^a_)l`J2mM07J<> z$4%W99s@ScSk0Lro{}n;A*W>KE-luq&{{*dzieaL=Y8)lAHDbV#$KDII>ieM-g>jY zzHs`Q$iYPtE@3NAN2|A|`@Fkfwa+g;uVT_{ew}w~)7bpYCuzx!l?=WH*kq|y^L_71 zm^}!p2SAKw;f&>1xqkDLa!UDJPcS($T7dq!D!YE-xGXY&Gk}k z-U}1UrxrhSx_8+6lhn54{b!Qiyfic8XKZ#_w&}tRh&*PSwtuf<{TI1;BBo1)d~e+n z11!ocpfMDmsMZwLe|hEqY`(aahhyDm=@1e1WEbb4dVha^Vrx_N1hdVTx1A78imQ3D z4Mb=0>oqowRq<$DqI)UY_0XLkTe9&o@-H*zOy5)=8&`EXQ7SVxqfJRM>L}m$c3zCyz1JWt1^5U52^vrXkR_G^tsH&)zO9J3Y80T=lwBWtN19 z=G2)y#ffa5nhou#*NhJO=0%8Q)cm!#j0 zE;-3?=j`hZZ96$^?;d7(b+3eJkFeD9h_FV!s?*E9u)YX!HH*6K#bmzqTq5UV6(^G| zk|is1C5^L7xY%87tKR=N<5PO*JT7L3*?^M0)24QH=cCEFac1+1)Fm`9c4*O~3Z> z`^@tpH6&8{lR&@5%`XK`YxrfoTE&4nYgF1#1!l&X(eaZQ!!+Jv;{85s`wTQe0D z6kP8by@=c#vYDH!n1D|&3gzFLz2DPw)>hY=;-?xHH}ffet$kD`JZ|EYH^+`0d#yE3 z_@l`j#^&htUM=<9EqR3;#omH42`B5%#`_sYtTOp;%DRQ$@63$YET1_iv0cP-f6y6{ zdQwD8Uc4X1J@(M%<=pSKwTCksyp&U57xps29p2@5D5M0;CpThUz{LaV<_fd&qJ5_b>GtmF{u3V$Cxo76)o3U4B@;E@gSC z{rgN6PZz=Wy8Oy#!y;!lKACglM^4;=vWsc)F1G&Pn+xA!CmeApX?1W>a9xcHk8QuP z!^K$Ab{%JB#nJ?;ZJMsy3Uk`NW5R?kKi=XRu&MU6<%>@~tTo}g%Cp5z%QnSxvPG{i zY=7x@^t;qHY2Z7}u2BNcdZ7jXHBHFBt`7DiCnlwgj3rBI_p;R1J{4Kd`#JR(pA)|E zTmG!3clK3*YhEevy&SvZUmbWMy=Bdm29} z*jgoe_PE7u>(Y#M3@1ISZ#|7&XDh~Vd1jif`8I_b{&Ck>!!T`Amo3_=GyM_2MB~wa zzXnzi2!^UJP4t#lM}JUrYSpQ~Abid;>X!T3?+Z5VTp?g8%MPyW4b5hhjZzt#Tex__A+xFf65Q&|B&lAj|c78+KtL3~u-o%#wukE|Pg6tD7tH;v;S92^ruYjXCpKDp1TfB#aU9NHN zr5n4iz4fZ$4gY*cEz?zM*ClQ&7Q1Dxs$Ke#MPe4G0@ghsG#!nqX^pR`kF@$4#ILKv z>JwBR#HTIN_;_l&lh&395QPI~=NDa)e@ ztp2_2d|#JmPim0$S}EPD5inTaw39d8&%;TYWL#Dh;c{`bk63;~5qo;o zLH4JUK-lIyCh^uv5J=CJ)kw+toauGJ?(^vw?9uU?%>B|D-o_o_xBJL})s19xNcV3q z^5#GNzr&I(Mtj!IiQzAN^YLu{vkIO_D6=na7*=#PE%;u$NO^gleA0@;J}I7lqTidG zA2zeH32$7fymoEU{kJ_#N&ze?24*Qg;*EymwW zm99cD(7cVav%qsV#uP65;8nQnas{d7Us;{x&22lLszomos;J%eG_xjNYH!JtS*d>9 
z{Kg!M0{8kgvNc8C@$}0vU?nj%+o!88&YRcVtbsG}O<0lLJf7X*rw`?dd-XZW^8qz&+4Z6|dPA?zuY1+Sg)E zAUA*MiR)KNcQ;0gyt7)FSd%?>tC980!e(2|+htsA4&rBZ3npb=Qml3NHN{Hg3B9b*6Bwk0IO{*t{FW88JGETEV9;?ruJQ{2#}% z=pYY|1RmuWJS%IB`5b47SgDgSP)=`A6?al_jbmvVa^m^N4ZxQ>0vglFmL5k$o-qrz{_ zLe?)f9E{n1{nWcTCP}AM_sVD8XjU(KVSc#fITzrI4%tka>U3Vz?E9zX#PrqS3)biz zHB>YD9OJ(!AjeBj!6^OYQ=tz&;sI?=!KumIQHNG5{pWC~V0sM$bL9J{ml+7QyzWcx zesO;>-N%0Vkw%*$)|#-RwFzOdVPF5(Qc+P?C!8yB*D;w@qia{`_;0f8#)?~E zuFF*~JKDZ57Z-IcsyE4TkIx7eKel{A;L;;`26e}CnkwBK^W~+sXQZvVqnB$^TeGx% z#as7JQgxMw)jgyg&z~ysXf4;%Ju4XN!^>JWw`{IZO|)v=&LZaQQgeoWr&?)J zeYw@NX=93izPZ#D`|2Y3){RR^x*?mhl3m-w7fgGyz2IBwyox#hb9s4woNV!{8KP<_ z7Z)usHsaYB;1`q^m=Tn->VH!DuV=Hj#A~y}Jctf+1BA2LR&T%LI6Fh{8S_NV&gEa0 z@i1~-YPV|7yqwsq672aL_2lp0v_9Rw?oe^e`E%Z99+LZ~ajeC9wFj}mpBaK%AFcVa zt8ioZn{e)$bX?e^nsn{Tjjz}@o&UkT;9t(ds&5l*S{@SFg{%ZT_w8fN(Z0LWN9dEI z&zA{@{OovguV26Zx&Ac1uE{@RV?e&L|C6K@miX^13k3HR=nC!ESt#sv^vRUK$flUK znjH5WakY%dFu69Vi4sctT?=$gJqqV}%ho;Sy%To(-JC~AHBY;P+J&` zdu5!x>>5jAPQquOETH#OwnRlm*(rsHT`fB$y;t_2lZFArCs-Ce%qJ3wX%fcw#s`8h zNtgjdgc515gAe!xJ-Jkxr3>^_%y@{Y_0~$RC&WLI;y4*&_v-t%s8y?4H{F;QQ0sQ@ zba$V-MJ`@rRd6?iyZw!SXxaCGO6JHq0Hm9mqPbONL1UK` zi`S%c<-dA#Ka{)XqwYyQFDo`q*VZ8Jx~B@M`DZ>EY2AFJF+0)nLL%pde=1kY-e~+@ znS2&hCSE+Zc(YAudZ-O3!?6C~wMs@A9F5Zj;xvklpd4h!$HMI|LA+nzPomWta|tj=d|kd^^~ zC=NuVou@6n;M0FiacaH^TAYUu?Edcwe+RSUvG9n4??WlT>Z~a8Z(?*5USx$m z2CQ$+GVrW5LfR%_lO4kLANwrfkq{U7emN&UQyVFXsm-C(PU_h0sC7~6d|GQZj5n)t z)>u{C;$syzX<>@cHK$Z&2o}KqnPIxw^-AzD*SJI9%v?zKn3^M%W18!tN`$QcyM|NP za8SOudCNM&|E9U}zBkES{&C9IoJdjS7b*LmrCm<9{+fDtRj?>&A@S*n(vYVA_D-_S z%V>7`S7+jQ&e+N)AhVzwW?-V^?WZ{-G$S?j>?sc7-eenyMxn^_rVF!pO{9MPa>J-o z@|;S+^Rr{+8u_;Bq+l5ep0~VJ2+t~xKUx;Ll{xx=1hZYUt$6+7Os<;ekDKQ^NP6y* zOSo=f*Cv`%Qd`=5ROR{+m6*5JttZv*zI3xr$FlV0UEQ!R>kkx&y((aRb@=qD$E#nw zcv1QI;lu1O0X*(>a1?Lzfv1Z8iwPm()}OWjUbnU_WJcw47M#w5gI21u;74cS%&=az z@_&W+*AibcuSoiQeFONBvlBSJ9G1OX`;b(9Uf4le8vCv6)$*kmTEa8Z*(zp~JY?FI zY~`uE`#M4Gorl+qMbhMJm7JGTNV%PUKfMS0A 
z8N}Wed2wbk-;%zYAN2ByxN=3{*|TSnSi|&86{#PxCq+r=&5g}TXl!)>=$jaZcpZ!%{^v;PlX?&B=kcO*HFRtP9sAmU}Ic zkq!Ua^6?rR2FWvxRw?feKk_#RTN;Z7tXXb9S4$M8>9fIx7g@NE<}P`Rlj+qD{8vS@cRW@ zHq)%2OO@K|S34eC8LfL}e@nqS&Ft?-qiQ*M)~lua%$qrT{s#5CtL2q0Xkvx97NvkJ z$IbJbIWvA-a;@>=H5fE?gm=22jPdYMzIJY| zWtb%_I55R*oFm^Y0=d$gd=tny(!6A*E64d&FX#u|Y0O@s9bxQ%OHXKj3n~Qp&$KBo zVTo3I9vYayRdnJRIj0u8B*n6RS*T*hDes`zm(iC@X0p%Uy*{gS#;bT?j7G|&FCdAj zdAg!_g zGP6oeLp+W_*s)4E_H@?F{d*sMZ+^bve@y4-Q^7R9UEhE9v{kN|A55pL#+9yda1&*)|Rk$zvn`$JjIp%D!Uz)Nr z7c1>I;{MQnbKIBg;7xaDZ#;2jvIh1}M7wtS`?(<-R;}Hy5`H@ONa8s>pUk2u22~pp zv-1D@SXzeD`d@wJ*9MQJ?tYEFQ!|$_{v!)zurs0A{Qa(9j);@Vt7+%bE_X8dVR6T1 zO+B7=CS2;Y{H%o$0o!@X%J!f9Co}5iohFO?g^rjA5#d9LHxBY=+7KwoHV|0iZeeTw~QK<(MP?XYVb5s_fcu zVL((;Qd%kLQc}7>x}`xyIyT*@ASET;(j_1uAh}UNHr*gC-L>gHYx|bp`QABa&YXY# znSbVY9EN#@y`N{TXT^PA_jO(COI4C+jBB8kc34j5k`thzfm9% zx8iMJ{^FFKM9wZcBCBzHb2ZQ+a+YrDyFe|>NS}>Y~F7zD*MH=GkurwvdS%fF#0n z+4a1tqK{3Zg5+VvI06@Y?zs;soIMdG(N}EZlA11n2WW`jbgA)!hvw43u9yqL(wvJaq*>P zPtw)|fUA@;gpzA)#^+rv06>M8g#`HAfOpB4GYh}Tda!m4ddS;a5>W1lf&n!(D~49~ zEFnMJjVJ;kU;%M3A&ZU@*Ry-KY5IhjnZxLnQZyg>`TBsWd|XDxc0%}!#LS#Mg4X|F zZzflcU=35c$QpecUkFGs6x0qred1=qH>|PI_1y2_lOz3-xUpw&`VQh)X}>(CGgaXZ zc*(BfQbry*aS$V1No%g8yhtdA&90B( zry4Fr5TzBgeI39`Yyg?fQiVzTpmli@y$^DcZdLw+b<>!7?^8!PBo+usC$w$~fCLW) zI*AMRx-&(Mtt6TBPQwE(x{!9Lqub7R=f34ZPv=*6K$|fpV!JwwxpfZii~6P+0krZ} zA@A|G1@Kxz7W`R$;nSg13uK ze0@%K=OpmFwZCMNa+&onbtSHqlsCT{)Q`tI$bO}^N$@zykl!BYvK!JUP*-U}h&;j!7PX$sq{4^7p&58q&s(u8Uf z4RjXzIy*a4i+>=5U(kt@dL-Q3+9-!A1N{cm6ArbR!lZ%{iu3I6%W2^p$9+yO_SnOZ z=oAv3EKodpAq9rTDV}PWtuAbzgP>bhIlv(@mzalgB`TrB)2sn+zc02sl0Vb{7futW zdJ7ec0nnj)R%@{smWH!fsCOuG9f>dIS_m;cjkhrM8%J zv!0Z@_%po{SIOoXOq#(OFLYSpN>zmi_qA!pwAg9GZB`;mIEp>Clr(Vfu(lK)>ky51QtW6R zc=C)lP~H&W62Z;(_20zegL>Q+kJH_Ym>?m(o&b)#%%!ooW~H@~$AiY2_U&4@EUL#X z{N;EjY{q_8A*>bSTHj$P<#yBcL4jH2xZ!9X{N`z0ju_~fFBiPOMPCcMZ5vn`Ioj1Z z8kJ?=bd&;UQX0`hnw`2qhDfcUrj*g*1Jae$g@R09d7zscOuQ8t!gzdx5t7z69cMjK zOed|r$W7_C?@-e)?zDJHoR|ZYvwn{Vny3JWFpC0>L|#k>wqMx2Sc{dnn9Ze)=V5NF 
zX|OAMqynlSK5wd`-DY%HZs#msT6V^cE>hFb?0A|2anTupQ;DAfx(Cq*AO?x!6>yB3 zNK6!@<|t6LKSP(zB0@D=4RR;o0|ECu&$5NH5k2 zp9T^uFT@O~yL+d7eNb?`yWzdy+{@**o#{9F!Jw{EGm-;TT7-lyYSMg8^Siw$7f+d- zuEJBJslD zJIxOCR{K&ouiPXad9n5V)hzt5V5#ql&A}=Hec3*d;7gHJ(q(ql$Sy`WSsT&Ex8Vs{sC{R7q5lC^h1J>cPPRsSr7HhQd`}1ViycG8JRlwxhp= zg=|P7=1)tusdz$6FrZ-tBDIHD^+?EDs6zPgsZ!_}+ZL?23_?-gD&$v)|JUV0S#%3Bo+)tRoP?zC~h9Z>g*o#p|0 zGeT&Ah1w`B78|`H{srRE#ceT00HhEZs$6A}OF=?8iyFu}t&r(@Ezd%ApaNCgN7Y$S zjL7ho_RsV_zB=C_proDvI=QvA04wDJCab9s*m-C6Dluvp-FscwH^}p#(-aV4m9tUJ zK$nzDne5gtcCnSK=F-=J>TmFMSPHMLJdjvs$;UIFYOyzc4pkd`f|4c)i#oYlN;I|r zY=FUvY#=3xiQHdwCL0JA4t)!x4uSZc{($rCj0cAo&edYo8n?KhL6>IfYd6it=DWX! zy8_{;`qc%1zSe$Jm^kmvOV5uv1o#u7G*;A01Yu}4xZ2hO(PN&_Nie1F4;Ev50DT02+jH1%IA{8HBqJ*R*WO->R{$RoK*(%jD zelQFYxR~g334mNDT}*(-WL?7=FhAKdg*-U3U$lkIBn#`^L>8rMQJF5$ty+%%@Zkfe zufR<#a#U71pq6`Q)|Tr`1^VZP6tPm0XO*?4=I|aMh<@j_9BdV=k}aY7S%7-9IW7sJ zVbPe345KcU0{`c){pk?m*1-=+ad*J5cJ^O|Af$zmB(S6!dmU}ArH`k68u{>nRbEjs zcc=M0@@c7_G>D~HNY;Q(a20HI07yvG0KsHf+7}RrTKpLTS59uFn6KME&b`vBv5^C+ zCJycl@RQiH$I#qSf9g&xgJ;Jfugwp!8@KP|>QF6X`xG2Qy*E!N*-xoSz3H~-P zpic1ZrC3WJI%>-7M6-`zTYG!MCp~%juwCJdN!=QoWbN$@&ohp3(OSTM(xKeMLcIz{ z^X(GkfPyuPjSg9BCMS|OQX#UKmywnyMpWVekv(GtA6PJ%u4YS`We;#5_&jUEM zT#R{SC5LXM6q&PuLE3}G_*wYrA&C6trUYacPj7iGcat(rIGi0=t`Fn{9|XNOX^(7u zk(P5rfj4)y(wA9q`@Z{o)t7z-?|EVbw0=6VZv8H*Th%+`Sa(um*>Z#a#E>{W0-mf_XU43 zu`@PpiHw~skVVAf!Y%2%p(5TV_E~7n6xG@?i{{-);(*VRU*3Es57&@4R*!@3=3l>C zNr;P+a(vU06D7iY`s{>bPX+BF;V}yjT@U*r)8ogF8F+ZAHK}af1Pb@OA;cPYq~k^9U(U8AML=`2lsJC@oE|>e&~RqDJ+TE*d?iU4B1Xz}2nI+qc)R zjK0rfMcB08D>w|&2)RI(phj=WR8c>Xy+qvm`ihEgAN)u^pUZr4ddmoq11$~8zUL^W zI$XWTPAlO&e9|+2JXYEGybz3(F()S{$qSb|Bz$N($zoJzrP+Z(w=I7#z{JUkjy7{n zntQHH0D`anEaWmOP4AmTMi4(VGa-TgrswB~`HLXFOX8^4?fDdaA$YWfKvlSF{EG7B z37fd;eL}9oA|4^ihUu&EI3^7N=Y8!-CBEAl5Su))# zPs{Y)Po&+DUOH+Zs~x01DaaEh=kbdHLvCv2PVmB`K0PrupiqI}qe(%v;-WdWMM@7y zL7jJU0V%2wx|?wQVm24+O~?M?Po16=9?Vn4lk#j)tzG?@hJ~cF;S|yQ={n#jK#8jE z8L=M$5Frf};U&(m+l>Pu8g}llS&b>$PR?hq*)I={C#=iw=Jq{?D8AeHD8^w`72{-u 
ziRsiFvq!fAI7}>#%(&OgEo%ajZ?Kp`>IM7C3-=glw?bgo9t0-~D;?&ct4=%9Wc{2V zcfW;#yiyy65AO|T|G4v$@b2lmA+xlg#tW9pUr#)5eA~Udv!}H3+JqYCn(l}xU>1L2 zQI5$E7fwh@dO8V0<6@A_jmGky8Qxop2=|kx5On=;z?Dm(n+T!l72T){gMlcjO}Z!E zK1`6OMHkjp#CAdVUNM>T-IHT(8V*NwWXDB_ZWwgfWn+|$hn6-cWtPnbiv$%VMC|=3 z8y@w}MVFPsA0geV0-Rf=^|H zaQGW_j~wi9eUL@btLZB4f7IOsnNwPL?<>R8*nCdCETL){<&qecS8BA2OAejndLCHW z1*36n`UwD)ahxpT>D+sqO4HvrXIN#1eYi7Ai4(y2#hg$Q2^kM1V6j#zun~U1**vx% zYUBO>#I_{mkRu;`DXB+wmm@cN4j{5L_(_JtORz# zr1^xSDW{u=S+wUJm01?t(+mX|F+b4mbl?>xmsisU93foNdQ z1$mW;G}wf{>x^&!7*p-X)=>^HEpS;3RwQ|i-h{Aj*-h^1=;%P$jMO)i-}FQ*n{goA zDp*wQGX~ozsX0HC04_C5#mZ{4GMJ}8yU6f?G#FDjz`p|ZvEKnOm1uYuA3u@?h&7Xf zo;Q$j`$NH}m8k~ao+3VNP3y;B{XJQ}5A(LLXCl%yE5GM~ z1*kn^;q4fXTHFJ2xPA=ZF0qN#PF2v{H}mYP-2gXN$qtydyZ+>~()%c!hz+CNlDAn` zYE?@VrLoJe#UFW0rQYE=Ah+)NqB--%L8zK#TRH;l9b?E&g>69-GvHN=NHPs zi;(_y6bZ2JZ7ldMBkV-*bKf->i`4OSeWb+NULc~*HfSKg|0(#S9(r~&DG0|-XE3fl zk5@BVr32@PtMShc3x&D4xiB!X&obC$kWf%iPxeMY?~|S%@2G%(*SC8^iT&TuP(8y2 z-!l5qq@@SNw@Zk=_FZ&h(VhRYS{WqxVISb1jAroJJ^+|!C;;0H*Y9gX1?4YazEqqG z-cPZ7OM(eT!gJ6nED+KO(E$*ViGOEwe+(2z3Y~#C=MsznF358p&q03l*!9RH;gC-5^d<@1}}WR`EZhbL@NJE_04v^3Ju+glbL8%vOon5ZrB z2|4ZtvI-jN&70F$=;$37*w{@ZgoLd)F7qZ2t`($Gg)DJg#E?)?-hiplk~CpJ5H!n~ zC_EKGuNv4Wc*mXcKg_}|!u$dMZI5N1g!LceBjCQUD%KK~YUtAPiXME`KmS{%06N=a zOZoQBZx=x{jsFo|#Pg8NC(WBolvJkWs@jR_zU%vI5LE1B2lpmW` zxRDBZsZw|-5Oe+ z<;C{)cH(tLe*R=o9N}oH@i}*8-oEUJWzbMpSI;`@O5`X27ADiY?!n*0K@O8QX5FO1;wNDP}2g4vtN<=bkGrD27oQ2gxbe}ON@Z*S^jHc1@cHxSH_?`kaAS^-iY3l}U)78l-Umk$cf_nP zUIRvw#bB;nP;0f2IMIAHs5`~!8*5$(%O0lhAI0s;zuKveLKZbe$a3d=b*0NK_ zYV@b19@?XBblaysfStV}1)?8i%d6`@&KHdE7Tb?nY@O!zb z!20?!SMAh+P2LMwlM+K6OTo%HF5GiaO8@uoPC(p{$>{~>?-v6ko}EV%<8W_Hmd7-l z3PfJkQ{{X0iEnqVodU(de}4riUMFxe(BJs&A#fSNdVZsAFp1%}e)G>m0~@kJGC?eG zmi&7|Mk53pDj8$KcOl>#_4oG2Dh)P7_2t%=e_sK=pag*p4V_TIB;)tTO`!RI@d3o% zr|AtvhTQ)&xm5~>!`FdxkT$HN1HG%Oiv^GI&37RmNh(IhRkJ5go;2@#m7u|Qn6Y-%UZ&CSeQZTjDTslIbhAkGe0 z7ukbkst7;|gm>>sfBEv|%9QQx=nmTA^0K<8r$Aml5dG%JT7nFu&KQVhM*yx81e8Fo zufNUdC`x6XPq#jN2D9L`v%3iu_!zsOL 
zYHHUuQ=c6)!P@*1KpQ=B6geo!3@Ct%m=6l-2qdTg{JXw^=gEqPi$((CX=D>CYb8Pu zPal11H5&s(>Sk?_^VxyyfDQPa@$L7wJpsnH0X){D{n6Bt?8Z%=E=p!*X2y55)CfUt z(uWA>lfOJPNCe@uH}v6!A&l?ic?~tXliCBwfS;=*3!uP?x)tB|)XB8KhT- z5ApWo_&$TKxX1(_yPm}5VtfUDfh4?v+PT`^rJ!isdyON)I06j7W}_v`seYRN&rX3H z-(>cwe)@pB&zQfx4_q^RyWDS_Bts&( z$xX&{_;CAUz`Y_L4#d^{O6e$}g4N&u@d`8{#_-&~hkRAX33g6OdaW_H9I-wyS2FOy zfQzG8KV<=l=mP7CcB-LAqTk=SYc*A8M@JR_CkAR~eHDN`Kpxmr2in4kLyg;SqUfOo z1_r7IVcabQ&QZ=F*Hs5DcWZOOl=QxaT_d%SS!|71TWp;z(ZCf$PvxS2e&pDZ7uwZsM2ymA)Yk=!Has+YY^mwHwH9hD)?L$R68M@j!Y>IoCNv?!K3aAWH<> z;3VcscgadH8P!i(a9pynGRG!?o`PU)ZEc;UWE=Y~c=w+`hYrf)9{Nawm#4ir za=gKjYWV-GTc9D?5hgX|UjURN^V<}bM~~DxU#~c)fKqT#QCKC_Pac#8*R6MhHjn*$ z!E#Z2Hm8{mQpP;9moGInwD5w!f{F$5OjAZX=VgQypV6%kSss3mtMi=zJ6QBbEhMwp z!W%>ausQqwVy%+s^Rnjw?~C2g?3G_c-4}-0)eeCjUeg7im^cw(_Guo&QjS56@fwg} ze%8LtjcCRK?vtiyW+UlIzEve4ySM3#V@fx6pSwBN;qc(<&-xtNn7BBgzXSjkM%oJ# z6D{fGS&x-_e*=ofZ%KVX07wWN`7l{pJHa=1n;H&bo&t~}ZFqQC{VvN>LYnsf8@s@L zR#X7k50xQs$I$$d-8F7dqQbC^HcglIiMIuM*oL;C8h1Gz?|$X z_r7JEygfy5hirA(?U&|_I!|nA4HZNc`O1y)iV2Y`jp&>)g*d*!wmBE?|9`NqRM$O^jg3;Y?+`M*EB$I%|Q#-VxVFj=iSz6gh zXZ@q8fFS1{2mj7??!1sf9cEqTueRx8EcOd$ckAEzzVa2VO8vU!rnb~@i-_uRBBy`l zRc%mbyNFU=j=pL~|Ek!x1=nW2%Ar-Oi92VPt-*w@vM5@S#)~z*2WvxXuozm2f*+gX zRlyUrkf>nEy1X#ZajeLYQ#KRxfEh8dw_t%GVSprX9-58)?|lRJ=(QE9|HU`ZrikCu ziSthfkNUg&jiJ@R9l&{Qkgk@zy3PiG*X1B%h~HcOF~AG9Yyp%Ru$;;G4^lm&K&+uU z#3QjqLEgY2IAU{i6aUMXwgQ+RC_N9#@gPw33Mu~d<*#4zG)v?tXJe16@F-^YMdC~C z`3yxKzJ8(avud7md}*&@vK~G>5C{=&kLLUlrk&~QI{IWAko%ZEwkHtUYhIZ&m)gZ`j(dLxt9}#^thFPot!JS^NRJbB_QI zKJ|Ll6VPhSaDcHm2OgkvWoJ5ZMBw$j@-IV`8soZ1DEpBI{CW<}t;UH!F*=#(H_r49 z!<(-U4}ooVIi*51o9_HD*}%=fkj9B6wa-AD!|RJ6erEBaouC6m<~4Q@Lj*C-R4hY8 z2~SqxMuF4UzqkN*B&CelbdQ$+hfvEI8PMsBaod1m;K^Tz12Xrd`may@WncIbC^`*r2xYf z2jm6V9|!;4bMSx-p~zE7EWLNSM0mKm&%AZMR(NpLvQj_zf$t5d;iBDw?W^8)Y5p)h>9JTWl5;MdlD2P4pF<=h!J(HvG9g{hv95)qH8QqlN z{`f$^3(Z4K{Veet;1?tlSvj1$oj;f>d;7lXb7Gqy@tbHj+rYclW5kUP7sI4vnJ3;~ zpt_%q$+#nVZB98{6y2i}x+zp*&7lgIiKTg;-?&(geB~?jR5X=Od#K?VQ68s~sM>Wt 
z9|EPqx_6v^KsM|Bv^!Jch;iq-p4xMv6?>qIQl1+JWrhy;x*ImD0~d`6s_-+~0i&OP zkZ&c7{PKhJ;{Mx-P%1G#U6R0zHP=_gEo&7X!|l56?P+^JC1=0y#oF&BdK&VeTX(mr z->*pJSOU~`KBWnZ-pdy+SB^y$F&4hnEWbZXLN~&<{>!A#B2Ky=2N;=xgj)L}ksTp| zZvL1193U3Y4U=6a*V{O>tsfqA?=R#X&u^FMQ>_~UcLS%@I&Z4{QTt)%#Tivy z2K&7G=%yU1hT#!8@3fAcn3%I|Q1CQ@L=M()M)Pyjo(%JkyP%Pch_}=tX2nlnc=M0r zW84+6LO3+nE0m6G-5=Q~3dS&6K|B9BH)JtiawBgnn%4LSn&VZ2dLWCmK;kG_OmOru z{HyFf74GL>GYoiO{ly{Y1xS1)kHJz))o0_QIRVQXtTgai@)oUD!(_6dH4Zim=}CT1 z=+=1?_tnyhr8|7{)dE9!*Z=AZc^k-{VXp|eFY)jG*z{~b{y#G!yg?*Vj}*{?FirHI z2CL&=0tM>Do|o9MQEjVEi%OA_hr7TK62_w$L3CHFMe$dgZXw$Aw^bvUAVoZ9T-7$= zV2CQRAjYm5xj9&nDG_Y+7Nllec)$rzwP<|(gI^A?p}yftHXxVcV*Ae(jFfHh#^GPi zfq8yF5PzAhS0fGN4f#Mp`H!>fGsc`C@Ao2qc?SZny@vI{zb)~<%VH#eVIE+&|1a+T zyKmvY-TgANMUeKXkil=FA?cIrwub7k1$&NlRy6-J@J2QN_gStwSr=QK*Yb3sxdvemzv;hQrCGcWVdv`MW?$f3z7Xxp z7`5zkVBbh<>JvFnyPV8a$&Bj#&{frJoql=ITb0@Lls&yuP{HH$l#u*h-a&h|uku=q z=5-n5#vrs_tpq3!Fi9kg62R2itpv)0Z@=E52G~=8J#H{N!T^z(&KctCmBX1Oh@n$P z|B-8fm|4H6RT?{B`$^-J_Dpg~<)|sgq|s?U8^5BIGdkyQwyHU?VF+JTV5=xHqNQox9MsgfOWpXao>qrFmk^=40Zb4pubzx*0!Ll2$y5z@m7R}@D)OYxsKgAo zw|9=^Y1*%`Ze1U_6dStXN`I$6*PDhfVY0s*$m&LR-fCejXMN=^X(KO{4(zc6jq9C@ zW`Hu900RRfn}(K_1K{>6GP1He9i5$eN>UfI%H=oE2N2{n<*C#Kk;hK@nW^&B3`WLf zW)=}n%Us%a1>zc>8&sI`b=2X zY_@UYCZ5-gCA^|qJZGSzQwn+e_QPm_N+tr$XydjT4gs}PM4@2j5_m zv$Gr4A2^DOe~AI{{YvPnPx%duieC-+aZ#}|zT>&d>q9PWolm_xu5Ub$!*#x478UX} z%4Y1%wxvVU;C3mOk(&BqA{MFC*n>E*j0Erk7erj8!N5G%Qhu*9FM;ZFw&L+k@z|d@ z_qgtCy9am1L8Q!mQ{;H`g410wPoyY1S5@xX`3A{9tsAm72Bv5EpgMgmucTDUV>y@` z2$=ky)k=gq&EEV#&cS~S9{qM@W#tu~GM*=z$C@dX7OsR5h6apiPytqwvrB8&5O4K> zfj`MPi6>Ej`-94Yz|8gu7M_Q#0z%6r$~960MgrDVs|rMYt}WJCDLuR0>2o8&$!v2U zBMp5Jvk_mM^2<9}U#r?zQ|L9Cw58C4TCQD%74Y(yHQGMiR7dz9z7AO}1tv^`2)V%~ z@IRbs^x%Hcigq<5DRodYi=A9Z1H1}ik^j^n2S>Zz4RX70{yZz4hUUt53~aKK@`zXz zh}GB_7K4&RGcO6^6>OFB@U8z+lvVKuuSPA*s(BB(goO|jho4?t+vYm*oN06{>LdIQ zXMS&#}v&o&=xJJXF-&Aq#=g$1T*@T9M`ZU-T6N~by!(~1YHl7c4oBZ7?HP>xZCwK=SSf?vlIf2AC7Zyeu2Fe zJOO)0;*lq13Wj7j&OZ)k63;EJA%5zyo<*8X>NIWb;xIv#iPH-O=aXZKiRzWdBW3Cd 
z+qTBCo<=tq7d^C7cA%%r^E-Lvis5f_i3Swxi$6Jjt04y~eBxN!9(iJItg z2_pqUBW!f=bng(B=zqLN-PUUdkyO3m)o1x053AWGaeTy==3@yT_0fo|beq;MBzWf6 zj4OUGovaVWSCEwpKNUki1*q{BfZKY21)|qNC;vY9eb&^}w0igH=m$TLh`E{l`JTM- z;CeZr&UDuE%L4KB^gA%U#{JeA^e%^PJ9|l9)^uVVp^}YUpJmP#yjj}G?%s+SzV0=X zaUtiuY`i|bQ{Q0AW~G{wHumV|6WB3iFt_6Z@Hpw+d4LIGzEbM)Hh3*Mg?38l)a4Gy zkG1|ZeTc<_drBjm7gK#eu%L6Rfli@mv;O6|H}gI1jA8~pG~VOrjcfZ+y9uX*ha>VN zV%S#sfoYdtc-@b`#5yyx1mAMAT^)%NLpuev&P*U<`q7`rM&CrXo{t1k;s*yhWCA0U z^8MO~XQQvdt4EkVA}l`msLRa}){D?_oZHmUGL=4FH&(-DIZ@`U@pr&nr@Pvu$nYB? z2{$Qq?7dh|-%(54Fp^Skq`Y`+eswt_Xfcsf+Bka0KN^zq?%g}!Boe9)a$xRfEl7HR zL;Xk)7^zAChuAE@$2Ap{zj6cRhj>l|91O^&_ys)xeC;#6WYWm*M^IZWssL&DV%~Z~ zwL2aJpY>8c%s-=|jVsnoJZ&yUCbJ{&@kh!Cb50ao`LH!!o6lTwd2RCR^pQeJ`{0?JnkEi0I;_1Z4UU}+ukFsZ@N)rnLp5i|Bxw#X zW4T;LRM&lOK#BvpC{ena(-{ls370Z70z)s2RJKHg>I*zkLkNy0)Z|hf$FQikaXk~d_&n$xS zq9PINJ1$(WSwISbFx*sxsielM@G^_ngd}bqg>yW{JOi_?2a|}^qQ{blQ^E+%iD<_g zU5%2Mc%|lQxJaW2pw3}GR>Fs@fx_xtOnkgc3BtP1745WkhQ{;cdA8S1Q1tObY)hlj zOrV($+TJ|nvg`~bjzg;!c-8zfeT+$@`GIZkMHsK`_>*nEK~+_u zZmijGo(7*@wOQVRENb{_qH$^#uDi>b(E31LB2CTso54K!p?Mr)CO-|?rV$_P&*txQ z*tScKeWv5!>wP2cpelYy;&-;x{z3)oWUXz$M0PcSe2Bt!cA$*5bW-(TSm|INIvXWO zbGg2^e{!@ntB_fC)%r-pxY@;unsSw~aiYu#>Z4q6b^chH?AWyOig}|;ZlSV=oO)4o z!^u1czvjo=R;!V0^5X_RyX*RD=Zl7&WtTIHI!ER#EePE!~zeq{n^xn;yR<0Sh5;x9QOxBq7IX3i|;WLtfRdvv$zqu||u5!?t9Wie#_XekG zsjMt5Y4ywWoJ@p8uROU3xFMZhZVaFrIhdEOpC_gVue4NZu+Rx(g*)Ys9dYU5D#lCi zX>;!DB|fG{6%@j}b|ev=&R8SGYLj~q{70IKqELF%3gHyl8=D?CFmpRBHDEz90CPTEp^M!SjIHWeb-O@w%OaS zrrl;;q!y$vIZZna*cs89hQ5a7joNG8mzdf3sy&*S{tot?<>Sd2xqY$xj3LhH(c+|G zWTU`38%@X7hQoie|ewdVsc5&+3h=942QFUs`rmihPW5vi3?5lQ^Trv zSUc0(piZp@EhhTIYZBXzdjNS5lsz2bzu=U}uyy3SlBR6TNm^yIZJ(xb-w`$LFEXQx zCn%Dga%CBLq71J$I$-sW{oXXRnC9*W^wl$-YN2A7r;Pw5YkxT+f_!l?<$4x4B7Us!oqi)vcoj7z8kSf57}Fz0fOg%XUO6#x zdMf|8Fy~|)t+s@cq0H!oOQIfa;N$DvU0GOo-2Iika=iw1j~7(FSf`!~q)itl#Trn~ zrXMAYri>)SV2CIhB4*h>DI+y>c{`)8fEO~bG!c1^TyWCyu?g3`jvr|=dpUQ*AQkI3 zsUaiD$0k#_a9=SEt(d#;E1>-L_*OTY-J9iryJ1?b8x$ 
zcJCZICPa86LVkH8GVry>lr3C*lN2^{t2@}XaXegTA9Eqz3=sk?5HZmMKfx!&*kkyTAg*m#9Yb}tW#YL+uP?%OW zS=4Hvm-53U3it1D&$nqEvhxBhhPKG@fu#SVAn(Y=BvyKXaDGWSZCxnrtUdIy); zQsXX%+Ss{-&|J3bo|HKBLRK_MR?wO}$H*!$BMU+rExwIxbFUj(Pi`jD?L%@;7i42)vP4@W4r*wP zcCG%TV$$&DNu+I+Myi*{`OmRx>yOshfsLE^(){%P!(_G_RQ?i1o|GQ8w9)1Fc+X*i z*rew$wf*8}yaQO0?CRH` zaBiP@=ZV8ZK4@s;5<>FVM(uvNqSPf_$Q>Z)v^@4~IH zXwiN1`_)Z3rIH`3S~B@e*tf=;fX~mG87Vb~l=pGNPL5sEj@Gj0QsuG5IbX6}TcZxH zl^UhZUfGq%O*I%G4i2gSW{V9hSmNf{{HGLP5^$Xc4*m4b{3z!3LE}8bP5`JK?~Fg8 z-f3_t%!>C1`rnDW=Up53(kc19Xd~)tSNjDFnhBy0Wh>db);~h;w7c4no=RtmnA%pT zk;~IF@HP7WynIv%e6n^>X=rS4r1@;GYIb-o%khpRG6otSZD1E0lyX)>OJ36~l5-cU z$l~0~n_G}kwXwn4!|{(I=FngqwbdEpl$eRPfZ9`M8YMkW@SO6Yq@$-B7)Fm`F*y1D zaJ@tBp!XI3!O2Mn_tKi0JR$VZ*6p##$`j#mPomQ(>@^iH->WYsfwR}98_oHf5ks@& z*~F>me3pYRuRvT%fbxjfB~>Yvw`Up@@*dK7iuU{5%mzt}Pm#nQ#}arDz-=H@%H9C7 z04}GOF*Y1mC#5`1_0ywfU%5v5GK5Z_u7$?m=MKkE`%7G=V3M$N`)l5XK`)Z?m zQSUI*%+?!{l^Ru~yb2asHGi0lFn^5IaIwO^nXs%@@rd(e@GQ%ilJq!vfheC8sXl<5Y5|jok5a_@ zv!VmEvpbb9YH(->+SS=yfQKz}(p0?4a|*!XgEeMXo_ zT1x7mWHv|kWzZS+%LZ|NQS?O$5h+n{SZV3yJdWK#6*{yyC_UN@NWI}R(vjY^4Gn{Q z$+M4t86sMXLG;c$$bju&WMkD6HQ4`h{-&cyC zGc1nq!)GDq8Ah3xw|c-4S@`$_6dVne(JSF?-tF7z7FP=U7GJ{&R$teOeoTu#CW;}( zbC2O%V55P0eA6qU@XFLKet8pgIR~FqFYCKaBSLa;xFRP5FzeO?!WN$o>&=gM$bn?- z7l4uz0_&#`n!dHCjc`T82gBwUQlSOIJg=5n0l-Nqs;=LI$?y-e>r;hzO7%@!b#I!s z*+~uS<81aB3u0Poul3y5H`g?T1vm_^j3bmzHHAL}3B~@rIEiIY9ynoIxcd;;>`F9$ zujW}DAzooD;#uiS*LJPOt2=o5r1Uw_wvo+}b5iQi1QS2J8N)+r@!l--Z@@=B3I?3T z8(cQN)A-Ur#CL{jrvigO-kwefhCJd#3BMqrC`kopkW^2~qa{RZqh4zpOWzZAbvE%< zS1%d7%VI^2X4Qqu7!n$)P{Ct!60G=KxGS&;F*y1pj5eR`&-sbux@og#Z|ECA_^Eg7 z#!^&7JQIyFpoKq(Vk_}HW)jrXBfp7aoREnulM0WOPS(7Hq*caXuhd@qP_$BsY+C?< zXfB<3hSR+4Taqt^qJ--)?q6`g0nqV_dEjuWUx_~vHOF@vf^-hnPa0CKvp7`$bZfb< zwbD+;Yo@P9GE2aF-^%T?WDW56eC^+3*1)lRwj6$0SX?Ln1Ji@Pm7}WcVpStWz#60J z(YX>GP2X2TZ+3Oa2cj5r-rjTAK07z-e0osB{JUsF9^Ye_NqM4E4v{=!r6I$CJALr(QjEaMpkdOjsVSGh2Ym*LI z|H~}oDJm{wJ*&v60`s#z|yR9~3XmyR-F*)smd@s~vxX_x0 zhBn5W7V}~qjnGiQDS$?DV48Wg38~WC089kBLMO2J+KQP% 
z(ynH3t91*8mkn(Nz%kTr&CYA;O@IK$KuAa^B`qUk6d4uO7ZDjb6B{4j=G6~>{SvG8 zFD`%|bpuNTpE53d2JQ6dn6KT(ez``3FFq`^U6-RdhRbg)e1$`OxpRx#LLOomp(VR= zKHdK|!>Bn7SO={o=uaNtRLD$wjh+6<_ofx)^s;Z)HJvL#H?_03Zr&tbh>hjt+2XVJ)qFnZpdU`8 zTj@j$0}*0C?uP8-AAvhMfPZcMpR0oton}J~9?yN_Ap2A}-RyXe;v}16{AaDJ&Fs#B zc`xa@P$8qC+Kz(MiU|$v+6!^yHfiaBPczenx+BbQWEM-k!Lz1W-Gxu}t+*XiP(H6} zQms--4Joeu1fMuGmhd$xh=j%IJR2HFIpueW54B>_+sd z@a&R^veIZpi16bURcgo*sM;_*1ty?ArF9MG-1<_X3|4Ndsx;N7*!LS|8_u`{fC<{o zdx>Qv%+?H@qzI{x&Ww>4ut*PsnE;jtINeRq8U11-lk%#-ATMyc2LeHTL|H$i(+y82 zg{qD_(iJIDUg9LCo{!M$!MBbHNuTGulIV?OP=B{*E0y=+g~jle?dm><@<5@N{_V8< z5f!q;n;#ORSFT2nwmb|Sd+H_0kLtW0;??zZ2&PpeN4~UC-Z>cv>!@_iPOAW=)rh67 zYCdorUI@rAsU@}^(zm>5mU4c1ETB}9$YfkcL2WK_K%eR~uR?kL`p&iETXpLKRH;_3 zc-!Ep{?W@|#`7tDfs9y%g1)pI(fvLR8%@oa{91yMDjsf4nEoN6Y*t(lP3&WLkIT^_ zu(VadWv8*k+2BRCg2s!~>fJ+Csd4J8v^(HIT>J~o3Bc5{prQFyvlBs9u42Nw$5X%S zIH#mtMmVq2j+;98sQY#;|^IfC(l zj}3eUo5v2;D!!Ys3}h*<^y0;*?D}H)8Hd2SdX;y9E8*>^KdjxA{yI<B2phnkbSQ zhLMR&S~X`zR(F{j4>~kmXZ&!pPyCJb`N2tIkDXVcUF~*F+(wyGQedK?sX2o?NY7ul z-Us!HaQwkm7!jYeWa|MwKE$~shWx+u4=YOOQ+qeIKn3|fm^kA5RyjYEVw9PN% z!|8p_+qd-_$Qnjr%BsMn62W@K(+O~u zqYqm{8wDZwc!%#cMmQYS3n(kBBQ_u0%u3sq@Lis$7%qEm92UV$8aNl|nn=u^nKL3d zo5McV^kmOrAWJxjO2<#+8ovqZd0WYA_?((pW(N1QKZ@ZivCoLg`?EJ-Jr}=YO5I0m zJXFvQ$B$7<2i3s|7o++M=!C_4*Y{|v?2CyunJRe;of?OWw*o5pZ6<}&s#A|BgcYmt zZ>hdcVR|muD~4_J8(3bAi5Q>YoY!u|A%p(oU;^Lb9}e=4N!oGIF8Cj9oq0S|ZP>@r z$VkfANz9P3hqA9@8ap9nEh;G@LPGYkFCi7#Q(2Ow2xSdLC{amd&7O5aqXzGF^gQqT z{PFtu!{_77nRD)Q-}kxh`}$qq-y?zGym-__hFc+U16zgJPOByYALg0=F!U^xx%zK( zEt)rzeat4;^k#!El$Y^2U>R07r&OmGDxVi<#e3S-gu+lnL|~nW&LDL^49Uc1n4N_} z`gK^q7OcFxD9DNwoY9rXDX+pU8&Njx@aMjC!N~wtZ)df{p+BAi6B#dze7+r%mV9%* z1>1XR@JRHd2h&4q-Va~iNKE-ZZer)|e`Oi>EBQi0(0;hdy^iybY{ZNU&VIevcF#vv zAcHMNhd8a%VTF{8@8%63-#-cKeiLYDE~Z2@iMiz!D_OCmbCyGvde8Pwzh6upJYC!-lgr3#|d6{nNLK=cU{r7rk zaMG=ekDB6W!U9r_MI#*KkuKV~?ZOFX0|Q3H{Z#p=D93qt7R|0iS-zrenbaGkS0O0N zz0W6LTy=O53uQn=CvcuEX4EPkzLNGZVi>y3RVIygod8(}?31n@3Nz zz>`2_=aZFw6e;Uty@>l|@%kGl-1yU-1T5e7FpsUJC+qIpI|567t3NRLxPo=rvxiSk 
z>>&=>{EY7=wmC3|5R?#yUMEgC-(pUnI-HLI$-@7C=TnMRa{i|3Dh0AS_qqZUA_;H- zQ;n+L0EY_T!|cGaAoC@2mNsl>@KoQ0H&8w+B7Jxc^MI{Oy{rw|?+GmnngLpOR3?`D zh6x-U*yMPxiG_Uk-o@R=jcF7~PZ(Ix&Ts%uJy2!{sj}WQW!h5dA!;Zn+}))9Gy%Re zW3V{)87sDG-(|r&P?vx3Wc7%5M!@&+Poz?MGbq<-)T28_$C937R9{%LJ_NXMJ}&Qn?WfH#|99Q?5x3HRy&Li%d&g>;}YEFH#Bx= z(F94b9B+PL?2H?(ykvWA?dydZI9Tyr8*qaRfvEE6RLxb6@ss4+XbBO89x`>ev#U-d8QJq zpU$qX(kXb)9)E0Ij%VhUA(Z01T`mBnt!3@fx9V<}bSPazz=)_DA-54Q;;x;ND?GnJ zoK6hhvqPTF8P_#d2Bvy)Zr`d2U&p*^B+u*q-GN*dwF-1oLWxH`6~W_sAi=!0W|8xQy@=k>4O zkO)pW1$9B#B4;#GrNZ#L8s4dk>&0jE;Ov zPUK3O3EjaUu2?uX=gKR4HjGuftG)f0V7FuY?fWTq*P=gR0?ZOx+f%CFM%PmgbNWbT z+51g!=#uOLAMmMMDL8WAnUKD*@rf`gd-FUak8c@$_?uQ>by=CpJS)PykaaLhgbpEG z?&1(vLLDBaG09ZXhPof~H|=1sl3vZ7{5!9+lF@d3a>iK=fC>f`!(VC9SJmw5J9(29 zKNYeq^R3Q({rJNM(j~~Wi{(Z=L?DnBGN%jwj1!pb&vU$;YhI{XtUp_SsIRm0gC_MZ z&-qsw3VKX_%enMZ&QCAPHBU$hb`kvmI=Mpq*(IS#0ODmy)HU2O?G~#*2lK7;4rT$3 zlbGTF58f`YWW25LsZgHUOi<$)@j-`SgT|soZU_H--8~GDG^Ku^vF~NZgmNpmrV6&N z<2yFW)LRn-8pJoWSB6XNHErsyJ^voL18|5wizU=X1c&&$kp5S4QqpKyWbg+Wfyo1| zW?sO;;hY`H2(dPIq3G^O&|iBEOzy)0AJ6PSpVTH8w54~u)B7LH46pS{j0tQ3`9_TP z(kc4j^Y1hCFTBq{s&B+*iUmVnJ7+Bb2hm(P)tfx74;)0tgH6Lw2W2Go{aqI-{gK&5 zvpbKrirk+hPO@4p0I#f~m(qmm-u%sDxbYdM$(iL&2PBVnQ6g~U>2T4>Ylvn)%E|k< zrn2&jRICM)-)0iF<*8FPJ()?mmH^mzx#wG=4^RlOYTE5H5!l3QNs=Y!H^YEAzl=AzQF1+^(0?v&2sZU(G3$_(bW=6iMOnyP^ z1@%h85w8V@}x1S0}|pnAR+SnB_SsMLqfa-Qu85% zgoq(#rB4>T=?d-XxvAT?c9u*>A++WbI%ZJ2~V1Oc~* z$}BRsLn;y#(}xe;z7_i(sDzn;N%Aqy)0L6{n**UFCZ^C1qMk}V8%{Ox7wEbm&?&)=Qk%XlI)R@%*P$o6otEhn4&_(BW*W~c z|8Ond$Bl-_9}8ige&Yq4z-*9Vsr3?m{PQbGlUD*hup2dt-{W$e;la5;=`b}601qWJ zr~}KkZHIY}(vD6`H$?HUv9TShIOmSX9maCc-lEZ!>$|ta#`g6Tzzm6rS**Nl1fyw}Ot94^|Rj%<6m zEHLfuQeKW~`ytQd#DJnR0B~XC@UH1^IUbrwi6<@d33j@dXg{|oANtdn2Ir6X3DZXm z0s$Z7L>kYQG-uNQweY>_B8&SrwQypEV1G5=Ll=!%U$R%_KBLF_L2`Vwis(7i-0nmv zE=zl#!>pr-lh$dBbibU&-0-9a!i^InjFa`T!u-xAAQdEj6kr^Qm==;@XP|XJ;@f7Px-cv)rI;l{ju{Uje@Nux zQ4{1#vK``N4!~+4a=hY|lMtMON9(IvAfYY&!okzgjBpaCCjC%-ECGc}I6?F8ck~Gg z3hshn(o|4fyxm5q)k&+73ET*gWYydqfD-C8($TWW?1e(BpPlrQvRv?o*SkneZy#*o 
zz2}3?|Km`O8aKT+Foe*?GGhJrzm46e?dEpSd%pj(NBPKh zUUel$Gze0~WP=bu^Y+*ixXfZ9N;&4DRqS*{4&k%p~;1DH(6<84QW)nrNrGq>Wr@ zsF^QC?(8I^Uwu^que<^6o0>ai4Si71{S;jv)n6n;2D_byh~*BbNZBn~89)U|JK%8Fs&h7x<=^~OU*zM=jrOai6F*bA7h2Fgh`hl4i*9@W}Vm@vhhl#B)is3|S#@vD}? zjYHFzxYsW4Wad*c#f)CWNxM0LhDeBWz}5ER#m9oxfGgB}8nC0(vNY>#iiH0+b-UFm zF+z2Km@gl-$#kOqj`S%I592f5gg*@-RI`-j4ts*jmtM1m7V45!UwHznhLn)d5k^Yc z7N$^Ma@;!gXPt(GgdyX~<$YPm{c;Vwsw|xMgUGOOn^Snp-i9KjZ+gbU2tx+2nM5|2 zNS7q2k@ZrUyEZO^jP7aendY65PE!?W5VodjP?wHG%2-wjwkR<9;>LlKq7MbI-lWp? z$+lb@kohI~-@IM0HCb}0{%MS-OYakp@gmhJrI>n(7+nh2n7z7&$4?)}gqm+bMG(>A zRyz-O1*E~4d3VTu*KT$cF)D;(Mlyl*|dBGaODD3bI(e1TIwFf_KFk@(LQBqd2 z0IopQCi`X)#kJWz`~$O*t~m$Jgmh{j5H3rJ^Zsvb5SqbBS=>N)0xnP1xKOP1k}xkWinIyc4VRri;T^*79q zw+Qzjm_pwe$sf{m6AhFZnt>-lFHTT2Imt`*p7y*HDdnT176xcP13f(oEVdo*3_N$? za&x$%%)c5RDUy3dPXagKc z3+|+$FPXcZNRF@To`QKRVPbO9;%Q3??G02Yi|}dr24SJ`%xtuOOj`{b8Txt-HKi&_ zjhgaHS`@IPI-@pC5X?9^fu(OAvU+Bfu6rC_+a^DiLV9hewGgH@d>J_N!mzv~5q(3K zVjoktK+68w^Y^D0!BnSXpjx`5(h#~R2C-4`R9h@Wn9tRH<@{DHYk)=b0x=4Xi5lAd zMOxzxJ>3~%<1whX#|a1sl-BCLP5}@hvcAwA65V>VNh|2N21=~v;1`>dP43}u_rFUx zQxLAUjN1o^|0mtR_0&w;RV)re#ZHt%y1%TNgP3K{%5gSLx9x1U_MxfV^3#@@u*8x0 zRMQ((!F!YPhqZfTGc0o~++x${>X$T7m6^g`L}~wXol6Nd*5wyTLD1}SzDuY1=s0Zb zFIc&pp|c>{f_SpdpJ(`opRU5thtO=kq8<7f@5c<Z0<(xjO zgJ%Mzxwf`eD~*@v2-DmVgdq{LFY_L3CESh13+d&Z6dwP26Ew+zfHSJK;5&plFz`VN z&8-vi>tq((eJRL=y?6g$-w&n2(>hRNt9aWAZ!MV7}9V!e?YEdZ4d0(&({K;4{A1x{rm6ziJpPolPeVD|A7Xb(XnekAc znhDVk^V7q_Bikfhp_skLb$1re6|T#4R4Z~}e%6>CHIrqTwl#hap-~iOWt}7)%?KoH z;(IZpx`H3~qghS9bxUIg^xm`A>6ykL{W&wmzVjLm!cYkA$Ekp%zow?DTBF+ITy~Ct zokQ+=@V1RV0qB5P!6pwn(ZYXkFW0sG$Fi4MDPIjJ9zo-eMf}(Az-f# zdpF+%f+#ph4o=C=v6Tv)l%uDc#KX-mtcQ9d!P^<7G{4^@?Ou@Mq71KsalP)yS>fl>@{Ub=?=u}MmIawr;hQGFq%e{#Sk#_p&W1qTx&+N%1<5Rg){{xW1BM`#tZ`UOhgW}#D?Oyp3hZT3f z9v#Z~_ZsaOp;A=>97sY#U`Kr~x6O+PWnT9O1+KYK5(>|<%ab?4;$1oB6=Fnk^=dYb zv3gESSmm;B^P57JQBYC0y{V6eOVzSta{hQBURYIK7qfDFvRbxumsijE&TDaVa`91F zZ}~JQC$Qk< zWaC|(|KUTiAx7z6AC6!iy~U#@|I-vm3L@S0EOy*j0Vl(-O^#}m3bpuXS9meDtg4+) 
z@ps^9WEJr@9ihI~2kz?mm1^=4_MWf*J*h2j$xHtyUbt=Ygqzjj}-cLLbu$VF%eD~^9|1stLtC#XXuNek5Z5R{;E7|?l+7`V zE4{WPCQcTByH}DYJfxWi?zJ}h#=ey@Q4czv=%gOFCK7)2&qv!V-k*o3p$SkaG#MtB z*9}**6dZaT&LvD6Ts3%ZHkuok|E9K)!`3)^H6-43ajD@FX)8rGXypclkFSiM?q^+} z+QKqJOyHT|_3-Ivk*LkZEp}YOqR@io86SblulyBU{RaK6;g*M56JA)ipSjNhV2%4o z8s&BTdl;3(JB=rl$$Uw}RS8c0au0jMW*;`Wm7?6Tc3W#VFe-Fl=gvj1F@9ZaMwXg25|XAUKC??i_XPBTJ+OPp2uT1!Vs zsAEF!-;r?=(xt8K+aNSXx|qZZn658J_Hq=_;-qO=wEs>NS72>ks(MTg4Y`t=sJR-S zfkLkfxk9>|{6q1IXTPLf&Tx$bST9_A81&jGBlb?{SLlPZd}xl7o74NZN5y}-Di#4g z(6Pq+9Y)+pKQjF`4R})QabyWU+xg0E``Yd4w)Qew3B4L7HIQiFM_>CWsf2j?`u_ly C#<+w4 literal 0 HcmV?d00001 diff --git a/applications/Chat/coati/ray/assets/2m2t_quantize.png b/applications/Chat/coati/ray/assets/2m2t_quantize.png new file mode 100644 index 0000000000000000000000000000000000000000..32f758bfd249f205f7b8080242f2e3f8d022e3e3 GIT binary patch literal 134715 zcmeEP2RxPE8%K*&Xs2aFsbsH`ajhs@DT>6!wP*H7REWqb86gcNm7-*{Q!F*3p&X67c^NB1);DDLM2mq8074MX%m8#xUfG&39a zYX3tFttHGfOmwAfeNZP+`tY@jQwZ2fn!v+9_^??YTr#MnYFU}GJ) zQO6Rag*Jz79=K#-Y;0hGF&TbQ)7Z!et!Xi=p@x~6vDNTq+QtUZD51BSfPoCV341|N zQ^NrFsxn5)LU*7oFBjB3Z0lWUjE*k+W;Ryn7efuWF?7LPS3}F#YT*9RM(nH_kXY#q zwgXZQu?l=JwC?96e!jBtaSrt7sD^ei+*x(p%}PC!4*7Q zgJl!@R5WZo2HHWI4LuQD>lbreK0$XevuC`oPxAYRJN| zb3jdWk=6}v2vg+G#GiX$WPd^cKA3oaE+>dVvhhvbAbEa2NWf$xtwAeirHipZE1GEZ zn=LCaodQikSYNCni)$ES46r=gg*G^f#;PA^gZ%+v8^y)@jRJlxgDF3fdZTL-JP$*X z!VG^$P>~&;@`i~DH$>Wri3;x|Q_;})C}c>m6ARK*^?x_E(6B)K4!jvO@;h1!gZw(& zGGQNpM`iiOPIClJ*?93u=78xNGDv@>evmVVP(NW!3oy0zn;XPTFh23b7#JX=P+J?# zt*Hqpf-`q(@$el10y9xsLDn8tVA#+@R6|e_A0c7$Hxbz!lzIdmMa(cjDJPYXS{i6x zZM=l!*5pMW(Vikg!iIiY2nkKk2tq1~D@`gDwY9a`M-fNvBivlvlSM_?I?jYR0%rm1 zFCeI>AU&x>)Z#+(YH{MzXQOw!|ZH7S`foYkYRa!$IIKP9c zk1&?NYsGryhzWbRr}9xK!5B6^$fdVHf-x{cGyPx;JL?z%wW$xsA&U!( z8?FInhvO-vys^O-C#Hdu9b}Ewia|CUUnnrdmNAE_Ff9-|I;h4|Ks4Ma3dd#*&CS@@ z@HcjA+9T6HATiHyF@@&@1jX_C32g9$G9fXA--htb2qbR(14^>NUM-~Y@N9x0<&*`k z@Cpoqkc1MrP68!i8J!kN;>KJ`W*Qiy|5%Lh2ULft47-f1kkf*oI$l!%%lX7C!Ed58 
zlqG1Apv09=bw=gv|nyjkrVj|?^j7kP&T z8!$0vAlP90TVlKMkQ;(`XMo)B!tW^3{r6@(a6mKUFuCWPK}O&@N-)T00zUkM84rU= z2@_w;C-jU5c6MBU4aYf~dWdD- z{-n3xrp6!!Idv%y?CiKmE)JW==Yvl??I%~f+@^-;F#%=Ju%P2x9VRMRHF`Se|WB4J6 z$zy2Y_dgUQjGyW-l?P*n>CAb67jF<`9~Pz&OwPkN=n4@MPqaH|Jttp$CpY#Q5`8G= z;UA2?#=&ud$a&y|mZSdr(FRQUVbKO`DTYTIkRvez(FW&V6Woo1+>pC51LWp{4BRNv zJ+(Ox*gR)YAi&J#0NA7q)rR;*l?QVmqZY#0DjnF$0tOo9=3w_EHpdQJ76&_}z*X!M zLFEh+Y@Gyc1GF{Nde}xPY$*g@O*H-uen)t@IKd3Vff`m)V!(+Xwo3#GWrh`wfV5_~ zDnZl!-4O_ygenslTK}d!2NXLF zRWcapLI2zOix`Zrt^qq2?kc~is2ms(wps!^s|s2{7AIQ7(s0^~O|Zi%GV#<4)@`iZ z(5g4Y21xq|qSORC*RM_Qm~FvOm6v|K#wt0e1v6B-1^Wd%eyp1HtN(uNmx0nSgVq54 zZRoaP6<>zg0%(&H(hv+hTMO4(ppCSUwqn#;F_WbKY%`jHdo< z@M0O>=0HU!s{Ob~1-uUNt2K?$O4tB38rveq3|SC3pJyaeH*OphByX|u5^%X1cCKG% z2>+oC>30;yZ9@j#i&1RIFcrti)E}%GHA$vsY(4}hG$Rc6A)t!{WX#7HAD zqWvUT5nPxKqSOd?c8Z+_tjR)DGx?#Jk*=58u6lA6MXz&iygJefmRlwRe#lOA2Abwil$-u|k z5ZO4hVjcwLJ<=@IDSE@t?;R2vSU4&OhG16W}%gy#0h(Hj>5= z-Zlt-v9m&u+u%+BNa_Z^BWOLCMul9tnv^;nc*ii=X)M#8k%4zqW9JNl@>7?Ffwzyi zxiAEhnSr;rW2Y!2>+t?4I1U*=msk<>o~jH3y#2>Tmk1?&18;XHN^U&!*|d@yHw-6; z?Fo}Z>zUiJfVcl}cMvZ3^zUNO!7jc*2>xK9di)gy@eX3#0uPURbO&x)1s+dLoMGuT za9Kgf3?k4dLfr*1K{gHAeO$wjccPt)jS+ae%mx8~2=#y?(qAnsK()niYU9I<{AWv) z;3e|7)fY~MfW-u6Kf*tlvXYT_N5IC4J73#O;Q%xUqAHFzP3(y&*oO>Xj)BYJ zj=IAeQ72amt*v3%e>w&@s74Ev;vIzMp{ES+8JWz1f8>KA^81Jn!5w&}LUISbHU->) zp`~8{3I9BIAXUO=bfD>KnwW)eZu-9}f%Bzs!IPP17L2L=U$cP0>^C!`B3&@E2u9K1 z+4%pw`z`O_4H`2yg^ddU{zn}Q2xfzsMKFps20(c{>lm=!j`X-D>P5p@NeCgIW-l5S z|HTP6{!Tv)u3?Ci@Pa#=Ah-j2%+utU;o`;r=T83_lL*I*U~JA9BKLn@m7w_ocFbl> zv)~T=d+5P9?A{1w5sIS0sYd_j-Qc01xO3JB$86>l?VrUK#;<6E=0#7S;3;0@a?-qL zwvolgXBvzitlIa_1@?n7GBPy6HAypPgZ|k_ejMB(nAv>{q5I$IMvsG8BRsR2)2x3M zKKOHH5sae!v*5z`c?WxDGpA_(EV%IJ%pw#;gL5E$6|c?+7(LRBo^do9PZ8gL1n(S_ z5dwCj@z(qOC(;DQ!5xAzI{$2%z&MyiC~h=dIQUmH#NQe7{5i7-M$!IRaAEwsn{hXK zeD#w5rksvHXBNRI+JGyFu#uBoZ5}UX4Ns%L_sa$+6DW5RqT4hUpvOA^_;ggnUJqnB z11R@c*TN7kQ-`gIwI5_V!?kS=3>Co|L!}$IJQm`vUsldDGuB5VN|X#1w;5peZ(+;QvWBR>OKCB!t5K#1l+|D$a=ul&=5e=G;hUb8yBQI2I3ONUhF|l8Z#?!>SPa? 
zBm`&h?jRei`8@fXDsTZ?Lf)%Aeu~dvWqthWiwn9E^mgruQyi9ggpg0`{#86_Swp4C z|Kpoh$Il&@8;~c-#tO}Q2=3rDL9nks=@tl2=GCz6#e`D5AHOz_eKLj!4yR%x=?zOL zG65L&h$f%j_>!jv3X1;8rr{w~`v11!VFlmDE*X%UHge@;u%aYVRPg1BPn~MQkq}g- z4mc}+QJ4qY&fms)u!Z`4pa(fq1QO`M31oj~ls*OQgIqI0VIO2cMq%HWs|!yD<}$|W z!ngnoged%02jTP8eyf8pG)5>)#0M)3%(f=~FB&;m9u+PGfwOUH+{n^@vT7urL)ZsY z1)=)`klZN6B1ToLi4zodL?Qo`9j<XCtf+H4^CPr<- z05}IV?f1Mv&>k;-Fm(_no&+yKhA_uLQz$TmE$$#>Gay$?;PghBOF8yx|>XL)4!Bnv(+>{@a!62@`w9MH@!RhKM~3{Dn&r z!eQbN5cQv`EjbQ84Qc_jGJ&x2c;{6ZJDzl$7;jqBpX0AU?<7>CAjL6JowfV zMr3f0l1?%{l8im-g8U{JRDe|qvNZ^eg3TXt;RSZ(Ctaau&XfnyuqFH|IZta;l&CeHG(G`gc`wYnI2Z*x(xqk)d*I>1`v-AF|AsK5Bwf6 zH4I@;|E9$o!74&gvT-Hs{nMO-@$nh6hcJFR6hi&q9^$u$Fslee$#}=WxtZ~taZ*uc z*mdT`O`7~SWPDIW2-kIn(dn@*ef&jJ#W;9FAO?pI-oNp$O%>x{)d+uVre<}%F`z1c zrR)6XtRfU8`&VI}@pBIL$7W2){#DfH&sjw%N;U=%h%j8|nXZ4s_pf3we6WZRun%pwpM zdax?QKj(-|fLR2iX#Xmz@CUq`c`y23#T6#NECNw9cuP9MMovfZ$WeEtj=EoRa4#f8 zICzUZV%Y^wdPZh&!q+A}*_H#6dBo=Czo@JT%%9(vG=RX4-G+4`>VY_w7158JVJ>FenQ@8t?6UGUKI77(0ug6buLMtH2 zi3>>)^!DqCQyi9ggpkAL9_g>Z26)o_6;523X{hQQWS9`X%ztWU>iD?>a|3cF2cZH4 zckr5^DRU=bcNQ`64%Lm7*jsrtSef;v!4h=I4VKXyhE6`liK*QV|qX|M2S+hFYVv!Ow`(KD2o?(V7GIjyMP82rm~Wm_0dA!>U!{M$NEG zXKZPviG3Mpfwxitd>H%h)fz&2CQmN;T6`NcA3#BWUm<6ynes4Fimh5mubgMiyu@ z@Ef)mB1TigKp6VL5Tm7K062^?Lz`o4HI86Ei(&-?q9kBF7m9&P6kNfT61*b831ed; zv6XQnT&&wzxuF$Ph+R;iZH6|`u)rLJ1QU`~SOj!n2H*+-+{9R0+Z+v~0z7Vrk(0wZ zSaF7`^dM*ZG20u~ZCrrj*dv4bZ3pyO6f4D8=3svi#nPnz3)ZIW$9@^OY0%ftx{Z2kz(A8TcLi!b*@viVM=y ziPwe&#dcYFd7;NaJcPaxu$h9qv=O{fgsj#dnyG$&XAEbV4L4JRUeb_t>o;1kc^hU$ z%ttjg{gw;1Rex^Ha3?v~qW{hn;)F1P-?~C@RWSmyQ=G$H8$;_6uG2_EHKP3#St?xI z5wcw)M0bjPAFNqJIN0MY@XG}Q))V%Lu;zw`8=}g8#oWN=5#i9{IIvT_TnfYG5Y4#Z zJbMH%1pD1c2yx1AAzplQ@QC^HuiQ;DC=4TY0g8|Y11NS@1aI(RcJMXWtw(zP!yNnZ z24K9fx(+Sq;mGU1&KpRNVAy@eScN-5{GV0$OaII`tNzvO=W$p(2&RHrMJP&!Z@KIL za}@$H8Uz^O75EvmhyQBk^Z1nvX4RA{83>KcjIoFb76JxefZUjAnCMCyYhmG9=%N-z zL&r?RaNs-qWe6zd6E;UnTWDgKc}=)q)4k#s)n~#|*}$5f>COwlL5-fMl`iSYou$<^XU6&w(0R7#kZ{U`&Qz 
z)HF6SLTiF?BN}R8*EfbY(>6AM&~WHsCK@{M8wYxdy`Ts{Yuu~K7%dCkffw+??mM>i zE;L3*7k=~Lo?1fvqH7GJ-3WO~v z!g-m(bItJ4WDL>VC^W$l{HX*Hf)1PINtbinot6ZXH$N5R#xtKyE4krJm?0RMFb*x76k5aR zJAsHc+#Q6=O%P|6O$DuITCQQd072_v+YR7@t{`iNh-3+NW5~qVG^hi(_8sqh3gf$z zZs6GF9#ksN}>;8u$f>y-Wffx44-7P!GY1x z%rx2i_VK+Ie(RV+g~2cYQZRTI0m5H}7wD1x6MSv*)eCX}5f%-Lroh-7ly-y&y0D3w z`$4=l9bnw(xlv<;V<7en+slEEjrbCs@U?@bj6g6swlPH1qksz#mNapoYUHMU*waS{ z^c3#d$IGP2j(x{!b)h*2Z(cdxj>7C4v-N=C0wi(av>&9om^xg64L7_U&_#p+8A>Gi z&j1U9nlxrs;Ov+kAOnIkc>ScQ!yVXgLt#P&9>!1cncTCFSAB7VVS>gUCQfl!<`F_Z zt>^@z8Ai+=00<7^*?@9)de~P0*D2lJJTpf<426XoA_Zk5`+=J{dy< z$Aei6T6g4R40}YA?z_Wj4+wh0;q0G;#zU&~|KNC7_QKdD1I|ccg>HdW6IMB-YQpdy zQdHn}lP@Z8B!qt#-+w&JgKg(;<2=|x{WkdmcBTj<&}09rqx30YALN=53i}{)fQXTh zAj_CHr%eauGUW+SaN)z>>L3h_5egIW!3zJ+fEt1tCC$qir;koUn3FWDo!k7-VVV#mcMa8RRBsDd{OYLLn!;hO(K8WcM&Zh%9KaY1Nf z#V3EY#}i*jG|07)RQ@j|LcuQeuM~1z)FzBfsB!QHq4s$3fWe@3(xVOzc$HyjQI3!y z$#Kw>Fq5G;aM#KS8j+kZ!SO&a(+Ws(!0W$50#3O3P~)dI3{k)^25v6M^ims6Rve-> z!6rmanAkHe+A!)hMC@T;E}TdMnRrBKOBks#11{CDy6F@2aR#*jR}JvatMF>Y|mu{~29Z_(~Q%8Tm1Y4Bu$p$gtZ^`wRs?H*Q* zn*h%;!6!g?&oaT+5T1%BPcHgH^?^eF|KYPt@X-o(ZxG%Efu3c8ulN7VStj@f51)}R zQF}8~eC`0Y6dkt0+_(=37(C}>EcWKltN)L_g?3n?C+=z)ysq~+EEBvGm^2uk;LkF_JA4SR7?+uX4^e|klp14?#u|?1&(J3R zVRpsKdonUd-coJ2B@SfDC07``rFWh!4XgJSt>)` z+XT!WK?$37AjgR&C_3%`yy8--(()$Fmh=YU+w(b|t(9~;qr;%BemRjfaW zH~vVmro%({OBD+;e!o>LIE|V>9E#Bd9m=8m&Cje5cFO)6wHbM2(TMirRh#kG@$N77 z4G!l7g{#F{X6^PTBBCLZ6ctvoR{I`GX1#Q8{t4N6sO2OZEIuqy6*|f=|I~b9_N}wF zopK>vRPIcAb*rXU0E3XwoY@*5mQuE?Sb2=<;5<=cVUnnHx>E+P^;Ra_CDV{Z(R79@ z?!W!c;$31=%=PQn(`?u2pxO_Wnm^1cxRRZMrawqSM1uTtlz8;TivESOFwtxw#2+bY zohRZe_^HQ-$TfLA^+J)k#FP!p$9llONb$q*A5GL-V{LMFkUA5Q&R#&w8{*8y#Wh}L-X)hXyIHRhIf&8wPcdD-6MwtUGQ!zgu|Q1ZJW zZTY2!T6W=m2~N5eaxAjh}KxaFY%nfl{Rl*ku!}AD$=3FY}tXz ze0^UsqoSogI(}rgw+@y(>7{W@NQm-2vqTtulf;?D(xD!C!Or`Mk8!;URMb%}73iwU zZMvZj#q0ZQ@^p61b==et3_7EenCceVvYyDu@G{o)4OZc5gT zfGd65|x zG+VtSgF^uKUoFTpD}0mo!GJ;K@@ab;k5?y7dNhlV&G^t)by{Cr~JDSrQi5~0nHy^Qc z_2D@$%(`Ykrm_EJWx_sb6{BOnUx}R|1@dFo~1*uMjbL*pC 
zUwtOo%c*^O+3XuAB2>xCz|-Mv`K)xoC~oRd_ND-**H7KUd-DZ1owqYcwTlD2K zC*6NQTKb5A_>$`p4QJFi`^ zW-3T^$h(>$u#Pcl9#$E_AF0g^(D_y1|4UZ7HkUUyH>=;K3OIYhPuAZ!s+Rjg9dl*I zg-1dwN%}`3^*h?;>@-&rNZ)TXGWPFLMz$u}+0Lq@ZT_st>;2l`(@R+dLyaXw0d(g*t-UFJ_7uTLfi1%>CX zc+kkmzTgB&|KsE~fQM{dMA@*Gc&D5F4m#T$qr}A585!68LPHPHZrr$WD+^1UYyIVu zVY7}2$;stp`uOk0$9TMN|HcUpXvv;KGo%>!QkT(rf; zS@TH?c(W0ebLkVQ20r2r=I{%T*k?5;rBj?feR_>@if!wu73%hEck~NxD&0OJmLc?5 z^oDTR6F2g^N9$ByKE#BRZo$E?9r}mJf?+!gssq-9bONt$uuBFHW-Lu)p|~a&`;NE$vKdYId#k zs_V*eH($MZ^ZmNS1>IS*g&u`~j=aIFI4!jnjLb5bB=+tpALGM`MS^vo9+gPlG?W~|EcJ+jm2doK+c><8dQ6Y zMc!N_1t0cPXU+Cl(iH1*{5X$e!{d3;wr$_Z%xDfvl5A1Fc}XYSq0QFF$jI)L&FIc`C{)nADAk)Sf;#*NKSFMXT)I)qJ@ z^44NNoh=8Jm{ZXbr7@shb)k)xG$m)G0iMgIw3pa@R=FJR=OgSFkKj^Cq8tmy(o>EMoq$&2n{x_i9S)>Z9{xRdb(G2=3`g3cPMynp%*5^Uy=; zD<5?uP9@PC<_9msYL+G7OHtyrP2YN-qMu+O_`~3UIHdp z`&%HhD&2b-)3=dg&pKN)U!3-R#=niSfq{5=V_aJAcGKQ6XX05(6b!Vqw3=9oyX!`L zdwmsHy~7&-;pj0@EO9_to2XSXc)(;xPfzcp8m}$ASH?}=oPm*1%BCSiCilhx4z9iO z%V#HxA1=6gNFjJ(b#`_z*Y3T7Au+kJs)>0(we?~)N3xPSJ4XO6EO_pfmAr?@F(bp7 zxiEQaeNKSa~Z&zvRs{cY5|k99{??r6Ek zTzmHXIr=*o{(;KZ*PontGSp8IKThl#Q$As!<2?*4b1fTPusWz?kXZ-lY}@y{z%{g#X#I zE+N)bPc~`JCke4zxqiLuJc@No1v0;!X$juwuItCdcuW{fHtB$Rx0Y{}1+;5o%tyOy zt$ev~HTPD`i_@zf(=iJ)zu`~<);UCmo`f8|N6cAv4zF#?CT8{2gSrQwo}|bQb8lh1j%?zrFDG|4u4+#E*><7m z&cU+R&r|#xi^P)x9nTr5rO_#E^qC*9X_@e6`BtVoDs~=MP3jF5)4GbbzvfLocQdp| zoX334F;{Wjn+&M$OVu7M6U(ocjlRBftI`!9!_Rb{d-8MYRlN@2v~DPiTeC<)BI1QS zDX^fFBqc4XC-xA9Iy=t?4(2r7;UExwSuMqu)2wn=B078R z{#^xY=3TYlqv&zUBl>JK-KTuAyCgg})g!jlZ2a2J@sR10;0-@e(_~5p49J_i>HIu}gj=_6Wo(oUJ8hEqHh@RXe~Yp= zn5633++(+vH=AYUuC-iGrgSr=`e#~HU&KWpsk7&FsKTun0)Cvja3d*HD7GQZp@L~s zWQ;0X_J#dmN|6cLcF63Z{DqAC8wZ+!ao=}dsLI3UxbrG9>!-%&O~Y0$A!P+DyK%gn z^n;6wi;_v@Kdiy309PisWT`vMhSbC+Q%H zHy1zbTOV`7t9)a^PU&UqL+ZFQFDqV=c}K=A%ikz^JIBo2oQadOMO#fxO)_T=@Tw?? 
zQ+@{8XrJ(PcGkz5_*G(5WtlfS!9OFi#s`giW0XuU?DI>imiE}BdW>#Mab1G+(oK74 zDtcrqd2jnh((5Zeh~Z5=Sl4ymziZW@S07~A)(UJ(*xa|rrtSNI2=gjK?i$y8myI;n z*DiT|oQTxg&H1Kd#uj?fD4L7(V3=z)RBkY7(1!c?2vxj1a|O5-%RI}zUUpQf4b$;m zQvVqBIiyqF$@;>N8wD%)8p9egWTKWZrCgP%zk5XAxbUt!qion7)dU06B68PcRZ+mg zR2rHMm0~{wSs0xKgoPG@S8&}Wxqjo8PxYbPqD14MCQr|Yi(>1A7nTgz0b(kORW2WEh!+a1VIC5pQ*t|PS$aQ5+8(WGPPx^ylSQ$2 zIcBABigS{aBrT>ql-;13bcDx2ld_4Cg9^OrxKO%&#mPdNvW*~M{rOC8tI1cre91;YYH-`Ce_Xi64_}T^7cVIT%YuTls!5`ROL`uy~G<4_nB& zG>r4dx`d6g)K;Z>pBGa$>ABAlw*k?^M{M_mL44dOVo2G54SwgYnlDSOOrg>uS)r$T z!bf@cR@T=d=@;H)0g5aDA)42Ez2nZ|l+HdpkK{sR%s;2GR7h5=yS3cm-TmOppc6Us zL+)TZ!G+Ta*1Yp^;TKP(zp#FHf8GtTaL${TF{CztpI2_6U3+}hW#rEY#Jx)m_m;|E z-EmMkxb+1q@T6#~+qIe*&o~Hoan#(iGK#l;d6eRv2ZLg|T53qd^~Wnvj!ieLYTi5` z=hS}(+Am}}n(E!Mo74IYXE3il&HL+zp2~64g$k#8%Arrx^rX>>?XB2a5X!e&qGCka^nHxrn}(bAIZ<26AZQWKH+Wc_a&Ln z<=cBxW`VV7>XB+cU%c&*&t7r&vnTrST^~WS?@KdP?xKSA%AM7Wp7XgTv+SGGHJg~y z1RIY&I?LO0)9ahyoebx&8z0?t4@+9KQ8qWyR+vjFiqdBFrV+-TUu~yTdYdQ z+`VV6?QvhCEfv=o&{VTn&qjSs;|kY^iV}adiVN8uor>w}s(z~Th#H67r=6ScwJ84H z8W5t)+B2UvZsDD)f{W|sIPUgWIvf!1>Jt=IQ@xE>_kFkVr{$K)9=4*DeFw`9eB|TQ zc!B!XZn45++j>dOD_K_x#3|0s=1F@=lG5&XX4jLDIfCRrgSsqO{8gg%WPEJS)u3+q z#wl6W{9d!{I%l7kyn;j2wzsP{8l4h9F#D&ovoa84yE*!w#c3<=2`_Jzh)e-y`t1B0 zW`UGVcV6wgp(dNnFR0d~^QqX9Tr}nS8XNSYok0}O@~FLoSvngOgE{Rl`mfH8Og8Ls zm_ya-&awPma)HD$k4sw=^(%Fgx3-}hOTYEnT|014go#d2=QU|!&30SI%6$yLypi9RbRj)2Ps>dCHl4YpUFG!Zujti$-svaw-I870mc_&- zMVl>eFO%}Q$t*9lHZO*Xp3FvBeRlHC#>FY7y#O245*YwX-~%WIbe4?-Q0k^x*?~_i-6o-TiI-D#7Ogmb{nSHtuFJXV zDoZ^Q%v&6b+U+ns8QHlFl^^H4lwBS~PeIGlMk~q$qOtd2{CnvY)rnDRJ2}t`!Gn9` zR8KoIxbn-s=c7=wS;Wu%uq1Wme&^DzhXbbZYeh2O~bi>F@?PvX;G z>ENBr$7C(A`7J4_*}|PVr-Nw@lVFu|CFsg(EqSL6h2%Yfo31_DJdf6x-A*K3pX1b5 zuO&R9h9UB+XWQ=qxCPOy4pjQQa=#a1(qw52Qk!qR76x{z1dp8(C+`#j_US4UZKasx zoPB&)!?zm8@U48UOS}H6EcFU52G`_aZzcttbG*8c#1RXZ5Q(f}s+`p+SrK{J)n^66 zvi((9>uii;oeyuKceAGv#2n1?AJFnO0LA*gkTx!p26V9_BB5`Iwazn-le_)r%e=|R zv@83PnM~RjE`KuN9k z!v(8Eu+Lx0inqIUrL#*MEL-O>NCkXXXb%fUZ&6~FrTt9XyEB4ljYHPGHA&~Xc7*c# 
zOR-t)i{vi8VCVKE%kPTr%iBvT@0$f=X=&xSz1iY_K0quY+9kSoe_l2dhbuLI({bf4 zWp3)PfJ}21(6Zp6dN&IQSE{ay9>o;?_;IIq2l?rPl4UOqPhLIP`A&_h**xu|)ODv1 zyBUtVd=*zKi8X3=PZw88QX>5s<*?f0XV$q>_7}mREZ&QxdtxmCb}r$8=Mo*Iy7I*# zhTmgNuF*ZJ;iUbnu|C1!oA4>?y_Ij&d8=~QZ|ZfwtR@-H7j#~dJmFG~x8_TxZrg3e zU0*+5m3gkKQg~&>ssQ@+*xny-~vo}d2en*jVtg2Y)iMPSoWS7?M z+#U3eC0E2j)egP&y5k3t=G4wWserAL1ptk?RdzVIu#QbnAthJ?RkCFXhj0|obc?Hy zegA>#g}k6^T1?+>D?Ic!I+%t^>uumJJ`l$O+}nMsJIgXk(_wdrGjXfEwMg>GQ|mmu zy&b=Hwbr)A9eTLj26)zGXBl>XIdSr&j{oMp-%M0g3T^4i`y4HHPzf>vb&LDF-20$S zCrk1AQ$jMysRuK2sH@*xu}|>qVzkP&Un*F>YOAfgcjJ&J zqhRl;ydZfhn?oD5BVPc4)>=SLA!bDzKIAGIY8aH9S9#GHo^E=CUQ8nYvf1AC%`zL8 z8?`EY)3IRC6nt*c@u6<315IUkLQ$RO?!c`FqU_p#JO*&i!Ux?y9x^d9o_O-)$)+t^ zJhMXhL$Um;qB`|fuVyp>} z?2KH4*c#f(Lt>vptIr1o$qEZQ{op$3Q;@KD!0BTnQKM?Qb1v-*uTYg{u!5-4RO;~+!fD|hK2Lt^ zqFmni$)6Y{a9bf->J>n$4v02?Da<}r0??YBZ67q1v%&?#MMdME36y+co&&_x5ID9~ zn1uYhq@CrF4L`n+(bjv*HP!<&4?N%_vy&MsA9RO1cKWyVCLGT~t4D++uRcOQ|Dpn| zFvB%7-Py&ev2%(KUCA)yD2+FtRmc1FVs20rQ?2+lB075R_qWa#T(dpTsrNQVMmpGP z>Gc)~CKmF}Q~(eK`<*?z*t9y-Du!Cw)XnK&PtK0 z-WD3ES=}ygV6oHou4%o?$3FcE`L5Q)-Ak1PPw4G)k8EHiwW*FaWv<;G%6~zodr^4A zj_UzYY4_r`B-PAPTj+gU^9ZWTr;@ufbk&(ZO`)z!|rYuuDgPj>IUO5>VL z$(Y7`XXGSpGV3&7AY#7Kdix4&rG|1?b;Om@@MYS_bQ(~yue=lMmB`k)JjD~g~JM+*(Knb zoEiw?ji4(ZRTSwf?}>k=t!#}4R~@&$Rd_qwVntJ$8m~|-#f?ixy;^o`-Xgc)eoZyS zHF-CQO=x9F+pf=y$Jh;vzjS_1+S#IoGSpkCT4H!XZE=(A%jeITI~rfEr!QV4qW0TpeW@=_2_3D%1AFUhRhRH9-WQql+Es2qIkLNe{kbi=d+z+sRjW_P z7m4rm0l{lbMM!6>V6W%8l-h-r&+HzZG5;cXFzw{!)DQ0r4OvJ#?Kba?xr+slsTQ3) zZrl1zY&EyRCQk4B_jv(|vJ6-Z$(Qm?rLEuI`@LSnXXa|#@>Mv5TQSYiA*j{Q&kxhn z(KyF&-_3*Pc~TRaULFL|S>kS`y~MJvJsZ_;L_3{M`nf+(Lqz;t&Wgo*TjLTMZl(z4 z6lV8IOGcdq7=V(YWoFfxa9(kig{ya+^zJ#Bv+qJ6Sxa^e`KHb6F(29jEUQyHxtXo4 z!t%0$tHR~u?do&+xs?|#WvEP{a#L>W%ebTK;iY~|F_-^VOz=i8Pwx6TX;lfehAb_z zDZxh@gJjk_xNOXN5M6bFBeb{MD^as1Z*ff^uBsyn%Vi16uT97`Lxn~LTsA9NYh2~e3JQ~HD`YR8t!st zIu-1yW5}7gKbvXv{k)2lpPDB2@-J)JVdYfK!B)2=@Dqz?BU#*;Xh}`AD3#myq9fke zROjowEe=}QyZBB;_tI-2O~%GqF9Uqq_bpZ9JHw$aC`{$+)p*I2;`H?|z`@JB-l`f| 
zm$W%C!meOeW_NoA@14)r16N&STp(qw1)>+u70)b%^11rsk%->lD|*xd2P#Pk@MvG3Cl>+b!MST9xWu67I(tW%Dxz zD5u_hRx6dhrP{hIN7l`etxTOo=1N%P^7fZ2?*1%DSX*;FGttq~ue-DKBgY(H$EqG1 zM+v8$sPMSHyN4b11T3X!>+3cpn;lekQk7WI+sz)vdGp17C;z?8mrUP&@uF-BKU$l| zdr7n-k(MfYcWK`{pR?V92Ze#}w7f|gIP0Y*EO(T#{&i!pPyfQCpst=kRr7I*=H^%2 zyP3Xb1Tx!cwErkx*73=jiZ(mmKNr(sj1Km?-gQ&0!o2DNx1lW?2S_WyygCo-5}}9++2WOOae~YrOv|PRf#($b)$n4O)RtSP=>MhF5JBS zfZF2cEd1?fH+&DML!))%-|1^^YK;D@VArG88pCzfH0r*~uI{yak8HA0S-khAQdPnx zJK%j89q%@i`RVJHyv8jk*Tvr@{N;vtlQ6+%zXR51r8bb;&__08Wl#cq#>q*k8xU{Z zY09P@CsOYUT-7qUVi)$na!p(B`z=gAKg88;@9p-J5Pj)x_F^%>Y626BN@yyJ={6@g zC%W=m73vkW>xO?7YqCBRQ5-JC>$U|4-a*`>&n=;PQ=qlzynVQvAA(AriZ?&!-bsekC*bQ6G10ZIO8D)-zt| zZazHS9S+YrDXF;RwyD_s-06^dgh$pj`QaLy>ZPlHsCqkQJSo{*=+6IQZY#B`k0^7^^$)Q&0RCjsM50RkR!0_|vNyeOh%Z+AYfc2GPSzUClcrL8 z53j2})u#mLnhNth1>dLM>)kc~`L|`hEHddjIy$+k&83%u-)k~un%<@f{=UXHs4ug# zLM&9YV$CY61u)OWdb3wn`V@o%Gb}!w0v(C6{gQ!~CzTU&H6rwkC)-@k|mD7|cc)aT6 zvJ4-37CoBv0mpsXsX8K)+KUSlH@S7RE^MzQdj--}{Le&a0{+rXr<^DA>@eEWAB-S)F$>YR1$_Hy!? 
z&OR&ddDjE{0wh#qgkdG}nLf&~Q1yUl!eXs>vT&7(5c+J6qPv(Sv1%zQ!I!Svcb+@9 z(zYf#$kjEOK_*s%lCsHYg#&+_;Dy-PzO}8@*Lc7$(^I=#<4I6dIH^4Fp| z)#&EjDn)JU#jih^^Tod4OAA%h2%&Fz4d60G6A#by48!}|L&a_Mq~&#<0?kwQ4yvX* z<6fk@wr@q!(K@}`sIZqxc|D=IzMrT+U2~3;iRH5{x9ZZ(V8cStk2`#eHivO;D@;~H zIf@iyi4>j=-S1-1ImhNaFy;GL)%$Um*;w2Kc)M=dz6IQ~8VdvYBuX%^c_n(=xXQfD zE1xT?d_JopcJ0c~xn?QlS-$Ps7LJY8f{M?-0DO*tgxHoU$qGY;$s0T-f*RU3GOWEh5qlz|Gh*FdY45P3l(lwj0t!0k`h>ypqyj8!zB{4ggFL2A8fK9$d+AZe_I_jNmi^T;GWoPb4(k#?(Zraqh z^`pxd!It_nimt-8*DhAM_S9;sUtx>$Vxp{BRkMn-DO$eeNI7}lMN<*UboEq!Nirwt zO_blaHdM=oOPr)w7qP8)J^4yn7SE1tvH~J!EW0n(}b(GRXP+1Qi8j2NmqtE~u5ioVqJfl_K_ObeJ;$oDC0n`lKaT>3A+#&0~1A_@;=f zuG1!~9miG|#N}3TTq{sa>;9qgIZZ*~DrdZ2%tVEsjq}nh-1Vnbk_BIu` z6!0lzM}~>@3eM%1KIv4VZs1zF+EcXU1yfSkgHP+jm-v1bSi53HV%f{}PN$bL-H3j( zv;C9bZIIp&#Jfy!_m0#Uau8tvkG?;&v;taG>$NepKeX(}gJpa2PZ%9qta@hE1&#PQ z%**QLRpn9M`gz7h_wd)9(#D|-b;mQBP$hdsQ}-5GtMt9HwZ%N6VKU3caM?$PNd?Sy zqPdix?vnEHXXBi=*43>W$(!7b-$XHHs_r5wKBTuWk*(%^1gC}Q;^+A{C7MfvQSbHg zHs7I0DMC@bj@-rF@7g^!DR*ePja@dVTIXT=x{CX-+9(d+;aCUKE~ zh>$-sKlqeS(90Xux$3}9#m>eewVo{#h@(Zq1 zR#ul96k;-~1?}VxDK5(Z5IJ9yS;@l?`o0f-p*7w9_2iK)K#=Y!^AtAJ1QBnnQ$CWSU;W&KXkyY1ceX;(mI z;`OVH`z)%nLM=cVhWTeZ_pcJSRlaP&q7Bjz+K@+Tv+kkVTfhjzEy8*0-PQ1$vIw%Pa}CtEoFs zpVKlYcN6>OJBQ=9klUp0e`EFio%`}+zVH{RY=Qey6WPnaTz@Kg&9NXw?N4HrX~oIH zn_iYwEUUd7`PEA?bD_MLo)>@8Fi7GX_g(=W0Re&D*PYGPd{%Wki>cd$ML-~m0ugC{ z5Wc0ds!`u73xFi5Wc_j@pZ5ONQNu3N%x~dwI%=4u2j?<1M%v#CEb7fa-BSCYV28k6 zy*dh&A_c|Lcc?I76UUxT^A);EO`F`wtJC9s&crU+-Dj`q?XO~9_l>JYX*FMv0HuQd zIWhUws(fp|Jnl$P@J?2tJ#)(b@k^FLWXE-#uZ{OJL4fLPE%)mnwdGoG#t{f+--6Ac zuT7k)`eE~J!cGQ$<#$a^2q@b^%R<&`oJ!VfQ*9*}_#jFuO3JIJY0qrK1_z}bm0ezD z3G>#abKbGbAjgUY@PC#V>~p#m9O#sA`iG&T@guU%NSWBqmZ&OD^T&5k5_>$vD?9f~ zp|>w`Vk*McgC0JQd57X9{_ViI+5}KWnapfWn=6IR~7DgGM=RHSqzNSd6)-5_w7sY9%gV*H6 ziM2ByP)p|^DS6ZNQ6fCIGVyFp=mGNRQ~cysotAv9j5%~oZ(ek+u@P`PA(k9(V%5u2 zP}N-Bc~&;wS2p%zy!pIV`y2OK)J&`2JyZ|-w)<2G@B0$U_h&9$e6jQG+qd;Eo;}M8 zW7R-i3XWpxJoQS_j}aZhXZ~eBz;&B@LzdLPp+KoU^)QoP26df+Hrssjw(X@F`TXB< 
zZV7mO{{ZljD|2YRoe_Q5{LHxV8kdc*FvpLY_ggl_bcJVUQPnM}dPa6I&CE^4J|3;` z$<=+ybA1d8hKV+A?a6vo)~;i6so}R%=_yl8ZOugTc;8*AJKFsLK+O4HLF{c(lwc_G zBemPe!`(gN)-Bf8uU|)U=w#){3jP$mz+;@%*`C%HY6n1RMSys$Px>4rv1j>9a+F$y zo>>f7Q^R5^mxFwQs;f3zrM9+w^p_RAKDQ-$2VI7w7?m%8AVq)r>av7&#V*|W@KdR6 zN5^4R%2>w8&5=JNLZnDKJ6no-9!M++bd-}tOG`het>pTgo5l{70xkoN%Nn056yNy% zi=yEeA@ZI!C2Fe>5Cc&uwD&&o-NMor-93v6tcQHt9C^2ftny$+gn#x?`O-?GB=5be zLhEhRmryr1uJ8{El3>;=hDB0ffgj;UpYr=NwTW03RCOy1R?(LM7n$y-7 zsBESzlidzLzf0FdR6&<$z=TzyqhtuCG08NJBhQ~g%P)`bbS0e5x0j}Y)?buTde2UK^+i9v z_o;Out&^u*R!Q-arsn42FR4Wa3GTG*+@Y1;*TVolTq~(~G66u>!mj<95I=(+J&e2A zd(l21eVcSq@j|xxH0`aNP3sfaMw(UaR=n_|OYopk36Cn~J%!yhEgvtVO4FnKzWy7G z={-ovA|mPvbCTj%a&pq{HEquna(Z`v)8Q1?OXe! zn48VxtSO4q+OKpeBf^y%(^h@{QsnV_`ybSasPX6AH5SAe3z-I0e261IlGMJY@@7a_ zJw09AM(MSMAoFA{!2VYY=CwJIglH>9Dw!NnGb)+IWBldwZEr{IcR39TN_+_=u^=V7WGKA?M(F>Bako%jYOkE)#HM z*LivpQ`zWutOtapPd#qx$gb16pW0nf&;6q?^n2^KRz>f5*SHdO!DDVJ_|pR_dlL1J zV=l2EEm-w-jyS!vssCn0TgCJY&Gf>Q=JO#fr_n&;cC!elm(m?q)E8A|0zs7i!C3%E zCXU=r+A$BTz0dltb;+l?W;T|{(im5t>f83MN<0!KqShbhsZ%R7y`)es>yQjlp-&Q( z_a>vEi!)q{%*0v9>i-s1;>@(F&<(h(YyIVc`xcJ{1cez~zMTwUQ7?b(Mo#SHZ7;M| zkWcd|t6tI7dDvxvLl-u+&pY=0)AhT(FP+{-S)~Yn)`)&2#&AskKtavZRy*BqoQ%J+Nj=UU3>65zjpZo63`j!{G z_u+E6^S9n0bUcJGBbn@LxSVpp(pu-e@H(5HagPXGO)tC=^4ko>TOz-0^1WnK;!yd=JCU4;K3 zhMJm_7gpv(-An2)DPCvG9>L9ZI^~|^mHXtVlC`wS6eO3yGQY924OS;l0G-Uzl zsHZMIE1tNbXb$hgrUUGsi%kc>>f9Tp9T#O{eILi46ZAdms;BG`^}0%rMyKStM`cKd zd63N8+s)mZxRb&*lvjB^?}yxo7r-F6@Gf z4^J-hVohRvDG=CKaX;Mr*gQD%M{emmV{c9W0K4aVxY?fXf8s)aH8_QvE4C!~#|`TO zL(T0V0xonc+q!ml=EdtDA6#A;e}1m~cFuEp%3hZOt@ct$U6J=*iz?X@_-wzB5Eo$a z`}90zXCLM41z=zt@%fAP(;DwPei-bHqCLY!MB*roWl%E^;^U`Jfvf}Vn6+|O^3Rd+ zzaSwIqjL6nSx7{j)=%+HB*l&5ub(f|+)7jCf!#+khqMFxrde;W%7~pY!DANE8Z+u2 z-b&+~d2|0PnzDLq;>#_tpv0YyT|qj3?Wj$V)9OCo^w$afo@+Ty!TgIu+ay!A{x{3^tq)FW=wOnXSaO z9psUF0y#?AfmLJmxBfQG=IZ;%yMt#wh{IA(dnO&S2N$%6?JvsTBrVLAy 
z)C8g~o)Ds*z|ZOA+n;GGm)@4hrYSoMsIY$}O<4h;?9W2`mB|}H0x*BXW!bg6NwGR>10AspXy^x^l)cG!Rpen)eko26=ki;^cAmZa?H6+PD{IdpialFkmq7F#=79HUPuj;2-xRG|eKvj^_ZdS13SbYR}sX=6~ z?G~ri7&D;b#f6)in5e#dN#%1gVl(9ss!Y~%&Gy(`my3t0*(G)D=mX=;w^d@gl z0=P;sQ!u5@dSb!V8~{`}*$9Bo4S1J4xpT1F%!lhYpohGzBmm`(Fa%Ijvty}c&JzoA z+z2D#0u~Sl<1=Y1ay)-^)|H^MpC0iu9FE_^`9E~p5@zFprLv2 zvClNQ*{#!d@PvXCLtgdp<3~;gT!T7m9nXVa9$DhgNt^rnXYcGBtL<0DwWq7x0WaBA zOw!OJH{MQEKkR!29_qFF=WoUNMJ{z)LlV1MweDRfyQH^PlV2uQK<75banlTz zBMH+BJH8BJBsGD|X1U6^bI7Wqh1LhLSf{4o(S}KEqxYGkECQ3={VqtuGyn-64R#R~ z?)PMg7+Fd%=$=IcTy`VuQpR*#?9I35L(Ue~c0rrbCgZxl8*yqMJ{0jyHw0+q>mu%x z_C@erdu(26??BA@Ge7!XoxfgVmsxCbUZ?W;{G75s38saf|7m4^kBDTYJ$`T+#2lsr%iT%qr4_C3hV&9}4s%|sZsGAJyIfy-I4YrQ-kDlLh@ZF&sj$8a z7i9PyTcJ|+53i3`WZ}h^u}9+V?ac~^63}ljJY`dzElMsdCA-M^wvryfcGBZuNQMwDi^aQ3QrG!yBExi~4lGK^LN; z11=fri;y|?cXgWp-aEe5ge=uE|OrVZ9nz+I3CDtSt(-J^68T%7l_f%bZlL zI*uZamRMM7QR#b%2?w-%k!#~dek`Xs?L4NjXS`EC$f;o7;T@@5R-x+eBw0~BaOCK3 zAqC@M6T(an^n&AXK|OAp%ZcDJHb{`CH-IfaYk54rRdKzPcgRS?zEcyMN#&%CuL9?k z#mLVxl(}j`^BWYY!gi)HC@{MMI|9Xn&n&&$5e+r#)uMNM%#D!Sj=qJVqiutuVR_DN zM@fJtr4ufu+p6hj3fCKGNE$9ZB3?~fEX?wi1G>4Pq&rce^e4CI?b17D;;lwYXr$DZ zILX}(9O{}ToR-drl5&A^*6#^k3kBd1W|N`ZmlM^79u)N~)nnu@<#1@>c$gV!=|n_B}yu)T~8ArE;`3^D)o~`^&tEJ#31q9{EqRHNl5~f zY=tWJ=cqDSJa)6pVK#glEUQ-ppzct(V{a%}I7V;h3d(8SIQD&3N$OVmQcDd&XMsdY zi?PFM?%wHN9u=PKZF(;{_i?!GWciJK&~K>Lh++ek7D2(wx^$njf*vpOr85Sn>xi`I zn(bb1%U+eFSH6IkOK7dQ2 zW{3febzb}h_TzAWM#}y->=AS9Xez2#pCX`4LEa9ZvvR0leP9wlkFV)hjlwA$!>_v! 
zRzDF2rQ;t>UgEyK*JXH;;SJW@Q(t4e(v*HBzGs66C3c~L2*Q!eU&HVYztd%{@jaB7 zgdOXA3RXbMK*4jr-0x)I=6<#|?Y;^K%t23bDNydqQvAG3G_;=qSmEQ+Pd}Aa#SMi= z6@9gH+3zp+oh*=@iIibL^2fu+3mn^^1qb_>-rjzuEgt+)&wuvayjxFsS3`B$b zqW;t*TMDN{c!TPeAX0mb(TIS!jVy=@n=XT#vur~LUWb+Il?!xz9 z1cOR>c^g+3u0EO^^1rwMW_?0S#yO`w&dX0iUVE!xAUo5vHJmjMx&!K7iPHi=Z$|OW zF_4?3MB`#s#Xj44baR@I;{ho|rV2-S)N+tu?vgs9_E)=^MorHmHJ}1jI6&53REW&< zmh#W?KDoZw#UrPj1Uk9(^#DtyLI%s}Q0PTh&KePNIL&(A~jbviU*--G`ZR|0?}i>;AsfC?++#;TmXSQVV;Ykvgkg!`>UQRe%w0OQo@th9~ zgG3Gn+B`fU*U1p&=Q7?h5aB=IqXSFGF>SUOEcYH2Rgwuu+;$|AxRwsli}%~K_F`WX9!$9wVi6VVb7a)ty^a;3sg;P zoSEQXBF|m}Ge^DYd-Zgl-;cPhe~8|?doNF?##B{XJK5YtK0&X+kp}aj;3-o``{1A& zUi&N2wtiIP)Vax4AAyd}&ZdvLa&qB&LYY%Kb=E0bJDZ;8Y!f2&fc>ORzJ-B&9f9K8 zEx^N5lcmdH`=j!OEQe_i7&IPd(y_6zVi^K%te!8;c^L*cpc?xM-cuGEf`tN-3R!e%xtS4Cv@L38fTF>QR>T*VE@sPcOn|G^; z@$r(5!EL$G!i;CnPucdBQ7#jCnYd_rS(g}id3ou$xN0>hY}^y1b|tWc9$n$nL}<05 z!a8vF0KHEYXle8D$XQbk0wbO1YU4}hAKO4Z+t^3U43%$her#dUxp;2+3PS^mo&*jJ z=`s__xn>WiM#p72F|=@}M|un~d}?YM@p=>7bK1*}Aa^nmvy60#%AF><32$3A*{?{? 
z@;*qS-jrHCZX&54qC73k7b4~Iiv>e&di7q&;*)+|Q5K+3vBO1?glNXcu$@fA_X%Iz)^r4SdHdP>|b`82Se3u-QO@9k#(G2%-yhF9iB{DRS@L$^V%uA+pH92v#g1AvP4IB zYK`5eSp^&>CPzl>8^*SEeu-cVhR{ZV{)(b~dg|>^=#2;7>Eh~lv#>R%-5HVrc96T@ zK|)-u566Y|g|Jrc{=_FZdpB&F9@KovRQ>CX7mV(@_jL7^Ro@s>V%^XjGX%`xE-uM3 z_+i8FiHXmqKxkY7vbnK1zH@^I%aIX&a%2Lo9}YS4$aIqIsQN@U8^WO=it3Q+O|TCa z;A+!>b{Df;(!5tlVSo4Zgp-QRQ4P^?$xbI6GUBp1#==ESotrwxVvTVh87Wls{TT}m zuv+dhu7U#Atb)tLewlYJ-(KTpOcQcZ6+0Fs!ilYtvxkCwmY8A z6V{dX8Z^th-2)`J=pcf76i5k^*1UEDc~5q_u*nYj4JQLh>?-$i?&Abf>t%guz;+dfc~M6dPUQzZhJ=O*%&f*H#b+ZL)rIku!m0zn;FD*NCk?MM2qL6jt=N zfq{a(z32m`irz0pHmFDq4GkK((vjUD3tletr{tN%eb(^^$sghJ8Nz*Hb!SDe6DB9T zI9Iodzu%R}a$yKom@LoTmXj}$pWhgYwvi#9A8&iv05qo*5ce)pjV15j1|tv!?71MX zGL{0H@Hg#IHUML){@6aw1*QcqOCbvP-=Ma@ty}g|y1#$_4!0R;Y$d(z312qjLAX`8 zq|$E$wozg;J_sI6a)g4J*?M&-U!Ho2?gMcMx=?_B6*8~iAuyF_dY6zenhuCHQv#m1 z5U~fsz;7#248A>s|F#viA7Aaa6uHu$KYvD&d+&)cgnAegVR&*xAy_I=HT-=u|{@O~b+hF}moo^6blTG{hQHrNCW2KYY%zp0Cw(@G4&v9npU>rWH4 z^p+XGIpTWalfz|l8W#Bm(|J;-Vgf#{}dGcPxb-8JVPGXZn%D1A11P(Je+E2B;vUwF$*%U+hZZ23^jH2^|is=kAOcn4NNG`H-M?h7r?k}`aEyXERAf4 z{$k`dqC5x@tK<2uoY;Y%i3yM2+!mu;(*qnK^ZF^h<>lqk?|pscF>!HtiAhOX;vW&? zZy_q9Am6?{gMo_r9Ssw+f;5?u_p;l zN7AYUHVfQyC;tz#u*-0Nz<=9kTDZ^rkMR+3Usw}u3r{m}X?slzZuQUKwrPOQ_S%rY zz4zNi5KZG&!iu>b^Zmyyv=b4yn{>xMCg1<0x-q;?^ZppDK$}y2m@lyzul9xQvk-FP!`ef9~To5l*+39rB$X{OnO94|IsVV zAg%b9u1eQ6{+CwumM3zt(EdlSn1BT3U%HC7_S?U-D$StG+MoyX_g<+6BY@?t6|F+{UXvDkQ+BNaft^&%%ltBoyl9XOMmO_i4|D2LSJv z`h*SD2vK2S!0abtWnp&prd;mLpRnSMPnCEHP{*j!-&F@ca#2xUeu~L0wTr~r>>nYe`Lc?|!=Y3n5 z1~JWRTnf7qms-T6gSoQ^B<`k-M#+Ky*cpgX*BmY%^~)3fC|_CI_#w->>Mfa9UEW-! 
z>0HB9xO2ER_(uBeeO2rut+9^%I8%Ke;0^Qlvg+^OiM%IDA3c~&_1i^IFN*@p?e33U zv){)J_Fgi$+PS8J^V?@eK>i@}vzv~*U->>q-}cwKX5k=ypTn?hnI_W76g?zhrt*7B zY2l6(RL0Ngf4@jUA@_4{OQ+PT&USRh;OxOox=;4@4NJv81e1@qf4j?b1|&W&H)Uvl zf7Y6<8nDTG32jlNYhWr|y}*Wf4oT|$e(yy10~wrNvj2WDK>Ybd3=tOR_Ebe|(;0u% zRU<`#SHIX!*ZLVy9Q^k!K=C?}osRa_Z!dvO57zTr9Ye`AR0KV~VgA*!u>ef9Sh@B>m1*w9e%<&86cf89i?{}*3C^nJSS za8&5Sk5k)aFc@qDI0tD#zkf&V?(Sy7p%4Bh=p#WvPrqjR^y$-Q=H_c1@87>SBD>eg zO?A$OV*~3A9bf`MATVl~gW*Z&*d)UPGgT%#*KOZYG1=dCO zAekx#kODpdfz;>EpRY|=-j3~}EUm1ld3y5a7XZ<3u8al9KpKpIXm%9fDnURAtg2X*xA-ea;UPK(xys`4KxF{t4cJ_)j!-211JoF;Vi;SszL5Q&m;H zv7Y|qpaItAR{+}Rl_kqXLZm|iWW)kcP=_Nyh2YQ4EgVl~9Bh>PAf84vwzN{j2l4dr z$FHX2ph(@S1#&)HkR7l9zcZeLfsUuZ*fxO6YHT2eQi9c}#nVO6)YR07KvNYT zK=IQDJbl*e?L*+2;oI$g>ohmU4x5m$&S`xp9|wE?8{7Rub7sU=2H}%wC$bc~C{9x2 ziCiA+fmraYsKjfv1a4;~<1H?V`9lblv_+$~qMAIz0ZTrl9` z$u`cIKq9)xysDLE;F09_ckWtC(e?d%HUK9E>*svsfjvMz*i#2PB8b9_I&UNCq67v8 zssy1C6anWbXOQcv0hhb=`4DniUxV(^db>F*kcAjT4qJazku<=5oC#iVzh^L)AGB}7 zJ*DiU2%W=>X0l|BPXRpz-un9b22<${CIR^NA3=u>$zdP)NP&;1wKRIN z$(CmD|Eyb}0m(5sCHY?flq2KYRHi3SRJ-1+I;VnCaB*>XHN{UZq$bzxcSF{^e!gJ2 zC^?_k$N(v0zUix18tR%jL100}0C}bfy{+>K+=|ce&WCIdKi=8`C%_IC`B86|RbuW9 zA_3^U{XmIkY0O3Wi-7kfZYcIjFQXp{L2c`Y!49wCf=fh{1ULIMi)E@nBSm`y$S^nZ9;`JNK9v45Oa`kRp9#WJHaC=@~v%=l_je-~lr- z0PKg$;kaX1!RX#PCn)mf#mD0>eN8iqeDTN@#eV07x3}N}L3fo?&NPuBtIS6^iU=5B zg#YlV&=%#s4?-Pna^x(Jv?gCQipXCsn1&Hw|RqUkMh9A=q+v z&^Ipc)4gfnOF@e_h@Xu{j|^^z59VL)(_Hc__=IG*e%+_YvIMx@!60(Hss)&nt;K$O z=IPrrICscKht+<0!LaMphT1@0M1iNm2&V+^X^>oe`1`%0b9|jLAt{YdCw0X}S2hn! 
z$P5+6`)qR4_@YF`sX51SDP7Sjro*W=PYQ=d9}g)QT zu1;aa{9HYi?*nV1?TfChJk`T%mXr6+ufB%hyUHMGMj0*D^&PGcuR~+0#S4FIP1J-; z*4ssgNHpY!gN|cHv^!%lHVc>)6@3d9nEP~giR?$F6WDE)WD?bLg!IsUBGz*q9Y5yUL=Vb6TRGVlt zt~kgWI0Q#-ZEfLx{@hUr^#i5nAz3ass$M0@m$CBYbG}BYEcsmANevF!+<|aHnLUqz z@Z&cx^?cUMa!;=8m94!}8HIVLY*G~ql```1tN~Fd?B(5zv^4qb>};L`7)b!yf24m+?wfD;`kq6T=9yVnjaffh29sUQ#3z<@KYM^3 zBnQK-137EHh>HGlzil-j`Bifg=lL2Kh)g2lD_9E_d+k&io}*md6TygdcdRk8)t;O4 z_9FaLpyxGQpvVa-UX#@(5mSb6sq-IHID`N64m@OoQ+t{U zz+S?-Ohn}NKzHs9$=9Yij8YL(tB>yVCj4BV?p_MkuFT(X=AQQx-Js*`9+*#5=MSvQ z0|}ol(vfIeDKa3E70tmmo(}Q?oCy?Z?}n13J_aR|9Gt)114*8t;Q$inA|2Jza^%#+ z#DuNDn;5qUl-r_hM1TH%2l4 zyJ)k}>C0bSfO`^>hAcWK%YZ|u>5K^IbVk@6z&#Nd5&rfpyZ70@sFp&2K6&vBJqdgC z1*kuNZ)C%w+|9(1yA%Ut?kTljzx9`W;VYo%)E6=z1W&X9wRx5&S6zC4y<=U_FRz@x zdE{_Xtb_x9Z_rKaTe?WEwqN#ysf431LZPMEjL!(u3_5tT@uRXvn(t67aZyVGhAS4x z3ot8({@ru%kOi*DQ%)*-aJGzpw06L_eX(A2c-^+zIP`%h7}Ri4_#kvVF@ndp*ZcFv zXB*0TqqZOB5y%(Bw0QH~lD;t`Y7K=dXCAR;@VNz;Dw0UQ$foS*8;|u3yAz!vjTeHI z=UHoYze_p~m`47hIr%EXn}By+&7AsjQR^q%GI1r{BNnvXf<)X9lf|bSG~LHzb}W^7 zWVN46iiJEhZZ=DB+koHc`1PX|<-*L|Z0gxOKD(&*o-X3%#=Gn=deD4B@9L^b^y7VW-@j)jKpa*l1 z7>VPbL_%x_7!|2M+5hzXU&Q8_x*~;oS*KSNwcl<(iK-UiI2|J*Ka+d7|2p2V<|@SJ zU8S2+m@1k_)PmCnW>Qk_j(*{@$osO;rgN&FWA-HIe>??+WK^uJ9zH960>hhs93SJa zffd4$nQoC()YpU2&EgO=!&Q`vpYy}!3#GU6$786CexNvBH>m|ON%1F*k;I0?oWQ=w z98h3?`ZdFV2i9L4a&CacS99qvx7BW5p!+i4-j6NMCdB_UBf?vR!i@+4ZE({>{~544 z{v}YLToN6bH+Hu3*G0OSg{xat_S* z1A_Rg6x}*0Aa5uD3d(<+U7yqE26?|1{>wWMaP8Ht4*zY5|6LX%4h-`EyZwLh^xu68 z|Ly5lS#1K;&jj^<6Aek7-n>P!x|2PhU*u?g+w$;d23+11tRyUwwKknE@639@p3xF7 zTia?U313;D_mSYvi2iV+#XtG;|Iriwx2A-beqO6cX6mEY`>SsontGF8m`{J_N{V5X zek9%?|CI44XBth)E?kW0kzA4c+{TR@O<10O_~UlHcWYEj5A9evdO2Aa*;g(_`ZLEY z`W;v|(_8w5FVe53vXrx;`#yBnv|43cUG~*vwLD|Z=n|0kI6K29eUN|Hnd7Up9;B%7gY_-%uUwDZm~#gcWXp$VlUC=j)Zro-Kf;T~1rc zu}H+ISN=-$>!p>EO%4|1DwB$us_1=XWs#l$&P1r3bmxHa_LNFwMPwLM?<&t>LKB#D z%KxdEz?_2b?TWt!ll16u{7-9RyP$45q;w@bJn|FYCNJCI(37zAGjuaw7)$<<^}`s> zG&~OvPCsK>8zK7`R{cf=DI+0L13k)=Sx3t~Qy<9gBM!ni*5SQni?!1%(qP{gTEWmT;H&euemxt;e7Mm7a& 
z(5Hy`&DTfnceSQUAl^rmvc%Lw@?S+HI?zKnxxaNGW;( zPA5?emR4t`S&H6v50gt^P)22lxsY_b=q&tv~InF z6+ii+VLb6Q#nJ^G4UJ;x+qWOa3YD|qXhsL8O=8n+LrlvM%c-h`gfU@phSrfGEOmuTzp z;2jHxmZ6<84ntM7r6deu$?-?=U>OPE1}=!$ibH|T z+i?%+ino(A^G%iI(hbRQML(4!>snJLu=Nd+d)6>)Wduym3P5%GMov+&jLTvuFAy;K zJ!=*5wOhUUfSiNx1ibp)>gwui93>o26pwWi3QcTrLo{_L;gCGEG#xdoYV#d&&Wag1olc*c@lNMj`{}7@ zgBT5Y;+0oMT5;f0ykrP$hE z|3ISL#o{_~`O}l&BZ`Nzs=M7{V9bd$^k5Fm^L-9=8~JVB+&edm62ya4rw~4;6rrb~F;N20W-&lqFJ2`TegXW0LLglsJjfLnVh16?RJ95T(2v5s z1`f2K-IDV{kwAC;Pfs0Hmm)wmLZqDOt=LB8EW`=ApXL52dS{x7r~bon9>&MNzly_e z??^QIv|QeR1k3ry(QML%`Hh{QTAXLGMvEF%N2eH6KzZ`)Qr`LWglw{Qm3OpUEpf-j zNXFCf7X6ZkR_ZR~Y=v})8h2NaBC2Q3eKGR#2_&1ax@l4rD$k}_{2CjkmMmTuQz33B zk8g;H3f}G=+!Fnd@2Kmb>YqUKr6F^jt7PNBxdu*e#~~#>G1KW+&-SdyJOMUn59dHr}Qv*3L=kqoMUBID! zA^?n3#eqX?HsIr$2*_Q#f$~EFI~)!MWK-P2UI4!Kn_Mxd7Yx9ut>!g=G<>;WwW-pR z0D{j(Ngu|a(J@9<8^)g2m!nfTQ4jc{q=ncg3$J}xny)VG))y1xEf!80@JUHGfvSBO z>mk2r`?q`fM#tA%=B0|Ib%z^o&*GC~)~bA5X2uOYGgfY_%7^-@-L%vqHthzi(@O75 zqn?RM&W~Q*G}5P7=COP$xA5&LVWW%e<|YShn%Yga-A%8Zu8xQcepUljXUAkIHZWs3 zTt-zkd~Vs51ay-lb+=|nT-he?Dr=5V=J4&B3|Qq(d78{`Vm`4s|4`-H*!;Zvbl+^6 z^SU30T3W$#yLS9#U<~jkEJ@fTCjiIi6ufZwsV^8XodLk0YR`UE;B!(yjD2pBK+Lb)O|U+>QL8XzEtkvtM_8NdTK_ zz2G;0$*;}aU_`hWXP7J+m*tyjIaZXP%)T0#vzt>-{^XMS)h17h9N!V-7T-f4A5l?t zKjMw@8;p~q;ru4M=&Mlvs|R*V=ib?jYc{Z*A9)-U%(rh`H@}oho2HiGc0D%_%8!nM zukY9}-Bx}{IKpsS0jivqpv=uERu`JIeH_8Yi+&DfT`xKzvw5!t7rVG2iWA|kH>xT* z5z%Vh^+>ULF+iQef2>9fTLFdDyV!&Tmr}TOpDW5){VbK|>5Ck%-JqD0$Cws|V_85m zAGEV|#$nMFNEDA!%m2FdXT~^#aO)$RzRPfKn~A3zD6RauWl{D{S6ZtOg~FPeBAqzX zk$iO?-CEQ9MH%FXwIrjoY;1RzbHR%uilMx$~tm#_4*;ps~zaBIz)h&D>x)b=j24;fUhl0c0*(fa+>v z|KRj^drm&9{QBz?VWUPpZg-xNYx8dOBl_mZ(nc+bKPnccfFgE=RE@1Pk$1p;`#_Ps_pPmF|97L1Jw2 z(*`r6JV(@JeAP>+L_7YA3R|`tabZR8YYP&gE!2FWYrWB~(ISU-+Nh@&TnTCxU58;B zPx6N}RbAcwGDHN129onqx-q(YHHxo;4o*|Dp3f^Y#$v;yCFe7eC_yh?U9W9|J`_UM zg;wkgbCgNG3hVFEkYgD>Y|YuOjP|`U;{WvG>QsrmjQ%{|X0ThWX$&s(Pkum zTqGBzuGmew^jYaqT86)b=a1QIJQSbZ{Gv6MmGKVxjj8hVoYcNVZq@+n?09L)AgY;v 
zgN5q*_S9@PihgUly2{Al246+L<5Eg#?)lw&+jK{BfhrGAPKP-c6NrjT4${JFcbU5~ zIv`G8_1lc~M%Kl59QOh8ARu!z%6G{wo@wLAb1g;QoSVGHV$(T8<-RLoG*E0xlYmz& zGwsSW`cw(lXn4r%ANQ?gcq!f859q6BJypX*(a$7yFE7eMLvi1zl{2-5h+EeXCMvsq z{55T>jbz6G9#FdWFo0fD-1U0Te6oordNW-(~DoYsffg9w~p_I9k5Ai7`2#p zoNxM7i`)4HHrM}hL5`;N9mspLw5f~y02Jl&SKuk{wS=q_#FXXlxT%L@2Kq@fh$ZO2;;QNpC+5KFCaJ#CF+mv>r zDJRC$*f3vFbv35LK>>PI?+6ol}Ef6-=1^8{e4-MyI7G?mzx$*HmpMaAeTr9bhN zq?i0!CQyJB_}@Z*nrOkB0!%%|MZt=9sB`wq7g2WimoK7(9>iD3=X5@#i{49$hmbCb* z;SN>~K@^vrhNqC5b^8FjWJ7`jx=h1yJ=^sSP7y+pEay=^F_pT&YdGtPZvZZC6Xi?LpDlComRcx zltS{z)@hVYje44w@Ws#ZTB}Mc%)sU?Tq!0Zifx zs8P!uq^juVOF1S(wNm<{dLyRsgOAbP6uAfJB3CJlkLW;GiV$Y~+MX*yVvJ^*I=u$> z_YHI}jqTTyhw@uoBMq#=vB8k2keA9r3b{vCP$ltgXg2GF(8_-46V4$tS!V4^Pz0yX zg7f6j46CQd=WdPMXD%`gn%&O^Pv?~f4?Tv9g&7{nmHd> zrR=LqS?{;!;P_F&uI~Y)+Q{*G9DVFNI90GU=-kaVIPTk3GXEbvR~ln9UN|nyh@)B~ z3-OYVFVoem)8lXEGhUzXQS|vFzAl&GYr8)nk_dxf*-TCk$Yp`UFd;k^_7eT3HTj+T zbJGJvZ#?8Fl?(F)6cP+XKTp>N2VJ7$8NM&f@|&a_a&mICn6BJETw9YV@?E?W9wTyK z_OP}kw@jk4rY(!dn00%i1^E1|n-Wv9NqV0&?dICH>}sxfE?1wJU+|>Zb~I~aTdI;< z@0VW--&TbJ;^2@nV76F8L&R^NFMLb|CIL6;;LuO+tV&V059$}0w){ZtcyHn<vXjyK9kB4HnFKvC6%M4 z<7xK&dG(|k_+;%OQ&CxCN%7cR*X?p$Rp1pJMSpjwSHGgIU{AD0u_O0=S%B z#agpnpO$g8G|r5fe&HDH&lEg+wjQ4Fs!5zr1Vz86I~SU~CLlkXUN;%N;j4w}MY+pJ zHP>iBQf63{8Z5-8A2#izi)qvuMW&7nAyBgld~WApoJviy+Z-N9q3q|t%4OvdK{e0< zHfhqh8@_O9A--74krMd>Qz046l2PZWkU6GtMX3qWLJ=NGVm$yk)dMCG54o`SCj|#c zS5F#G^w97yq`Rxja$f5eZ4um2?$MtIEPO}tcu|vC2>Ipc2q3RIBBQIZjaeZ=DM`u0 z(z#rjS3&2PubRa8L{OK=ge675VWnkP3s|;?HK>r1po|zdAoYgLN=13sH#H6Mq|EXD zGDNfzwbQ-$P6BKPqnc};y17)@uZCXM^h0t&9f>LZ_QqR2E&x}df|b)+T^h#kv%Een zlyc52;hZ!%zp`jcP_@JwT?)4a7=&%JYJ-fvvZTPWWl9A;$nl)ONnXD!?;~Q&rjEk@ z@D%*x%`eANXN6kxp}4)IuIq%VtuG`dSXGp61!N_%o6ak!hH9b*C2`k%fzkN5@vq(T#leo-sE3V@R`WF5apQxPBLHl~Yk6dPN%8-iPQScwf9 zU{jNj(s9bvE`^Q!5=J zV6r4dvFygC4-E^Gui~;k4N-U@)E(FY9~}MShFi}M=KX~8+_YG8HuVhc_$YU6$5WMs zJ(CPGAw@q3rs`hVrB}yduGZi9kbR{P-Z2LP(L5Tn zOs55zxA#9Ah~RI;x_`z32S6t*<%7eiekJ~dlx*Lq@G{t#KdMW9ox`H|r&}w0trT}N z-!ObRmR<(l`<8B>B 
zQ1yQ?@McxB`#>0L#@%-TJz(WzUdRY)I=FE%nDxtKt4fRfQES>+S2j{HRn|2pxGi7d zanxuodi-sD6I1m~nIf2sHwme!x>kS!jM{3g_+3vy=|7OjF24P~OHlazKu@Fz`W$|; z$RRdNkXCu{>|V{PTF9LvbNx=pu-8AW5%IOM+1<%Sr=~J}W--B@bG-ckzS77dL3|Dr zKYzwFGF<=U?D63h!AwL@LdGX5$hPI=lv8Z(d>4|HTFH zqikY|cUd#-< z%`|Kc2i8IBiMm`CmeEbGsg}w>#JBa;@<`8Ps-A9Fr`p}E_E}kXcji{3(>_&~kYk`_ zt<{a;Xqw$@XPvFy>9>|urHu0So4T&mpxatG+qZ8MEyl%hb8Yk3`)a(HchHL<)TwqN zf`SM!Aa7G<>W{!36~MnX{x8(PiB5B2`n>b)Y$P9xW?CH|ke%kRP5i8PwVvBOH0vYY z5G`-@f7R`TjQmASi%N}R zT4+iAN7&?%k+`pMVf3@MwY*2=3+h&Uf*9 zhugQWoh@@anQ7f?Q|!9v+yOnxt@$4_dy6ewtekn4B@G|;-Y~}1JrsOO#A-;J2Fodp zEH8^uuoL2KQ=zn522~rnXTSv1r>vprf>Td2jLy<+O@*rV4D(^rT+=xRKQKYN{UE9Q zKBE;~7cpGwqdjZr1uW9Tp~isa0Zw-la7Mk{%p$)o)Xxvx>9w;%enQ?jtlb04Acm++ zIMNg=l3!sZrCp5D>cX~9@QGjKz83F`qEmafWFwjX@}>F6w$0iBo6=yBm)_mC9X zq}v~oVpgxmj<-Dw9D5rjNRJ!59^*9heiuluN{M=9t+abO82-K5H7C6alvX2`w`+O8 zad@F1!=x17eoWi;vQ^Ug)d|01X%d4`0~w{6@F8uQ(}FVj#hZIKj&Ieh3XvthawOP< zL=TKzh0tG2^YLfK$rtvg=ZYNkt6OVm#1_=!jn;5+YC!dl;AOLt!WbeS+Xozu=7D8j zGLqATf$%cost8S3{1@E+(CN&rt_h= zFv1q)(k-ByeC>Y*KMwRUoiv&U6DUmzBn05YtlKw)g@z}EkNP*XSoQVsiHQo6Z&R`f z2z+L`OGz$xj15D44$c)sC^E-I?vVil#h>qq8uY;a1AJ;~+|h-i3vgpJ^H8_Gl#8JC zx#--{Ya(eHgDR=Sz}te_++-W8n`4z+5it zc+1m$hrw*2WD0FR;Tzl*i055JuaOH%BI#M|H~vTluSGw>EAP*P!Fn!s&xEp{+Gx12 z6NVeBngObVlP-phmyk*G_pa|z)>xO4th1E!7rWGtmhJ>p^I1;`rPrn%lL;x*;@(kt zlgjWypidOj`Zut=79Bo5!8xxz@IwauC%^>0!#5J-9hW7@&$Ozl4@jSY?ks~Ify)t2;2?=K+a;=lhSxN^<-BAcy9-y7G( zLNI3b!jXJAy+OfkT8)kQo9S1IF`F5Qu>9P$*<+Wy#y2rZ|M({MVEjeP$gU#`@JI2W zT){Hg&evs7EP6R%2~BZ(LY|K2x6n`R{Bw!5BvxMwXMf@46Ua@yNU>~-Mu{N<4&MRm zse{tx{G5~dc;=!;lbntPPQRiUw0nj^1r1q24&FIo@Xi6;EnqjANR9oNvVR6xCc4S+ z4BJiIrN4pc@I^3dFtV25Ys!{L^_I@c)iPcBAiNQ+yci(=xLi25-d3g0?>1i@1chSO zzOu$2ElC$GD=&SNppscuo)1* zsZrLR0(zND@=hw;{`9Z+L7-&oM9u&579r3(S&=^2m-;7xDVE@kmg=R z!0iIjD6ofq{rA?OtZM~=cX$p%pU6M>@LA~Bd7QH7sExr-zbrfZW=^?Obw`wf?AJ7p zXub`TA;tO$Qhx0Vu=rSmqsyxEK4{((pVd8x92AEdWBQBo!cXWmCj|G~eeWX#?spen zh5xtv@ka1kObb}E09u`0OA#`h5?}=m)uuTbC;F*%G4G&o%*I_Y@AX+rkg{-W0};vL3>eJnN6`tg?Emv|DQ>G9 
z+I#e92vRJknje_Cp+{d`u@Bz->M{ohtGsUqZjdPuRUVzIzQ$2r1loj(z}536CMNqb zioXSb6y|-7hD!c+)u9OP-)1VqIH+oU4E|n^TnPm2Bz~>Tx}9I{;C#>cx!As6X3vh( zE3ke#zp+85>_30%>6vm2Gq;>>DaPO31t@JTn^(S7^}1()bd3r|MBIqDO$8(F+bg}! z^9zd8vEc{yDABp0dnU@jR8P_KTb1tD3BPLTONM`UAXmk0!#q_HlEfEc7)&1|m=Ax4 z7|tvcr>3Un@ET##UT&1D>LXMO){BqXNx+ZE2Xnb|TeRO70S0S;3}=YUXG!^k2W-r5 z{rL+L!RhBf7bIye?p&g~wST5gjU`}ZOiwhB_>4$H;k3T4LDz>RY>}qcr?b5J09A4P z*vGUKuGG26Jq(g6MGFfayz=LxSoM24I!+4pI(OW@pY9MG_X!nhncUWqUiB`nj;z2L zAf1a3n&L3TIfOmnQ@{G;n8-iE#%5-xqh#<_kB|F&%O1eov<0inR~gLnV$4gqM~Gr{ zRKn#x4oOvn!YGY-j=DbReyrYhfWb;u6?fXN$6c=&?YE}q+%y5GU{W#ql@@tT)1j`5 zH+A__5!))?#=_T+KkOh~LY;O6xe*Vk5J+n|$D-epx=i>>9PbvIm#UZR&et6s=<52Q zL%GXysWDsGh$(2bfPTj9g|}k!l#EahHVB}T>y)(~DIHxPUX~Dp(I@p=Y{M>C?Phf{ z3uvE36^Hup_JAehZRJlzN|a_@O`fp;WRxIbB2Mgf_*!f4C_vIw2Z6?ZkQo)pt?ZE@ z)Uk!>+%D5*$PZqdyr@Kbxqo{dcEjFj^sqw_@%^$wLtks zy#C5L`tVEL*~Tv2*;Mt7#9XmZM5|lw5^xZ$)H3`@lg7Y7bUxBF3OcCc65rqVAhI4= zZZ~`J=&LLKPUR$N)&TIzHf|+TwD#@aJccKqS*D_8!E9*Scn>)SO`44spAM$h>=Rx6 zPwA+sz0MG{W(wL#6>NFonnzD!lC9ANY`o%2&tW^%%zPnT*T>U0Yc+=Dez#r=PCAR< z-;d-z@wdS|CpwCb*`&@cMPpipMiqx2{d^LBocN~1m*>nyjryvmOX1*`@u}pThI%Nu?3PWtWmu{~i;8J# z`H^2Nscdycz9S-qsKwFx(bZ#ynuF`gxfblCnuJymqX7x=B9IVy{*n+={v{#af>QGl zDhUyV&CQyA@_Mj`*;96bWmW-cx(YrfUnfI@BjrkOufSeW%syIVj2oBzgx_F+nvyikw%+0DhF!kQ?l5C{Tp zVbxgVZbww4sAP>Ey?ra88>oaiVX1{<$twUrl(D|Ov+Vq0KNh&_e#CwwwF*yW7%x%p{clmuQ9!)P__}gkFSn>Y0<`40JK9E5_|v z@4R?S-a!^r@`ETP%BYPU6%>FSIVWdwt^W{mt03;i=)TQLQN>?B0W(v^DQ%no@Pfzi z8>J^=wQo98XYjio0?U#QZk90lypydZUC^%6CXd}XFFU8YY$xnM++a`TvLv9QSv1}w z#}{CB*1h%^+9 zP87BsHP1#*#*I~8-!@~++1X|#X=;unO@ZRD0*Wd=@GRo*b{NoOJssr+L?ijNf-RDG zl^$*;1$fe6`4b?_Gj4D}3(ONtOV6xeCOPvsz;4t$=79Sth6fkJWTP}u06dh^ zrVOpxw=3|TpdFu;ttaxZv9TSkxafsJD+qGW-=Z;89Jsf_#`g6bzzknx=n9G;$)c0T zzIsKsJZ~_14{tj~;fk|+GNWpU-X0@0XN;X0uXWcFZNy=QmnGP z1~~qN{nX3y0>-g($5xbO_*4tv0wu^Mx&Sae0A6gBr4w2oTNxkM@m?nh=5x^!I`i;~ zxnSDcqqa(H|DnX>%7CCV0dQgL=)T!+`93;Si6<@dX?D6d$RJN*0KFrs-t8l1%Hk1& zKNZmn_E z*`@eRP`1D?HunIZsxDLyh|8hfN=2eeogZ6p 
zA?OTrZJ-}H%7U=X;lOZdI*DYDjDXq~p2TZ<5OG0c70V*~2s z>Lj8;5xGU9tDiQ0j7Ftp<(_Gx$a4rzV?SR#FBeQzET%o)8Un=nE?;AlAZr7&JaMI0 z30Wlukn_*9be_=TXWZQYq(Y4!1sKO-W`*V08E7w1kKR|!Fx~bnmm`jGjBpayCgVtBL0tqo`83V{zGFa0 zNN67flNLgfk{x!!ZLWF^OyEXHO;#=30VttS105}k+(8gpt##2$%X7gWe(z&ZUj|?c zubU4x|4)G&HF5S}ScGmn%b3+qc=;2Qb#&^lwsSKd(`%rEp(`!-j=KO4e^8n@`|d!2 zUK~R`h1AJQrIO7>HUzSK$ZEKGSmO!Am9f6QzL)2Rl?MS&vGPaMyI`SE1M<=6pOiPe zIWiIwZd3!=>ab@|e*zDAu&^{Nqm}~0 zTUXd1(KbCNXSjFaRg*SL87u9xtzrncEbQzuJ0-fix^CCC`Dp+~so-n5ixq(WN=yyR zcSR|FP>`hc%Q0Y2*+0A2NBjKG#NsYc{`sK*5p#~J-}G@)(RT@l^gTU~_%9BA_9-9x z6PSX6v;e%Cbi}b##bztvEZNVhPiV6B;614HBCmf(i&)njG|*mcVM_;P+mr-uJ^SEi z3s7MTt{|l6D!G3^Z4k$sc}MdxpO9reGM^SDofGZlzY9jSVZ|}_jGUxOfX9KgY4tVH zsG%{IXp3{Q_A>9DF8B0=u1uRLdQxB4yWkv?;no(HGoK z=N^I%G`@(0;tT!N#udCn{lS~CsvU$FEr*R6 z3Eiqti9kL|lf_iW9ochGJdDYH8~q|ew~D2#VAK~}zKoi+^$=HV2g;LKwPl2bk1>+V zc2Px2(vxR0)@rq-q)Zvty$|J5+sif7_^N0z2qMFxKb*o_cssI;v4xZMaTE!_W>R@z zB3+WKNixb{?%DQ+GP)NvPR)B`U1usXA#BairmP%`m9wc7YEfniL{9=I#TW#z{GlFUDDz7Vxq17^?sUnOx)<@j?q8n!Og__?QH`%7OVFirP2jb(ebxe;ldHcGo33C)CO=KWFpzWwY7EHQ#(`>qDMlyj-u>d7XRQP}AlVfY1)=m>vNZ^_WyqN=89 z4P1fB9rm4PWRGUA=nu@t4XrrP=D2f*fUqeb^{8KUw92+?)UQYDi10$UEU!-obG=|E zm!Uj=;27;x@90{O&azRS4?*7elT+l8{kM? z^1_Yu=j?kfJ-KCg4(6@osp)C!7cJ?uHxQ95qK-=SqQa9odB~9X_G&f~jP-m
bl zlKxXx9I&JY<91CD%(%LOrSD_v>X}ux_GxrYyV6WLuGLg;DN1wH8#wc#u)M@!10z=w zo>F$9l)csW*QeLv6xWkbwRA1B>Q0)hIZgII_QA zR{JeIofEdz(N)ujqG&-zPgg ziPl_2AA-dH^IqV3>SXRKmV`&eL7YQ&u&jy$n~P`VIG<_Qem+nC=uAPmqm2$Mail&~ ze<4=z-X#BK?H${R%9)6^-Z_4)KNG6TEMPC9bnvCYmE`I(<(F{;=yt`x6-Pcg4m-Sy zt-BMQHOU@|Cu>7^Mt=kuDvx}K%;S5u$2hxt!lYee+4s`zUDgZVZwrphCGCD^R?QA5 zj@*6otMfp;^Y|gbW$P(LO359`({`ayN7`p;d3VryWbhfy0~;tz3%v&l{cF=dN5f~V z8d44=`_t0hr|y_2PBU8X{lh2BkFiaIQ4SjXwQB&(T?+cWSo!^!hqN3QSaonKx_;OK z&jfOFO-+qnCNI_*rnzHOhD7|KoO`g9a5oMkZ1ni7=;YU%P?H=6IHMYCzN09Sp${@h zZi9%QXLF(TrBE*HzyAmOVUP;X8i2-D<+iIh4qp1v?Kt{@Z8%_~|jiZwvIV>&!pnIvp zAIQS#yIVcs1?cHL>I|-$L8Q@A>QWOcRj)oSR|a8;pi96oUIn3Lxo&H8nJG=zzK5<`2?QB$Kcaj{J-7Zq?#L7GL&R54iA~Vl)|72-UCx*Ytxtuk%0- zMstQ6=qPaOnB38P)RQzz@Acf@hux87dxODX>IMN#2=5Uw48Z-4c;CJ6Ol5uu;I&}y z=9>V4jE3akjQj#ysqk4vdb(*0G=6C-(w`c#ijh-g{?kLe`U8W{_N z;r73H!fbW_cF#0j0+45#6V;XS-%3S0}k@0a44 z(iE942+X$( z8SSJKzu2cC4VdW!1%Xy=%6hx-{OO^9hY#>2pYMzQP^7JQ&Ut-c-Dm^cs$MBA40$Pq z1ql)VchW>691Bjz-T5)lXEW;kqjH#D+63VUW@YzgbjKYa%UwS;T)8` zV|Z#rByEoZ%=7U3jnybiJLKdvxi>8m3A?C)5YxX^2sqp;9{)D^1xkNt@#;HHr>y8F zHO3f*OaH-9F+!xB<#_T_Z=lnG0um;JJL5k9DGU`tc>Ud$lnNp7{fXZ7{zO4Zuj_G< zjDN4uff1CdQs6)m838-$2f6KD$EfjoJ$T|#K$KGcC$BthJ1WV8V^KL?tiY&x`y{LH z)Rb)j`yYN}+5b$>SXk#MPc4oq$zU6MpKv3jqOZWNpUFr%>Tu@9R=Dy?8 znVu5Nid*`Z=hPqyJNnZ5S&;m&89zpK<@tNfEF?sV9UIAtNE;;wN2HnY&vag>V>Z~m16L|t^ea?J;r>v{A z|2rrWlcKzR=N@j;4i5Uc|0>=UV3%W;p&x8H{%s04M4^nbf3qw01DSZkz540~2qKXWht>Mgs$B|Q(bDm0L6 zM$_Hqbv3dm@JFIeX)QL|D=kNR@F-zjDsqm#7{(}>n&q)MFD~!}(f3hULaVkxTq}x( zhR)0wr*}&5c^k1YQWr_hG>nV9%#?R8fNUkH#E(X2u6x27>Oca6WPi(&dA% zWOzefUdBDMVf$QaPHeaLRI?3kaQk>E8GT4A{obw%4>8D8mVW;T0`doDa*QR%tDN6v z+u64zsW*hXYc1Zn8U3m$epAY3Q{QliFidV?JA+X=bSYAIfVC`7`wH;H1vAyGP|NFI z>~Tz-EC6?ZMVj)FWgfcM))1KRPR3k|a4N-BD{NCN`r7Z0_PM-k3bW7!h;*84bMGzF zja=mmUoKoso;A5<^3rm=AhGamO#_F$S>8rOlE?B&{T1A9x;$b12APkqjGu0;c0hA! 
zl_5UNDSRt>HcpJVv%JfWPF@yX(s2q9c=eUPf@{!Z&?DOBXj}5@GaXL%SpckYpPEK# zP3nfH5__lNv>J&ob+j_sbx`r)m#F!NO`fF)&s-&`rTPnRCqrb2RK%Gc7~cu7FU(Sx zK%bE8Y@}={@cd=sVr^|Z)*d@}vB?1@QWhi|3=pTIsoZ;!5xS0ykl>PJl?`s`EQxeZ z{_^+9I1B00wvIm_G)B7k6culFIr1Oz&)uM1b|ewMW6IB_3z*f^mgBLd%EbIJ+}@_|H@EpP?#91wPQR z#{YecxT)jF;@d3XNd-?)m+*$$S)8eL`qyD3TBzA%Z|4B+5!~9S8&)0Y29e z;)B0Ovfh`1f3UoC6lEbr&yP++Ao|(f@`m0n{fL_gIXoa1McP&ag~ zES!J5Yv5q*ZI3>M5PEy4(MmQBcJ`Pj^YNm;ak0P*M&IzVx3G5e`!;{))(+u3-Q2)b z`5m6f**K$!hoQ^2c?!Ss?f3nA+PI=0{;wzSqY3Zj9e_EUjWswc`qItQ+uqI2&DFwL z{-?XL_TDbeV9R{W0zlZ~94vzsTl!^|%rASWs&3g{yHO&;7Wej)*yDqe0r zo>piM@}uv0TX@>pVAv%3Z7PZYJ8OO$gb`&MHy0bQIlv1~8)pk|2jAU;_<1eRGG+ID z9MBCkpTGwf{~bo|%~Xs&_*yvopjTz)7j_2wl640k>`)&tH*DPiXZd-pzMov!!v`fT zV1R(FEid>3Mt;BbZ7w+Mx7EN#zRmynn&9@X=b&`@7xJKHVF^@NU^nM2oE_|30hz3T zq}c!-$okrNdOH9ekwJgq;$Ur!k||kF8!v~m7M45mg<`q8n}e(O4z+}2nT6os&W|j^ z$R%n~8;srfHVm!Q=mo*O9SQ%DOXyi#yxjakqG)!bL!)iBM zTQ49yC?mL=(TW%e*qQe4crOfQqbU9}-v5FHcAjoNfI+Ccd&u>lr;*?e3Hf=^_u`;{N z|L+q_2WRJ9@@H#nBWz^_$nk@>wiXey1hVoc357QC7{$d{cQna%k#R37VvOFOkxF#* zw99tMc{%`H{7b25ZDAv7yC)Tetwe1sZT}uBVr=_AKt;hHbX!+J?=K~xt*tFcVD=P5 zVM}2l;lGH47>oGN3nDt9+9ilO>VGK(t%Ynvtp)d@peVnkfbic$!GBN)fArDv{_ZxO z4mPg93G?%+TR4Fz_m{>I#HOgwdzWkexutV;b45kKAjAG{7SXqUd-%LpEbx<2{?j(} zyR80bHTQHre}(VCnBHCDipfy^ReFO8X7);N(7fT*0EGt3IzMDOsNi{*Tl??e+8Hb5Uy>@Y4-caiYy#h54P|HE7&R){LOWnTB+?^Et zYEtlHoP`b_z6Y&nf(W9G=`MO;j{cXE1099!63pG@7qqR|b>4rn-tu!(_ODAU7^?5C zv7kft-6a+b`~Ue8i|~&@#t%~W_ovxt8;cRNA5AVlCRf@mX?&QQe@62#u5_2?{Xl@9 z+zKj(`8TTfTWuHGYVQ^5e`m^XPC_x1*rl7_)%Pza86R3Z|1y&O<4Nc5C$PxRMHJ%) zcF}v!sDKaSME;D3^8J`j{g>pVzn_kL7=1(QI>v+SBIMtdl=A&(>i%1@(mzi}jC}qR zbQJk!Aa|+1o~MO_>;IuV;P=xTLtjjA$cy&kyXd{A0l>)jpVEJH17;V&f0l#E`hbGc zk4f0?^IE%7F%Z(&@PWiu1US1Nf*mno5dn+6)7sxbRWunf#$Y!UF&1;T+rlJ9e};-e z=Tc#^#{C)Q^P$Ta|MwFL zv=iH%Q1GGc!|sd%6Epnt83jgv|941@X32km)MzL4E2RFLQ3~JS3@9jq5ooueG9%FP z{zp&>m`HB7z@ua2zm5qzbrBg)PYV>Z0b_g6J$g{@M3+r4y>GOU6aHnKfHq#}KDdkj zzZ;JZet&H|-!Iqy`FK#>`?lkq-R-p4e_z*K;KxvdpL_3O%-@WC_^oO5zv$)iW16GC 
zOrudSksSooZv{*6a`W+HuoC|j=m>taFa2-Od{n9ZAJcran(x+pw0QoPX+FC7`fG~( zOGAP7n!h&umwNy6;eTH3e@y9qOKkjWxGH|`Uq@B}ZtdAyE)crpXYyE}s-xCkGJ7wCNID0sk zJqLbk*I+aO18e*n0I+*M{fF`!##Xz75C0>e*xzire=Z7p5_Ctke&i-Pr2lUR%I4vG0d}uw5PwQV;am?i|Q}U0}8uR>Jy~F<$j`(k< z^?zEfF$IHNwEkJQ`@bJ?{1>ta(<{ZiKeS5$_JkW?ns0x(wb>K%_3h=!Khiz_pMxg< zg))Km{+O~OMno{I-z_2-yY*MM)Aw{pn3nTy#r^Xb=08HXUrD#ng!>MRL{sh`#5M0t zH%v=^?|04ryTQ)?vhcR&anYrTe_M@1r%k&%J$tVAP3(S52!C@q{%0>K|BJWL_kQ|s z$fW=NO3Tl|sr(qI(=Jo+_W`K)Cd*%)^#0a%@Gnr+f5dw5?XdJmfkK;)e-vuHH*tT= zYyVvJ=D)V0@NZc0OE3gl4R(qBo)0e?#&AAbfjoRroOr{=Zgq z_?;BT$owuL|3_0{OqjV##J@vfe^J%^?|7H+n`!(T907(Cm;n5rgoN*@44AjbcCqGX zne%@VC;nH`A5HK*3Kici`|gD2Z>zvzzz)0Ujp@Yxk(~Iu8vXAe%Ku8KK!<01|4Eqn z-fD@-c>Xu?yKnvsU`%%Xtk~XkK{tf|eTKKwS@+W8Ytd zl>O<~`-}kEeFyUje&g?_pX5Vbpd8b8G^`=&;^!Fo@8A72?PtH__ruFp-+#RCcc;HU z`%%DeA<(}p)u46p-zqKNZ!OFGN)r8>b#_NyelPr92$P(nLj-<-Z|C|Bs`~}}o)7bG z(ca_#G_H1UySGvY*a$uS*f8=)$e?NeC2jz?UZCdaOitVG>5(tDI zq70YO^EaElLEuk*>g5wnS@DBV;e%I>Tq%lR;b1z*j;%tbg@u)^q#X;tqJ@J?F1tSp zCSOdiL$0@kr~OD8DmkryWI1Sj+eVr23NEx{#Ea#*xY2{**Vk{{cqR5(;?w%*WTIEd z^7EV9H-{ubX<^d1F%U9(Y+AeHef=1lM4KY6 zoH)>#C?p2USDinTF>R(h2RuAqcq3l0f;@okf_=R0i}wPS;=`71i2!HCPUj zi%MoqNZrrtOw}+y25}j}mWMjflVQysq(|o6c$kw2MLkgzJ1Ut9r?=4LII-qU+^KH7 z(b7&)GQ;tw&v3Jn)l^_erMJ<9_7}jDF8YWHC83_AWm)}-Nyq+nc>0rOEaD@28}%#x zP=ug&OlS+?6{TLoXk9M^>Tt|(m;#hljqMUsRD=Xk^ktT-O1H!0a0L~?0Vb&V{@i@H|IED7T1878J71UflGwrMaV(jmrmYMC}BODn7aZIwdDZz(aAXk`jA6T+XH zh_;VvbM%)^jfDoqvpUmD0FlUx4#lE}n4O!Y4P3QR&hl9H%u;}Ss>PWfi{38OWJ2cM ze9o+j2qnRGndAw6pMjP62y#f5wXUZ5FfSDGfUs+Xj^Q9#dhuhNQEyK*Z^1DJY`Am~ z?i8=fmoL;tYPh#yFV4EkrYW-LCG+#0yp);IVcq{%pfZVL{WU- z6(v2pdSoA5+7+7#I>Dmxg$yM!rfPtJaVJ9aS+O}7q+#03$Fj)gx)(J2seYMc}esReBqSQb(}w9cqk{0VX+U?Ybjv zh5PQ!>tR~R&Aw@Gq`bVm&*JCLoWu1YBlUTCc?m6*H9lVHDM$B5PqwhrDCmpQ?RkWc%_@F^Gy%)z;Msbx#c1+rHaiz9Qj~mU+ZPOABIT zWaK{4Mj`4oNqkf?uyXW$ddNe+#dl9b8yXr;pREfffdt-T%X&(Mk~aC!jl{dHx^)WJ zD)T1K$q|g_&!5jZba6t0Yu;7R%#vskWWG-bmQwG3DJeR|WMmX`LYtvI%T z9vj|3joa!b7S3^vT?ETujq5OSKsvW4=w+cv1;<_eLZbPF1!O8|pCT?3bEuO{6e-=! 
z_08oGyQwd0v=DclZlXM16p7RurYiLBnH`gMdVBiyF}-?2?C0|)Iv-sxEiEsP@VULI z<+plriDF2h?Grw(nOW80hqPk7B*xfRl9LJJS=6gjlaiLPA)}Y<&!V_21|>2;a9-%N_f^-t($};kl;Y+-KssLJ z#L2aJ1Bm|w8GU_rtt3NSnyU1H`Y)|56vHI_cMa|tNUgq$hwydOgltQG9Jr73b#{p0 z$)#iAUhkhj>PQvAlFE%HjA0m^nb}Xlp%*$>?j+Zfqn@FjDm>q~p9d1mJxML$?>_gy zW$3|+B0D~Eb{?`T>5x^f)$6(!zmjR)9pR@yyyh?{svR3|jU{H9i{_l_=stel+u3D5QFNSzu+tc?th%{8Bu^-uLHf30@4U(t4MQ;)vfm=bh{x5Ndvw!4;n zz9zL^+>=;;PCe4X#aL$KaEGa@)J>i3>D=^2Ag>wVu%bT1<&J$NL);RV9S4dnrh&A7 zwR=z_+y<;ns3G>diYDmojUw2nW4+C%mTP2S#*L(e1afPh;=a2EI7Q}Z{5C|a z*j1#o4RHw0M?F1@Wu(~0vnEb2n4RU z*PLMF;NUp-=}ley6p>U-mo`3(UIa*!=b_xSG}#7<;C5~#6*;+gz)XMPso6S59&zy$ z6TcYUXI#c*?nDwCz2&(2v+7~$(U)W_w8ph1!|E3BA;OnO>@vPSjwbce z?W{E8V_qQ!oEeS5J zjv}`47a4Lg_g+4p>_~Me%!ntr%o+E@-m=2x?x@U|To$#YS@!697b7{PJh^A15Pi_v z64nh30;iWn<@+uC{YBq=-ViTTFWXN8?7SzGzMh%YC13S0w#sLd;TDP;8Wlm$)5KmY zTsr!c7>hAxdU|?(wqk_;eMZCw=6Q3$1qT0X9NANBnd}ouW@q;`Ha32YWh*kRg662F zeyuaR_o?WO8Dl#lbR8@_@i0i03r8zgaa#0rN*XvK*(Z#w1-l*c3* zjvHT&i6IpA7{~ow^gbjv%^W}Uwc|8nNj#x_1X%Z?cUfZUv~zuPwTI2%%EDVKN^aAs zj&5okeBw~xL%O+b4;#8B;%6be{I+o8Yb5SXfklho&fOQVd)`249{8wN;1G)%n_-!~ z_EE?)i#BNlFkn~Wz{*uhx+l4rN7d?^nV>%6uZwKj6Hj(}DhkjhT(n~|dl-0q-{9-i zC-m!!JXcbEh-f7%+Y=O35h|+*VWFdI2lWbZ67IZMLgtLr)Z&zA7KRNwJ)NI_Xr^kP zJZRizK!9}(#*nwg%XWGeFGm76Ivk1p$cYA*S{kpVsV=GMFKZfVk-9KonXMVX7MN9W z@}RIY1Xy;fTsf~h{0~m^)Nerh3$D-NAU#C9>dt;SGv0&{PW78z;b`rni(WjHB6oe1 zcYcxGO5k!olw7iMzh7zhn0^Zx;fs@4*KSf-z}>lQMBi=3<4!zYlbODvKRQ8~DI_60 zWRje|Z<&AjapBU%(QQL=y;wsRflgo@#+Igg)M{>Araeh?Ox*icc)5#CHZEM+4Ey<_ z$>M|aj*QsOxTez;5*wSFpG+&=GcFXWb5_Qbm{xhZPj@q?irl#+eY`s4813D47!8&R z-geq5o&Q>trz{zsW!G!YdBYWF*kh&9^s?zuqnv#8j2If7e1m%jD%?gMR8150^cNU? 
zb~Q6IGg1&>qXsb{|3UYtk*klK^Uo8C!W*syw?FRbDdm-ts!u4aNvU6*q;XaJd|7JC zz8opq9Qd|LJg?9BiX3lQ+Vn=3CPQ&!y!-nVS;DyCJR9-@$*&)!F&ULR%Y|B!{of0yvQ-EA3c6LC$zb3r5d?Ww*lB|GEP|9Lue% zcx$vO<{GEL1F^oqi79NhLxx!AWz)+}F>St%kpB8bN+x}4e*|t{l;^2wf5~$Mt+Sj% zW3FH6w6fod=$BefZGGswtM;DdtiO9M++$mj6r3-Xf&R{jj@sSUgRnDUp#mF+ds--J z?JmkQDagw|0-VSQTK}qlV&U0bhSuGuGiCT)VHua4$UAA*2x;$$B=^&cH}Xgw>egM8 zyAgI>-wT*S8r#mBB+AOl!cGiG`$J^R^w>r8;1QnkIC&bdGo+}~D9gjvWC;GjN z&S}hj#36w>yE)${xRls^Q|&q_NyF~_TE)eP;l{$=Ub!%+v?5r@N zM!-{J*s#dl@xDzp?SkaT6As8VoC@Cm(wMK#dFr@PPBzFbdS)-@FeBM6(Zwk>9Wi_& zfPi~!%2Xjj;|Z0(1Fab;R6%qzN>nco)9&UbH>U#M6Du>tzEuQcR8YaUvLf<&H`x_q zCY9=%>wTiJOtCZvnV`)JjHpE?SiWT;lb14S8VYdy-r@6}tGRM2`u_7! z7X>1ruS`mM!8Tua6trkFb-i)TRmv2#WcG?zO1R2$O&*sVm7Eiw4%dAqvgLO4HDnUv z)KAm!kv#?MM*(W|A?G6J@dRr%IV9Ypc=r(#GP3xC=l0p0Khi^T=0LG+9oDm(nSgY1 z?M$=N^*plaGVu(vodUz=3dGZR`@Ein*VpqTZ(Zy>BI3ie+&QppAgSY(`?a$%s@zN3 z;9(l!-H)9gh>)Tu#;jT^eQ}_+7YRb7%v%MxGha&XVe2pi%0Ga%!T`t z(z-#COTP~binw~F%#08S;}xS#GAAybb*_#WxFnJPY!t0YwERZNwN31c))Cet z>bZzW7+Kb3(PI~l*m z=1p7~pnT_JG`9_EsEn+v-IpT#lf?pXm#GVtkBbqneuB*2n>7zOH50n+!0q$s4cxji(R8x`tEa$S=)^%#8|{YB4Qz zF`VJnVN2tl+c3Qg)t0>=&O#@O-}9ty-=XnDs>==cAFv>Swd0{`QwQ=Xk5YfCqriB` z=$9y6NRg`7kwGqhx)YztSyiXhgr8D$g8ZhJ5{&`vF>JQEd9pd%mTd*T%B`7W!-8Iu zh0d;@l?M?5!*jM9#~Vg254~6%LcSID6Ny(MU&fQGLY2voQ*C@G%NoQS*VKxqicb|w z?3U96S9FjCW=Lh?UE+Ege4@pOG{<5w*coEq1(Vn7A1kX?B>EAhjm?nD?Cas-P3VEx>O{N6P|h4 z4PrG*X|TctMHJLA>WTxFOM~4#x$Joe>)MV zAg-c;%$uNTFs#ujP`VJKnfw9k#Rv{@+}GyXn#61~C}PK40`fTj;(=oFnTb0OJST72 z+Zf3ZiFcDrhoyd#akRs>h)*N;qSVZTZbL);ss+#YOxn!3!eSzk!QBeXGG)13o)$; zqA8jf4!c|7b*n#2L2JADamm2wC6Q3Bm9reqoiooCXu8*^sfO36=SI?RZ{g4Agm?|U z7ZUf4`f%(7j=z5zBz(+YMa`Q|@dOfvxOCM^M-m*W;pz^x@1{09iLVGWYkP86@imJO z4yPU$=|ilCA6&2C7uKhrqjNQ@!lTqM3*)#PEjwgjEFRhTXg{UJc9r*wkvanESFhX| z-*dlvHxZV8S;T;jsHLgI>ei#U=ro=BVy(gvNDI-x8zq=iRev{9DdBu(gbm8D#G^Rj zIq^W)U@B}RVrf7ff@itfCUY{SB830N+_MB3ByO;#+r=KF68Xz2l)H`R6hXipl;4tE0Q~ z=Y0W^K>z{`eI3`dOMq|BQE^1NX_YdQit5>pUM?vS=LP@hd>>9#6W(i+p4bM@O(Tbl 
z{BE_63TxcA`J9ne{PAVb=-UZAQlkBjMuuo6zsw!z$Tx^rXA;l0AYw}ZrZwfmVHhSB z8=a7w$u>H1Q`2B&lsoO@5}AaV8?XOr5Y}JuK*&Gp~;bVx7R}DXQ67%-N*KLMgi+>zLlw) z1bWje7uD~Y(nHs$y{qqlNLTWvfukFE2;e)=IRtwuVY=Zyfsi z@s-#1-Fpu?*p45s&3idBLJ)dUnC5l~E)z6+wwU5=y4&Vbb76*V>Wd+?vMjdy%WGk#L;{J(uh_1o(1;~)fOb75oecBVb!nLy$_+Hs?cP&n| zTYwC3C6~d!De*BLI~yaA{}bVW@eSQ+$u*$hMGH7p|;>-qL=hN3Cn`O~M-fr(9bu5IpN9u9KF zAri0kvu9

-k@l8FTuVXk1n__Fo$19te>oO`404Hp{q713mbXDdZl3R-H3TnPXnK zE!T-xr;_94$49|_ni3Kc_(48bdD_=VHSp_4qnMZ&=?qc*Su%MLW0CAH0%1n?qMotC zrC504R7Uv&U=T&1N=(LM%a7@17*2i`>^(pdtuo;ln(pV|QTCeGFZj{PO5b%@TFp&- z3x+SZoZg1D=m)R1Q;VKk6^?j$4_kJ7q41+a&ZQcQcp1vOuI%%R5nt zc;7xT39rd#la~GTgRn3sa=Yhg0Zak{rICeYDw`(&C(cEoCFCSb+j;2W)zm-}?+pv-25MWNQkGUIC7o1!7dT&-;% z6}w(^KKy|gnb$LY>7G$(4)Wxa;Q_&3S!@YfJ5Vh%hfhJIOtUII-fJf>>pP`Z-Jpl2Mnpxi znAi9gUNE~Cy|uaixaf3^$^aQH?SWJgx2R{&)aI0qZuo&pN~K_~W_pxl&>BR8`Q<(H z5$CPSO3`G=pz8b2RIYK1u_P)96{#O*fG*GB8P**+({Us;6fzvJH9~z<_}o<}J(ePT zyh6%3+|QEsWQxN^GJ@CFx=i;Gt)wsE1D9`5OUX}G=vJPF1@iMX-QP}%uEJ+YQ2|8u|$U!qO`$J zov5Jr%KDM`&Fwx;TC#<>Cp5Rt_&BOPJGMVkllJ7{gZezQ6x0< zVq(-#Xsl)_;?kw4)>b8rqbco|MM}=3gy9ezX+J7p`)m^w$2zb>tAuF^ig8bfE@^>*F5a@)Hej z66jQVH-?-jJvj-_ECna^wcZ#$)P^gRL(_0{%JiHo#ER>US$(r{QMotpBDdHqs6mYI z$U8+TtwNMk;ug8TKQ}+sT*w)Y!*a5+UklCZ#G$zmh3?Nq2j>FL+Q_!ldEl=gvN2s|cNeVCRwR zrMYA4H%6$3>^e3eZ? zrsw7x2dE_8Q#XWCLaB&Hk@VzopaUgx>8O|vR;Ro*8Dc;sIF@>wEA6as(#Wu#zpLfv z$qu*4cklGb71g(fnR9q}QEIN~Jyjab-HB z*s_a*@BlaIC$&JDmWN-sdk>r&UU5tFZT5ayWIoYE6I&Z%PEwxpGvl7F? zQ!=TRzJDUT&Fj3=yDJr9U2MmBc%(=zN7Avo+%!kq9@n3`XN+@1gh)hzknXN2-uSpx zhK~J&VrFkWtyo#=N3p3k8O`%|FXoAU@FJ`drANF0o9#?j6F;N|IA8;O+e5kfF3To}zs;vC{$cXN`JZn4fU41DFQ>Qnwl6x0@f-+vUL?y^s7$Uf zvqESXo%ddBA>#6MSpjcb;!2n0+iFP*!9~+Gqk~%)DJ>2?Q4d>Hl?+YnNN3l6cxP0A zA~6Rk`EblW&YkFW_n@Me6I4A5*(el$97g6+(!2E~RdPbDOz4`+9h~}&4ZF&&0zp6B zgy|dYzCP9Ah-pxq8g(bh%mQ^7?mAJBFMkh(Z72}v+#>ESG363Ax&K*zXU&_UMIz5{ zjXyr3+~k9h+L*^l$^|V51+3PT^zH7L7?C&KA_B}TM_#hY!UG$LA<3(5I26kPi%lN2 zE)ZK7LZxCvK1~x_8fWa*#1r%S8H#f!U{IGrWx*T1lkzY+kH_1>A{6aI1Gmf=xzGA@ zuInSFO>~j1nyjB!-HB?Pa_0``)<$+IT3A0%n-{8JXzunF;91n_6^B7r9b9|9ZWlY9 zpqzde`Jm2RqHDxCyqYAxaoDW6XR6453ZHkTw|N-SGMLT;HBzqfK&7JMNvD{^7fU>l zMuRTuj6o`)YJxnDZ7wbwVn>wASfHOjf3{dy z#M3H0OJ)7R?MT*{93*_J-lOhj_ObRHvXi>VfxGLRmp>9ec);Cl%tZF`z+mja^+O1w zS4J)D&br#1&ZXJqaf#0StOc{j{E99yb(5TVa8B(R_*<1nV=QLGl50pue6SLhc&lj@ z6!LIB97nbm?!&oUg^!Z}tzkh@hE?ibABlhs&!(lOwg-*<<7!Mzr|JadlZsVPvpaIVw2TF5{xUj~>?Nh~phUP6Wn@i! 
z09kaGVozsBA)YmYY0-&o)hCH7rCe~%-qt9d%~?uABo4upQl#9({$5h^sn;0xYl!W( zNMs#wG5yjojn$-t%qN!B0Ge1@^wd!6ZF1Kjo+szE|UlD7X>RM?*l(M z|8>X>#6f+ZTpzsX^VS&8Is*-waJe}>XJwH z0p;7UM|PbgX9}9*K@r1-;q+S7e%{~}f6>jC(@^#nwWB1ABggiwt}Zi33e$F9y8S4# zFOI&zSKX?b?!gv>FN;NHV=>@Gxt{?tQS|0%S7h>y?C1qI(ujBFA>$Y3*4NyVp%sK1 z8?k<)MCnUjxeOjMN1r995*k(TcWj+tO9lrid13@ZX2`VA7*KVqTy1i3HRa2Yfpu+- zfxm)n9oxq8X$)9`5{Ndd1+#$1q&of;=z_5XO5ye4;{>`=q3oYD`G^`m7C0xb_!8Gr zoP-_`bZtd6ov84l|FFuIMFn4|4BX-u^%)mj^3SfZ1)lTZxmpQG^>XmVtm>edB=;5R zSow_BEJ`8;gZ*-2?(P{%xS*iEiI-E8`pW43?F3z>k^wn~+sqAT{YrbaoB8%B^L{NK z$Jw5~pANYzWyy6N<$O-SVJxSs{F(I%wJF4F2atylBot>1t*i|l% z^^1DXUi{#oz?5<*Vs$lOUgo2}|MUdb!GfMgAMd;sir8w@lP5DmMs|yd`}ua^gp0?SG(QvSOE`#lX!npw-Jwtl2Yr~bpbnJIw42$ zyH9;{O&$h{Jxde&tOWj>$o(|}oJ=^Vgz)^gc=$-1(mR*LMg7A&c_1KgVY5+iyYZ$g zsF@Zjzk5l&=mf2)A0|0)`zWshrAL01!S*h6ifR)x3 z4)H`9WKuL;mrUaA?|DeJR%WGR%K~>Pm#Epu;hjakP17>yx)PhlGSHGZC#+q-aXARNRyl{*{YG)TarCzn*yAfip zbjnTr<1GoZ<{_k|lusVDMVvBaB9mv>7Lz(wSgr&s;XBDn=Es>OEl3fWvwbmOAy7VW z<#gx9++-HPvfRj=OJ%*$2|whQ=3-V#5-)`Oi{>I8QJ#zBRSAolJE! 
z3%4J%ha#Rm4rOORz**Gt)tMrdnp#Xu?hf!-PE-j~r>yC?wNafTBXo<4 zi}Z|)I9nT^H3v%VIydhzTs)s?b}z)d@X4^@4Co+Ty5QO#0Jw$Hc4cPhs_*q7 ztkjf3O7e!0`5k4a|U z2&LW+zyTebL3KKj<$l&GAc{X~PZ0z6An}eLMpf2Yg>)-6hKrlu9?S19Jnr&HFQt8- zshAgqtKI7`?(n#zd8N&HZFzGZo=S5=!mT%Nc<=f7+3VM(6d>VTpvXIzyE}5)sk58h zz}0_PF@`q+*}gby79^~7gNyzy7n#!P+(Gz~^MSq@DHgHYPGVxk<1axGc1z;j2oHqR zwI`OK_YM{I4E`4oe>tuKAAE^G(q=WKFr7HQ*-ct4dtS)sb|r;9#3nGv!g>8c@!R(_CBv@^P&M9Y z&Rv1+POo@EUj&OhHaoEPY_E%sK(TD!qO=ov-^=t${DsMDChlYJDDoEl^K@}xI%57~ zu#Zxy2iDFl(=&3?xW1_(sQB#psBLq$fTZ@r0Q**{We6W9GiRl;RIZ*7?_ z0aCid*R(_8%KEVDA#{1<`dCL!u2<^jBo}tUE&NZi5){4R-Wqvso8Ye)0hO!)%3{3tSqRaZxvM4@|!^?@RK&qgnYO#gF#6|1z2&#e244ewOfqiw2 z>spzSufPO3!uTQ9g>ow+A1WTGdi%v6+AXmFmVA|u1epI{cBkKwBLqTvpgDp(b|4@7{M>7+|OaSNp@FTCkEbM zz2bc}t_fZLY3Qset9xKC83f9D1FP#vFTNnE4vv1x3ZVUp-P&;uLgPmp8Q@m_;Cdeg zPpWzrKpfCx7XX}Y$yF#4^aKd~A=02&R^JhAAZ@fwq$O}WL$vzHvml1gxM|hiSdIi= zqNB=Xg_NXC<2BRj4~X!Wo-wRY5Z(_%Ln%~PfWeA-0|JEuC0FGwn>LtxAPpim z#_R!13m-4OE&ja1Vn33Xj#{^F{ochFd5v*bJO}o#)xA&)2j#wix0kZ!J@s7dsr292 z%77v&EEAw>o`OBLy*L55ywf@~Ew&ELXq~qO4WNVq7#ZjK*yS(ooCc&^jpDJ41sTK9 zJt0wxSmINI{`7&Mmf2dZ3+b%LzsG<*p0d`nrF+n{pT=xPE9nlXmZ~MQk&$U}fci22 z5;3ZSdhIwgYqIm6p4Pj~b02O`sg%UQ&De76Zbl7ejA`95B{9yj&!KcD8808A92@>3 zNVxyblYQ_lnGFB0i!Oxs%<&xc59A2N+OCWpY1(g4N8)bN%f-T!tZYqy)shlRWDb1K5a?L6*s;ADD(oivUHB4iDG9$Su`Qo6^qAehobmYU$ z9a7#yk&+Yhaxzr1Fc!ecr{T#5=N_T#XB$VsS1dnevH+>lMvMQ z2H*AB7#Gp8db4=}hA22GJ^c*2!V_zV3&EF=#;=t)dFq&Y!kSH11`1w*(v!B(b3bK4 zm{mA84y%ile!Zm7V?n;q+;mN=t>;i+UEM9^a*p}+3F1~BwusD3*viNNGT$_HDmF)v@yys5?y!$p#q^R*&2COdZU$J%nWkTFg_#`L=Dh0ZfL#HTu9Ky~FCg^o- zZCMmb4E1ix=K-CkM9lQ%O1EEk5Y5^PrG8m#E1&<;n&IerUbpdUK7lL=>Rk^YoToSC z`&Fc+9g(e;(9bOtQc_ZtLF>XRYilJ5stz%smiqwKp04d0C&B9Ks<2B_($y26qfzhv zB)~(JnXZoL=p-SzED0;y-1y>}9Q*QR9346I?wOrdI|1Ov?qXr8N!&63rZooIO2RHb zLBAT9pikYZGM(}?>~@fJ-hN(sdgN;9lE(%s+&*3Ss;WOZ-IJT4nJ#%HF;PGBnZ^zA zN1zZ4aOKMSNe(iG%g?S0SiCB+(I|YPgR}BL>hm*!D_5>SwzjrRyFm{y2Gog1g4aoe z-QEy?`OsI|3JD=BFt2|I54N!M8+cuNZrL^YW_4}*EKGnxnM6!|f;TW@o;r+K>&Y=2prb|2m 
z0`YOT@bK{)&*SZvrV4oX*S77Y9bXQqA|%Gz_Jyz&zqZ`%GV1U z7%(iV1?>cGl{nJD-ujS`fpRBhW!WOHbQ8dG2eO1Lz3; zgsaOtTt7h++9>W}i%0i{v}%wxe_I;`?!P+cI!UQMUMos;FkA8g48hizqj7`iQ~kCi ztG`o!!P9*TW`aQr&6H`C(1(JJAy(C(Md@*#kS=WyfM3N7qTH*gv?*P%4^6>O`M_L>R`EsVO2ftvuk7$d)_XRm{K-F>Rrx<~do`Nmfa~Bu8kI7bgy+5$A)T23kl}9o; zR5krLp7@Jg%Ukb2o_MBOudUBCL8qQ`^Dt1k2qEt&EpDRhckFhcdKTFw9;_7=9qrX6 zb+loH`XSp!wY-_T#Y6Do1r=Tj?-9@l(UEoDCl4diGV@=U$XZz&4JSR*mcV|Hn)>NJ zDGR|v)Tt&t_D_X-$}b0Q4zQ3%)zX{R_;T63KliE5?gA4LK|Sx;n9OUJ^I&uqf>*Oo zss!9Q{wj>&+`9-hLs}GA1-t=qdFI@QI5n)iLnTgXKt=GQPbi2J`hI&+&06e-{jqGW>UVSFAS^gWoEuCsFPbJ5~ z)3zuisXUy!GUfMa+i^e%h^e$-q#B2^L?2OhcPCV6Zc)-bEUwhR%k2kUbSLtzmG08AC#N=_8}rQh4p;kd zjwVR7%fXP5&};_ml0#<>BOK>VN_wmgBKrHkrtUvJ4tg+nl3Y+}|&6`3hA1`+bjo7T!1Wgz{6(r-$KbX-74#jPr<%t*MTwI?P7i zJ9YQ)^8PF7YqVR>E1`ZOsfUJDj*!Zl@NxuBKBJZn>s-463VZU?D4&-s4>Qj8u4m>H z6S#Igjwxjg<`jnaM#_Q-xlEk{>++5#rkfgF#v#;v=PpFb-1iAUw1KsvJwlWY%%mp` zIq%h+vL}g3?{bfLs2t9+1(4#gh@*6cZ(kZ28ppS4+PxKrr@0<}sFc{xw1_-ms@SS2 zk{ZWk8PHfBy&uz&DE z&E-gK8S)dNc^)|XqaH52Ds#|9;8lRK-)aXJszU31b^A-xC#6aG>te_l%kJ)Rv$6M+ zIVoMP87BizZbU)H1{H(Di5Ev-+E}KpzNnwKnPiLsv$@Kj^qKoket6P61A21fYu+pK zW4_lxKZ)BkA(^mMzJVK=2iQOMF3f$rrj$NIhZJTL_(%jm_ zjyV&?i#Xxk)6+9HW5OQfEe!pj7vO~rQqup!8(R(qsY;Hsr-916A9t!v1yZ#t-}ogMQret z0+Z*VTcu)}H5DuuW85SQdFeU2}N=?)ZtkMJxf>0{bXZ z9=wG!p)p@!M_J0eMy{;)q>_nKNKv{>32ynWLQuq&yzh*`I+u|=Op}(ByZMgYa2+`X zMf0(#;o1PNJl)&_ptqhMJe!%Rki7qb_IafyAsYY4o{^Ak62(hL$1d@ysHluzB5h>1 z?GG<8nnx;GXTU*h^AVSc4DqhQ`jbe9ejdDjqW5*jgR@7%P_J6K*Np>TKRR6H<^Fk6 zy`ghjnG6^B-yo#gr!-xm z#MIpY!`3HMS}h^72N4z%BM8s<6ZEdskeyjDDSXd$XKs2Q&Q;b+>kY>1%1qTmQ&%!h z=H5FUefFBHP2STlr;2Nxiz==jr^ik~&Gb60L1N3?FkWHYY2N+hv6i4P^W^J(RJf=k zo{HPg^oc-|l$7eoLP1B_+?CJ?8?R_z9hzQc=?glGD}L^GODE`ara4^rudCw@5Eic# z#;inKKNIT69-kQvFqCgq?obhKc#o3@`n+Q+pk6s5=lF%yBH&f;h>ByF@xB;s6((rk z;Ec5hS+PcFvzz=g^{F%Sg4Ve$wda!Iu#0TU`_zWlj+Nyf!{-rlIPOrTa{R)Cjc(>^ z>v&;8D%UrH(l3i31P91P{AZ0r8RS%iB}FLD4Dg2X)F0oHV8K80MgqM5eUVSLu$LB# zeKRpSH`mT0I5?Qy&CP8VdHe7Q_-KEfDVw=?Lq@G*n;v*s6MRV;7aLpKYc29B|JWYy 
zi$ew7YI89dRyU7USw3VoW4f7%5}<@hMEfp%Ov=&s-@-V^ZQm_ zhUyCdaaQj&))>i8!KrWEAqNaMvSCtAF{4wrWB4lJ%_#Fy+m4!mfB>7y;`o)n~6|sFJutv77(^)~vB+6w9k^Eh)c=gZ$+A@bJ#< z#daQxj+Cn0EI%D7t^3>1#X`Q`t>uWw1{;A{YI^eC?(dp?sXB8AL(?%XRCVtz_ z#->jv$cpU$^>L>0P_=y=9}`K|q|n&zF_nF}DN9)zBZG*FY*|AHGbLM$#FQluBO$pH z!mYA|tT9B$B*xsx*fL~^!DQz!&*gdXd_K>c=e#@b&biL@xz6wZ|Np+fGe!z9*x~p4 zxd4B21em^x#kOG?u{@tiZ7D)4%+FW`A`qtJbV!OH@#btQmOxQ{tZ~K+4o8#wy{LJ< z`(XK3Q3RlPZ;)ElPTuQlhC}Nf)-ajU4e)3}>zHGlROy%F5W=ZoWM|g6QmUgZt}B zpqI`I2rG{K7(e`=3@jGt)B|bKP(v`2p`VbH26Ub2zId{9>{cKd5!~)zaknC1YfIG+BWquI?VIE;(Lk z^VSxh<5G)@t~?qwlv^^rV13`f%rh!9G<01$3W|2?9li3*^rsMqJdc=Vwpw#v!N{sQ zr``8UkF486#yB>YTZL)83ZmeUe;0>=0;B=lodFN2&m(K!`;|sD`7tBrk1=@AbU-$K zHsrX9)1i>Xk!bKkieI^kc5(mWqD_m+{cG09!^0ywhq*xipg0rrCF(i*6p0>)&y@-c zT-v!_>cpaMPaku(x;NLyb=IHWW`USY(`a(tbe$@-Epg}tXt&BjH#={_rL(OKqatZA z`qBOKVUCx3Z%&?`#jaQ(+n<9UW=+%y(p+)75t>_ZcZk*&v{L#UJwb5xFNRNw)(MLq&k^R^!g?xKXBzuq)e_% z4p`!i@W!SF`Y?LEL6w7Fy3m4PuJ#U$>~iHaBM-#tw_ZcJJMc-BZ}WO@sjgalGUQ#V zR9n7M>1C0)ca5?~5hovD$3 zdZxO^6DpdvZeL+i>(&uD`gSdS7f}%<2}`(shi1enf;FTdkH9(uchBvQ8EO1!P?fU5 zlda;Lz=6_3c>kZL&fdQGg6Jm{#(s<>L4)}&SWqmt|HTCe^^BT;QHKwpEIJG+q;;Ss z%%dmPimw*k$qmy-b_!b2tZM48$i;x_)TEHM3jSYd^$v| zgfdF7$gQ%}@dp66{KPDhrC0l(Q}DIO1K`)+snq=or;o3##Qo>gWG~E^3=NG$YT4hBzpc&oZj!GgoLg! 
zutiWXJ7aV#K;w;%_I?}(2G)a|oVfV`mU_)8YBpB$##6Zrws3+YA?bD9nfNJk_Dqdg zw)I2qv{L)ZQ}00inXqSzOUI)D=O-{B+MS1E7$XBVe-Ka5AEJf1#7cLK)aCeAc%wXd zLbO({c8f_?&18rTbPRt9v~20I3(*1=Dl`C1rG`e8sygf=`mGj0J?I8Jp+OI-Pv|Dz zM!{jP?vXzug^5K@KSK0gjzquEz_*)=y>6BVo;m8OCSM&SI(~L!^vUUmKiIgKlDS2% zk{wm=ZDm@_LcL9?Q&{WgL{UoLjc=>FZ_mKNK)acX8UngS2}K1o2TTb+%(u*eCuRJK zUk_M@9O&FG_Z$;uRXo2{~kA+K6}=h17k?yJSZrY-2n8nvW|{U%rPGs zabwUmwFjL0E!SJm9#rn84=Jh#>5e}C{{6cy2wD04+X3Ee zx>g;lbQ+#@%8+1A8!anor~IsWccT1RlBw#M5#J4=~bfzIMt;e~F$0yx#K8Tj;}p2##A z!l|R-8T5QN@KTy1TZ>~sb-U@xq_~fW*`WIUqz+q8a0YVd>+3I$asQ*Qb=nF@KETQM z?C$L>X{?4v3TC+nuLu-P`;Y)vt-M~3s2Q>$BpRdce8+Ft&Gi-BSG`eR_0G1(I%twr z^t^B)?21SGle&FwwWKQ1USrgfzh4ee^(UC2T7K1_Yg37ZGQ$2Rb>sC9EKMxYtpAr`!m3BTdf_M`ek!d)3SMgq9*Y-n_i*6hjs}Y)3V~RR4a=A3#f8Mg zYR$_A%W^w&^)T|vM{s`d(aylSd+axPcwd&IyK!E*2K_*DrS4xU6(D@Y5{PQz&c@P? z=&@tZt{5kiNN-L89lF&!IH(02#!H1oMW?0Wl1zpmY1o3Te|{$p6g5*aV6zBxUF(qL r{D)EVS|nbR4=`{t|Ba0qiEU;dAoxjcL{E^!Vc;?~w#1a8T_64%=)98b literal 0 HcmV?d00001 diff --git a/applications/Chat/coati/ray/assets/tp_ddp_hybrid.png b/applications/Chat/coati/ray/assets/tp_ddp_hybrid.png new file mode 100644 index 0000000000000000000000000000000000000000..52cd8f875f84c6ba78f6c583655c2a84af28deff GIT binary patch literal 111006 zcmeEP1zc3=+6P2Q0g(_8K|;EQM!E%QR62C%7^F)=ln@b-QjnGs0R^N>5hVnq1Q7v6 zN+hJ4?;V}dF?O$eb=|$YA3yJ2=5WrObDsP^|K~aH2~k&7*o{kxi-v}_`-q~fCK}of z3h+M?>|J2XlQVA)g8xHz(o~Q}%WK^~frfV8(OFK%+0NbE%GL~xkyqx!Z;ZTh;CHB% zsfDAd9V3L>3d+bU%Lsv}*z57ToP_8~t3o|JPnc^7s9TwW&0r6EQ#-Q{`>2~*m~rsH zx4!=pJ9h;~QwK{G7}U%b><4xKuosUY{4n^DP!GfwZl3o?LAWg(k>^9%pk?KWH~|m$ zhXXBKte|F2h=V_Da)!Zdovj=`-*^gUZ*O+W`Li8O9UWnApYLW4vqjt`;;atfMm{?T zzCr7hsqNQWPgp^nEkEol#E-Z<_}+?URu-1XGxKmG{$gi}+!?XK$R0SHYvIeQ@Q%?t`|74Z?~=xhnI zfZ3bc%6-}@ZRu=h3$B=lkyqNx-O5=9<#%21J2xjk;xDprjdLSEz?J>}gT1q(2mGLq zA5e~h|LxOZ?>`}rgPJ;7n!)u1Ke3auBh1E324)L$1X~y(yu30(!a_hS(jUyh!Ss^^ z5JGW+xj3FeDv%FhHKvXhW(Ywctr9MP_umP5e<0G7&0uzB;NpNP9L;P^ovmC^cY<)6 
zB23ES<9^z;oHDhR^@uzW$5bM+)x0w-A&Y7s!ff8@w?kjI5Q0o_pP7gNLaaYxB7#3zf_5-hMDT#85!B%H{xi(k z)EVXP0J)f^48o|HdYe)Tld85daN9bD?`61YS3PJyk%L#oYPpMPRAc?*Y zFeq{VkC~;Ftu2c6nVXvloH_-h@l}?F3JRYDrt)KQLd15Y(;~w&D?novZjsq#}L2<0~ug7SUKlZ7BBc?JF)3I0kW1h5K@tWYE< z=k8$UXk}&(GBAW&#nc9%+mG1|B|ZXR3Wwb&x%u;8XAiT7qhC;5e~gTXP2UflzXb+9 z#pNFjqaW?{>%jTV^z$>a56^FxI=@%hz`@M7DjTGFQ~|~CK})|;=)iUSw^QC<0p@rS za0ID*q*74=J`XaWp<+2?3GgFnlLrafKh?Q%I_h7=E5LUE9tZ9`-?D!chx>3OYEgtd z!FOZZkDiT?2OmN_DB?ig`A?Sy;#4SRiAud7f(6R${z|Up^H}!#^a?`gsPqZ~v^%4) zD@f)41?&nUzWs)v|FIkwA0oG*Y5|#A{{k%_Xcv@Bi=?F-z%Tzok_#DFew@PjyR%#Z zzaQ$p+XFu_%esL6fa4!{O#DO_!#}_S`TM_oWSKwE(eRHSkNO>!`L`qAXZ-Wmxd&n2 zs9})j8*vc92>m<Z<6h3q z$t@s^5DP*NeuO`!Pw--QkRVKIIzg@1t@d5{&-7b#EccYE<*e!v0V zGe&eG;N5WP&tt?lD#*`q6g(9Fd>my9?-@&*p0a`WFYTeo?f}%x+|=cLYx}o>*6#-X zFVi#fi$RV0F%dX{lE)c9lJ|oZr)*7~oB%fu??!;l@_>*7@_K*M_Rz0)`YvFH>>;C; zR>;ip^N`(VedXU7A3$CWiU(>tnp)W-nM-bMK(cT~= z6hzhkpT!s>9rHI*;{OguiV!<0Ie-ips5yiOfk^*?9P;}V%|A}k$cXjpC5^-hD2d|7 zvcmsfmG^Pp3N?m&t3LTDWu5>s=6z~A{Zq<32r}@Z2=ZHU{294e;A?>MkE!z@$nyDD z$nk4T^9h##Hsmw3j?5dM#U#kV^WKcUwCb9xq^ zbtZmw;r*$iH~X*aX=K|S6$|kqin5=Vq5r#12{JSw${#+&2+MD_N|4Cn+x!27UI_xJ zeTE-CjJ?2#6A(a7{gH-!jl4d?tf#B*ts{#={a+Fm5~x$UXdg{NGCX-^@13yxte6zKQQHVGq=6~Y_@dB1_{kc1+t{(lDH!jE7NQDPW8=xQN|t^eu%0y2Q0W)>uj z{q>oJAEDKMjieFK_SZ`qi5@5G2~lC%*Vv?Bel7}*Hz3vr8X2mJ{gBn zWPeMJ$lB+J#Q62x<3FYt6hMZ*Um!>1xDkr~f5IhSVt7QlMv2OQ4zULpT9MI_=$x6e5~=) z_Ro4WKSAhUPW&RV{(rE0WOw36;)fst%c0o)zf$Di+kO8jGDmd7{&g}(COkyZ!ViJh zupi9rSD5-AYODNnqYDo*RR0Q@fBi7nzcaib>&jmt^RJQhzeDCyE&s1_q8~Y8q+I#`W54oSU7&?>nl9u)qEB<{@L>xUk2)5I-Vi)HE8~)lp+G$U(dPxS+xhUdyZnZ{|J!$ zD`hB*#Gjw00=^5wA!FJ<@Oj%WLW98fhK8R3;E>A}qL}pWaBcsbHY&1|K$eMupz;-^0X+a-O`frFOMmv|8GLSAmRN~6GA@kkBmaU)P#Oy%_?(SGk3&d$NzbKXyobwy6|^teDB+wKN8<4 zNBz!wHa>eE;*XQ!Cxw7J!vWRrK1usMk8Aw*OGTriGC;GN3!O51|F*#oQRW-j4wWiL zF82Q!RgTc`&!fu!t0k`lk+m1X%8{#}|JIUMU+&M32zH45e|*U+q^wYar>rb|O>6Mg z;JivH-gNHrjIsu;^GW*V6-Z%SzKNQW7t9YPBGvwpZe|0oNmIVkm zK(6)qo6+ox{gJcLKQEenT}}K$qZyJ2`r&8>UU0_E_r>xRe_=EOX~z);1FwITW8?+Y 
zl)Rn00(euqr3!cpDR{R8Vk6Yb)WXr!?&D`Wci>@w<&m3VV!VQeMvHbtR!ZC5=xqX) zvv&8>N6Y8Vkz9^>f=@l6MkK9)O}qzFgGdS!)0-AUsZ$Zl|A|*bhF?~apP$klQ6iGv z#O@tCBMuo}jyZTx8W*RrWT`&hjrS9XyLwr7u|OC`}T zKl>*sgF&{tSz3b@4db(al0+2yFhARZh7m{X*YJdd33mrNkpsE^E%4L-rdWr)w5KfR zlsXr-9~!owpBi!PgUQGjm*fNdmiPEI%vOubg?>1pXe^p+;Mfv1I~F>TI?-j>hnJ{j zH8hqbIE)i5o5@ls`LPr6@kxo$(Bc2MW5CXLDmAFUSE^zb*!MT2`fkTtF4-Q@d7?nG zheKa}VX6emVzhr*B%w4-hsXp&?QsIt#-Q845%k^gFSv|j@@g^l4w10aw0_h`PfP)G zzw|XWw@cm@tb+NC7t6MbFOTfqdM>@-dssns`N7!GklpZ0{}Ihc;d^&eD=qkLE2v25 zF>ZG7Y@Md5-Bb@+uc`0gW$V)X>y2tYD zJ&WF$jLxx=rXpxeQ@PuW_SQy>#K|CnFu{PZ+vu1VX=nW!Ow=A`Ysq$Io+^vlL+vNx zohTVOG2c8zxw<4rA=^nYU)}V$iL%P(_2HUz(fw#>=rjyy5ijp03_wTwPsgw~-}YLb z=3ki^Phk=*W#OjA)ZW{05aJHo#TBhreVv&BlTL=;kOU2*b#L%>@fqrZC$CGR^v^RN zI#jnxe0c|^WNjplB-GtJ3HjYd}|<5kdjQCLlveKqdEZeOp0AOUpq0KW`OhKPF@ z0r@>f%^Cu-ff-pE``+6G8nzJp`ib@s0}FA-@V|BHQNj2#ix5tOm87yT%7f)mOwI>v z7d$Z{c*ho7DlLSIytcIN-+g5q+19HP@yt1n z{lVkpRYf6|WMIeL=)w4e)Wj+oZPaBzbe&>d-dA^H#O(z(yPpLY6TJnS?;1~jz~FVOgFwLZJ?MhcqK2Jwk=w;ZBo31Ned~mxIio=$%e6i;OVQ_ zZU%ez18}RK$IO1bzTE~Z#?@rUFGF`1d(2I^io6H4vIkReg-1GItGMQjY zh^{5Hf7Wdv67SM=;bo4d8xznJw_*88UWGzjpq~$4ZK;UX?9G2JBvrC@2R6{JB-5P` z74ygL4JR96jH&p_WQ-OXPmkg0Dw*jrpDQTXC}b9M?g`%G>!s*qa_Iec_=UqA_6~u9 z)0srzeOSBE2ZAD^qZcooXRd*|l4-=8pVniKDeFyoDMgK=B?SUWEf$VRSrnnGNZi{_ zATZIa^=I)fEgF64*+MbQ3OUG>nje?z?>E9He4}_3_fvc^QUx|tC!#aa(m?0VBYJDy zg!7YpRxsguoHS1t+yM@p6upWEKT!NAT64O0>3q9$65N*jUu=a7o(ir%c`cx7-+^9# zZVI?Cy)UB)_^g{`rZtMIfHUl<*sd0&pB;XDtH`J~^rb&Fb|m7WC7~-H_Nt|(8@8hJ zq~qf&vI6gjkM8Q~dSzv0b?@4>YpdSnOi$cj;H<2!zQDuBKZ=8kTmEuvY&odg`o$61 zZN}88@)MVE!C5QABn|1TbxIvx%A=#BhYwVF35s%X*_0RZBIST)%OavkwP5Cg& zMh^XAcJ&&cO~0GRax||&%(5)oQ%Q)3lGX4uSg|)`XvM@5h>3~EJJOY%-*mC`)%g0Z zEf4zoj;*{KK6oCNw9opkUhLj>L9|_PDV7n9@1vyb6B8CLlM5kcgwWH|&+}V%Wl7$( z?#}XFozt##Kh3J0MYOuUzJnH9NRF1CKH$ogE8`EtsGMKk-znlT$1wG*a7@*}pucLe zCx89?xpNyPclA!}Y4^;;l|M=3K#yJ}CtpJ+ccD^0&!Frqta{Zvf`D2?9h|Jc*FbwW z1|e~&&!&i7?`{79-|fu@q2ycxsz;ArKPszy4;QY&;H;BCK#~#?HPv@*dRkYOYqkw< 
zsKg6g$*SFnAx~t|e#c|pe641Cz2*VE!le}pb8}8*Wo3O?*|CdRG%kLAIhYH?6n3Fx z9BW2XeRo4IQ3>WL&X2Yj-X&a>tX0R5*jk}ixh@m|dyNBrVD}15nSHc(VVKjY7dyPX zzxPUHq!<2vA?_UG8Xxqxcfcv98EOw)N>`4};L-<11cL18CP@bJ6A46D~J&n zdQtj3?iqt*N&icD1f@zQ zHIc;^EdgrO;cE+&+2S7%U3u!i1B+8P_vwLNhCH0(B&mDQ zb7kc_hlYk)Iy2R{gxqIM!j8)D(3%in;E}N>N6;$_s2(|DB_q2(0+$?w+~qyvDzY@3 zx&`M3yysPBY^e2KcVhy@o_RA^BT@3sg--8fcbMF;XrLb%U7D4oWG!Km&zjX1r(wDK z(K=IRY#|vFprwU3Ir#%BYHBd_l$ZFzH+G`W@10#6xtD3nhS!%~T6+WI+ zErCC{O^r;|ge#oJ)s+nN^ruK@{rxI%vPdcT&jti6`z*98>^#Wb{~|ie7>2>lXXG`- zzhuexpo68#D$SdSJ5ciF{qqtxjjGl{fuuRlR*^sCR^V9MGgL)s*H?5z^z^F-5A{pz zt1^sLm?<##?*+^P5V=|yup-T5Q?HqNX4kX#Y5h^#{J)#N9va;9HizC@y9ZT}f zkO?Aazs75Eqt0^0$$&OFP}0OC*;(lLNK*|0g)>W>4bCj+vpNz>*Q$5cqgnT)BA1e6v&SSjjhd*#P%V~@2%{3sMA4z4 zn+=xw<<1k`dreB+MxD8B z9(y*R7P#SfxWrcS4lDwqB>z2BRLU>PUDA9u*EH$^E8jG(hNh`Ta~W^Vu${tCq z5L3v7l8IG-2y`Ludcf(Wx4M*q_WP%L?*z0ROimQ@dPU+8Nk9PO3sqa*0s^rFTI{-| z%WJeI8_jymu{_)Rl6;c*nN~&y2BD;96MdHfTmZP%C1jiyZ&$m@z||7I8{w(f4<(xq zylNTRu2jznnk`mMQ`i-aX|RW4GmhUzKukd3sTKKrfmyRqB5&9MF+F0+Qpwr`OdXS> z2_n(%NpgeLXA%S$Sy@?6Ke|LsA7Yu^c`X8tN;oiqg@mid@ny$YUe$Z7Z$EKJI}Q*1 z#3%;=?6B+G-of1ClQn2$m=_s_iW-Z?oIWrt4 zc#dsUP-?6-Aq~WEoG~eBA_px0rK#sy^%qDJ<}AHu?_n)WSIqdFx~uoRtj@d(2ZNSY z-mk8E2?*&C2!tEdBej(y!9>-9c<2_RxqIm4q3nj`&W!i_;?kEZ2MID|`@Fog!}~=kQ&y1~#n>{3OggPR_i;RF}%7 zYS(eJ@v*UmTo&IP^HpfS!431+Hkr*J6XAuOF3Q$SS2DYinK>`4HdYz3qwl0DMr-iJ zOq-s(&Gd0imP-#c3e8n6gof4~z;{{_yHz0L*HbPpFW+(J)T480s+)z-H;i&w1faVRRlX%5@dNh2_R-X751ib3p=)S4k4;j`wlO-Ij~%=FV_PRd4KM zxnb$Ts#&~~l)d%ffRmBiR{zlurWmcc?gVG9mNz!;`-=sQO__~>N)@^K+nR%l*JBi z)%PY|0U^MMDRq%C2 zDrD=pW0JD#MQqHIJ70a&Lk#V&QYqQn+TPNP#mul^to_D#vtW-w*rYOxZ@uQ73x?T} zI;Z9cap(C(37*GCP^!7Ta_ekyvSD6%$;?Vx_SiHLglOWLcY8tvTxN%)tlLv}5{x=2 zlHy3#x?`lIq);)b#B~j{m+fnPtpBhCga54GDU8(lrxIi4wgq~*Tq2yLERwY@829M{ zUbKw0C2@^gb_&9zSq)0L4J+N-Umh7?=Z19Vwbva>a;n-X9y6b>wSBOr_R5tfCjJo~ z>%?LP7FWVG9IDEEsShao>}(byp{z`)Fer0$o_xv?B!wqr2cX`Nk873w<-M5B^tDbs z?}i`Y#c&ym_lR7-R^rS{vNOO>34TTA@yN3Pth`#NJtyz_2=km=Ai$;a*4Eb5etwlD 
zP}}}ejSI|+FWd%4bPsbyl>5A;zp0&{d5SH94$Hp~OCm+Oo+iB}yfx#3N{`>ANX*u$ zw@QqOS7#h1v?rhDrYmZ3RbO3hPFz@U#szR0AeaemzKqBWI$;17L@->MW4UO&wl-F! zJxJ+AVA;Go%cLZI!DSr>mk&<~L~^o03njSxE3CjKrDS!%4}tJ$?{@(195!HenHXYc z)yWbfWm(OtgGA6W`9H9!iCF9>BsqQqLdqF1OmUr>riAkhP;|JudvlnFxmnMAG~0=D ztu428ZX{jdtzoigk@p0)HiokRF8BF$;60=bY&qkVBi@S*=ok-)i4sac$?jtTvf_SI z>+CyZsRG7yLyu)5E;j^UxR455yQ5Q0B7y0V)^jhd^hZYZ2k^<|sb<=Rs%Fl3-q7QE zd(Yv#v9)MI=%oBH&j(}Iyw;bd_^dh@E`)`-=-|l)%E&6BVFd0S+Ocv77>v!6WGRoQ z93n7^0W@uF3|08)$v6!f+N^1YO~jed>C3b*`N%2uzcA^^GwkqOs>~bK7tsn+d_+~3 zU>!!tD1JaN(pq$vPoC>)$aYueoBXkKAxDUUNjoXX`Zae5?O2gkCI@h57NKF>%usd~_f=U2UdV2cm>iQVD2VNjjh^DiNJP=MIGP|KsP!sjB+A=`>4kEk^@-cDQBhpnQPN8M>2%%e zLilOZ){%0LctvOPh~>pKK+&Ii<7f;4xRfAC#HAbrB{BY6W^r19k$fH2<9FCdv28An z2f6lI+<4fTu=2urzMm4;N2uxLz1w-tG_rwgU6&OI!54X|^Cuq-#&@}V5YqKd#-3=F z2yEQtLV|bk@$=a6mm-n)H{0iPRwrk5=Gb)Pol?*vUap?1)7Jxaaf%pDL`vQ;E$V^J zB*2lk21~L}cJlA)8Z|t2{rR(KB~c#Z&U7sziUEFk(K3EGW#I*6AOdcHTH^ICbi#vx z7m-vj+}e_3)wv-+%GufK*H@WyK|i)S<9UbzbLEJ93+1ke6bdDh8&~|=LM}%78YH)h zDYU;FR6R1jR`t|>RE0|V-aUWldctGZS62mZNIX(m9YXKVP4qc?Jpby^lzQBpx42^k z6*ADHamxn{t34A&RrX2Wlk)eVv{fmU+o8~wd^Ke=OK)VZmA}=m3rIjxo|JaT2QG_+ zdo*lyStnmx1Ap`92kuAS6LxbU*zI7+COZ*DT#Bp|E6z$(!)3V#uY~Ed8i!pNoFmP> z6=U-aV$VISS3Y4idt0~23`Q@ombs!#raGqbB2NRS`7udK1KBHUr&U4Q28**Lx|8Fr&xHHVegF2XM6 zkgH`pNnP_gFAIB9sylfGPkNO$nGIZ;;kyB@{XmJ(_Ep6qvd8+VH!iEfFvJ-lP_75v zlgIUz8;h_iNtN!Qt3BKfG*Ysr3`?qt@^m}nfByn$3Qs3_XH>)v2ADkOhNP{VhYIP?j%^sbbfgBmhM>l&1Yxp1`_c{5_hZ4 zgf^%5KEHjkaAN5w{(g~wlk$s^ z;&q7^ugj>VvlXO=B?1|h{hqOE5CG=el`Dd^SZUFgk{wRPaG_dW54-?Pm77&po|Z3<(eXB zn$Q%~ppJEXA>&f~U6Iy?-5B<8DpnG!R0hf%JLK3^SIG$z-haG^jRzKTMOvf^E^!r| z%sCJg{m+B!L0BeLD+52l6Ak%Le1aTt*fZ;Fv-UF;!uY-fz~6{&?|bRwuzMd})_nyW zA>LBHDqWhvj%(Jk1j`z4-^A~ZNV#+nh};L9`GB-!r$@6)pKa+i@KpnQnT@?yava-| z_wP9k+%sM5EZgYC^ds{UH`3Vz)f3{5QBUuA?G%552^Koy|KzQwKPkAm$snc=-j7Bn z1G_I0O$hAo*0>6_ppDD*2rA6}qaaoJ5pZil9h~dl6~=1LK zYyYac_)2s=cIf3TvBT}P?Y!qwsU3+T>pSJ4NfS^sabj>1yYG_VJ{E|3cGm@dTdaKpbAW-qkgq+m|{VH{w 
zCx*{}%|rle12!WTUmKJrzRhRsvrT?e{CJA`J4ihWZ|~>W&+0Iu0ANt&&ZzBpk|Q zO&(CGwO=Ikab(G_I*Lp~01RM`=e-4DZTR3biOPOEH$JNPWn4N4L z)-KgSH$3++HOci!`I(@G!B+|ty4Qqj$s1@CVRtR%?}YBMT)X_%ri#j!JMjLSddsJ_ z!T2reskG5RbJ`frCtQK6zlI|QURF0Fyf+-%4~+Sm$Y5DPv5GqbG4rjq=Jpcp8+ej( zLEyoeyWw=k7oV6-bamfg&?BZ&sA#*M-c>Q7`I_;Pw_dvJzB65gR92gp-H7P7?*W|j?^TdjE$`=XF>XavpX;>Js-Jh=Nsq-<3~TZW1z$+2^Hi(FA2Gb zfYprK8@;gcx8n_^Uh%3kUP%YCpN4J(+`ry3JwL&#IXq!tAb7Kbe&8UUVD%o)i@}Mz z@$kHsre9QaM@B}5LZML1mG$+^pmXOKM%$9MWxc&cu|mhTeO!8p2vtDrv3tZpUA;7u zDI#V>-Wgk_IsR^4(=^jKB_*ZE z!@;Qc}V-toFXXlca3l}e5#5m zFcKKYx4!gT_Tdy&(D>S&@GQ-$#9jQ|UN?okX7{pcZ)mKokK5kt?V3zvbD7C_8Qt0&)CxW}%)G~&?FtC!tZ=SFzn zy?dAM6=?xV1^0c}jOQAN?!UehQbbIlG*e%jnRw%ZY09&sdaZ`C95&s_FLM)Hw~j7| zPuwwa5iqb(yKL#(yO+Y)#n$jJw{b(8|4uv-%P!x`+Y$UZICL^&=tK4O6)I5f&K!kU zP4hl}&(b$%V%az!VQz^m(#+wpA4%Wp4$phIn2vzj`z>T~Lu!qK9$hiM2hnDAyhgN# zU(hB6cbe&qoGcB8UQzHa{Cz{R#n+FJa~T~49O-L9WBSx^L(T?Sii41%DUg)yJmPZs zIAjCGX3pCk=H)GHXq)j3Cj^!hAC@z)bk%BjdHUXF3En)h8*&z4WfiN)=av^?_A>=> zV`Ur*Rha!^{UOn;2{EF|+ukJ?@O3R2^usmp%wn_Z6wxhN_EJDAv4&mm3zvJWqMbjt zrA)kUu3nlRrRor)Tv|y-EjpBROdK?J1z#M$!l-Jz98QQ`jOI>Fo zVa-eSTuksdzEC_Z<)uyJU_SJkf6yYHR-`mqV0-D&3Fq;)7tZ{P0_kSvS(caF%y%!$ zXKG6A%(j2_aKSQ~{D9@_dsX^Y%RFp`8;37yLgu`fFiCT9mui$p4qV85$S;#^RCio8 znn5H{PTkFYQ&`z?34-Z9Iba2tP?yOhbi*M>rE|u1E%{8DuClk!76F(Dg)AAs z?j7V1-B-L=ppd4KCcnWm^K_wi9F%3M7n%KU5K&-U78MF9q>~9-mJT^G83ZMq#)v&i zo?s!1N6s;mQj4ZHSp9-@dt)_JVZpmaB1`8E*3A`{MWwfmL_5Qs35gtX8ch68^FC8r zWGj~K!6xqYo%UiSBWA_fO|}omH?xyBEC+8?zQC?RO}#w`VaHF+JK-mR-w zuVV9iaqnL|G}9g&5+Y1QLt{Lsp;zbwfQ?<7B-FCG(>7h09@6E>6=g?Wr4|Cl`V(E)Djm z=e}yw==bq`)0dGrzjMX?5`R2lhq6X=ws}3)U|(<+wQq`Ow7%?A^P?%0Zh#uk~EEi=`zej!>l?=sft8=;dxZ-oKH}KFQCj(=knU zaes@PGfS1%U`4ZAe8bf$7i|9gZVE4MgMG%83a6U$liKIE;{wI@an25lQQ8#mzcB$5 zIDJ(gE8?9FZ~`}@D$l#g-PY-V$!P}-N~d1XB%_8C{q>I?DIc_3ow4q|dBjRJQS?Z{ zVYfp@xW}qIT`E?YRIe)nV!ws6P&-?*5~nhoX}ZO)!9hqiL22khL+`@WE>D{W0V&Zz z_>`&vgTbyheJu9QNEC$&cyI0uqvDA;d-m-0mOPQoXHFJb+n0TuH(t*?Gvl`*mYZoY zkd_N2X({wJ8#rG$kjY5Hd8J9W=8erw;}U=C=Wj_b$2UBw^Gdlj=;?mw{8NfN<#_KD 
z_1x7F0oatM4Q;yK@I-6^v zlXhxFx~=J3YJ<+Ih`H-})je^GLslv>y@2MU5{K8H6NexfZ7K)xM(9Hauiv^=lwEw1 zo}K;em|1fS|46zf9i0r>Zg%#SD?1e&931ji0rL&-!d*FsZCnYx2^*|FJUl!sX7Acq zJ>k8uM`LC{Hop=hBI41LaA)qVA~K2%B5_!wZJdz#%>{Gg2@jp6#&r^6O+TMY>mjah zFEFN$$*jg0Pf!eP%y3+j5xn(IJ6~1Iv)bkG+EPV+;Z6dHp*CG1Ejm!s)OT&I5->cU zRlgTxxj4p_J`**7VP^QkXgb1>tf%{q*I|~bIVVOVFFKiL57Qp`Hovqo&2Ot$CeJ#Z z*6_9<$!g$2@uvHrK1nKa|#kJn7e<+!6pLK*1 zE5h{3C5NW~0a%2L9L)z_4WHeai%*o(^`^ByPNqA`QZJt(eLjyy$RSxMDfA#6q}SRb zgk(F{K*WrzrmAhF|03w_QXM&Rgt1G5SG2`s3gK^ZEv&sQ%LUqH@B zd9Y+G{=xlJh1kR^Lqk}jEzh@Plt--Q{Kh{q^k$d%=KDn-%{k6@ZSvUSyN7x(FGI{Zdg1dE_IzS zx@mZ(DVjy?u1$C2lDepY$n`_5`xMfZVti^3N5nG)G!K>A#yU09$t)Q)C|5=E_$A6J2g#}xd^altn)oaw!;8EhR)z8D zPWkA@tgX`15nO%Dp3jQ~7~Tp{Z5Te9V1)GE%-|iZw}@bjVh$|`o@T9on8UXX-Gd zJj@j;?GffTwKC~iW!vGDFE(-;-|TGv5|p`W=N#1vnMRu;;h^_3PBzWz5#!md{JX26Q*1*+D@)|j?I=Fo@@4~mKCF@DCVeqw@RwgWedXiZ+2e+!;yPoqdGoNHpuG{bw_cdvy0w2 zu4SChG(9Dv64AZxHx2|&_s1wV()r6pu+hmFWtctRf|dAic<4b>M&E`pc@FQosh5VY zGU?z;aaALB?x4r`_*kXg2Sl%uv6o7)H2OLfv1mlxOC)lKbcK}|Mj)}9A-V6_q27mIgm||QBgm)r5T>M6D?S^(ijDkG3TSg;i z`sSHB&vK8og>{Y|pL-TZUUv8cUY0;TjF}h|?q*f8@(lSQOwC2N*qbt4)dJb+gfycn zL-Z-^ScQdk*F?D@?njCGs0H?S>u`=ZJULA1>-o0PBp=Oq&Ig-p&zw=fCf2f2bLvWy zo!YK3i}rgHwqiTVrW!9V^WPpH+9WNr6CbOhY8qEBz&4o)ni^YLk`8uEEB7PQ`Vk^NFt2x~00GP{ZGU#!5y8)J^GR3iI>v)$h z;M3zh5@%8GNE=-USdZ(B=2sR##uAYhjNmzCj~l^AqsxB3J>7O0>d;&a8&pm5-74!+ z^jhb=Ussn6y6@6-b!@51JlMhbK>^}d9wpUZ32)e10fj;DVWxoPL%s68z|Y6hB}zaJ zO>&#M+!}N_P3km#lo$2ubj{**7I%-xTniZrL#0c{qhpz5IO~i^Sj?QW`&+ATmoUj# zah1pwhcvrG=2(jiY03#Yi!9w9Bc!Z)d1uXC+*PD>W1dRw*&|GtGZ*);tmYsQ z#q`XMSatx@aP(adQOgC+3RAq%qfLBn)7p633ewu>?tnjjS~YM*vH64esTdUg=Y@P=-a zl3LHL12AR~XN?H+dQ8p>7S&(0n>`Y7dit>kWrrJf!p0`_k~jsQH*M9sR;sADICMn7?}jg7-)g5oc8j6W zlyVwVYf5tR`ku>CJ!QO1R`+g&@cHy%D3wycmS^bX!a_W+=wpGaOd*@}U-P|Box_`^63EUUHnB|@l6 ziejS|Bi25R(YB_BWNKKYD*y9pTJKlO`8g*v0kc7J{H|VqT7uD*fAjigPFO@w_yN0n zzVWN`V8(rWCqd)R8;g%87+)ro<2#(^ofA4VR7iK~&DCr^&!%x(^oXmM73@_QRL*L;S@*mnwbDLQwZ+?4 z!^C+aCb^ zs>foUaY7Id$QPjw${?oEbpu_c{XFYAJY9UoF2 
z+gZlK3u>()K(xS%zx^Tj#j$D_J$*T#^0#LQhPP#e;!0HWwxaB*#gtr(rr+LHR3oJr ztm+bHvtFnmc#Z|8(j#tS5>afkpD!3|wCoeTQf%8Ny&1IovgmSU@Fyj_%{Xx{HuXft2k;TD=pK*0Z^2$6T3i z=8{NVa2N2(F&E#I?W7H&tPV!su`u7kX;gW2_%=;&cz8b7$w-P$P*Fc;1C8f7K7A_y zv5R$=tnsY}E*j+-pE#xk7~Yrk?;CpU9+PEENjG-Sv?$Z3*7i3oxklVsuUhSi-yUSO zJw!}xpqeqS8v#cim2J`kk?qsz#YIf^YQ=5=u}UuEPa}$b78BY+@RxLM``vcClLa0` z=w+E_yhlq1Y27+~Mc_j1?(b zCaRK{*%4O;-9^fx$KW+A7>Zx5V_D>x!q0K-?$w7!sbp*O{b#o*+!>Iw?iLrgar1$TIS0;oZ#8!uh9eTz3b}-Zd|~cUvSavKa;q z`i@kJYCvyLi*Or3hXrK=$Fm$c5_k#ajMyOisAnFPx-N=@T@{k1YxIKXK(%jaMhwIk zOX_h(do$@?5NDiuF?)VQ^D$kC%0T6ViI9fTjzoM5xm}_Z&m?+3(R0=>+f4}ND_o~^ zOfC-73m8;zTxB8$S9%86I-Hstz1cA2n)S{a^c$#frpYL5H)B+%ByPP(*?azY&Qtwo zlxl{YjI0nML!}qJE>To;^x(Sh( z9bOfgj0}1BtghP*lE6cQIT=Ot7F0k5<+STzkFk$T#)}5F`HRNg@h|<%GCW8@#RHcnroc-PJ#<7IKcy5-Tk)` ziq`MkzI~gpuC9(PoJOKLnD?cf@X@JrID3h?jH|s5iHmQ!s0{>|HsjOPHEO)36$S!^ zGdW~*0nejiPo#}h%5@c(yHF0i@okp^;4uvIy;I(ue_9-SSZ@bzAc-}CYhJyMAC6}l zIJ>UMdivqB-PQ@OvQ4e$Phv*AypP>P z4yo65uou;n0!?W;fOKGRs)J=FPqO3nLY0 zHp_c1;LFAmQyih?%LHclo{VXI+NcybRzDJsutPaORaXEIj|EKt`#eaNFIllkx+@7c9~F$ z+Fo}~W>+}Ou3t@jcXH*zwB0LQAU2Itw2)vBbDWFD}&(eWu}o!s{$tIUt&ZDGN?)yi+FGQaoaaxvQsUd1GhanYI18GyN&e zI3do%Gl!ko7_Xhhz+jlyandg7f@ni;KRUbfTve#td5CCvaKmhrQ@pGK%P0pO+`u{3 zG5|Xpn{S^E^dtjD+b+>KdWoYOi9~bxN;;c+j)F#v8QH=H+3In;v(j@~*%#`wpY9DG zFOX&BieqQbV`OZP)?ePH>Mj(d7G4RBGFusc$LCFwv{Q-CsZTae*yU_SQcZsuTNOVP z^H|QitQT`0Z4ZOpc6F?ANyNUP-Y*7h*jBMcPn7LhfyuxvEO8nMPv%^mE&mKxk-8Mu zsz(#E{@d09eg`2B)_0mCtvQGLt?o!NtxVwK&M&$na*3TP9*2XHAly>g7KXe0y>Wa_B)JSVb}YK z5GqX3bD$tZr%nf)1Nx!(n-^NP#nkK)vy!i=5K+9Ms@Y?xRA`E&7Ff+3#z76v4Ey$B zxVft!@rQx{(y^S026p#$VT13k_glM*0jeUH(<}ykS|EbQYm&&?D2I3FP7-cByzo2p z{F?-JtBbszTgIKt3_>-JdhN($BDJPFKEQaD{c-2w@4la)G=D!TGGn=U+19Ty--y^7v5 ziLaI<{Fe0lcBOk^gQPQy(*_Cn>eAK01G+~9BXhNDPAWS+nMGq;N?K36aQxPl z^F(NKjN_)3*mc$@&I@9ct}E{jT%12aJU$!JyrB5({(X#q(EBZVgH%LHSm+7x(H#7% zp*^6*fflK9?EKRWZdeGyD27}k?zIf3YhiJ29re{~Q&(EEJEQ;BKOGB;KTek|kBpo4=jiHrjUk1uU3PSj^;~|3wDOIpr$gqsu)$P^=_VliBB|9bV^9@fjSilf#5tCp1-KC=%blCe8!Q>5EFX*>Y 
znG|H-Bm&K|Ffxv|_~rKj)3RL<1k7r0jq;$qwJm*hhlxhlMc@LI_xoJ^z_2{GP2fW^ z7w7(uy|?g+vR&K8DG?N<6hXQ{dME)Y85(Jol8`|V1O%j0QM!hf4pBf6l$KOPK^jph z32CIH+wUAk_kQ+%-|zPa{MPp@)?Ti)XYRSLI^#Hx^SGo_a2&oeeD@V(z);Po@pHCR z@il(6TqxTl2;B^Z_!OCh_^5@;(nNQF!=s^wBW@SxM$(RlD+K%MW}1E;)Pf8Xgcy=( zXdB9x%gPjvf3UW)YQA`ZrYDb%Tz3n@1oLZIoPvkf)&}-ma^%Diqey?3EVGo`Xm3U3kGphV7u;YkWl2*!!QE0ZtI9?$GBS1?G_#tT;v0OK;QH=1%gbJu_-Nmai^!7OZ&c_V%twtiUI<52OVanICMP++dnJ{7IG9w8xXylw3uEMEqTwuaW#mMfn$$Qu7*C^8iqosl)$`h~Z9Z|72| zf`&%K(;t_YY}<{oR&11(;D1|RGe%sQen0pSOOx01dhCkk4Z`g)L7^wDWHdqMO6(b! zMblc8=s$%=0l!Z|kjWZ=*<|gASY?2n+?=k{E09x2(F}L|X@X3rp8{8fKQ? zxc;0DQ}vv73Tg$0cL79cx}Hu>PH%+za?r_@OM1wA+ln>6FfmO*=?aInhjZ!-MuV~zKFM~)Q@@m@e|d*14OqWWI!O0TKW%0_J4N#cD$;pcaT-vLfxULq^}=;FV}N-?%9<YU)l-{EW`5bOKfg|S;?JZmd`jSXG@26HcyIly z`V*46&v!=aZ={&qw6vtZb`0Bc9w0?T6iOo8XZDOm^`^yc#pV|*qsG3p@iM5W*=A2kg$m1X@?#5FF%^Y*&R#mC9QT~1c?iA(&WQ#ot)k# z(1w#R6fQEY2c1%PiX+-v)+)Y&za{PWg)@`c*b#R|#3OpB*Z*VJ)6DScpqT}xUd^DB zM&Av={JrLxI!Lc$`edcC>aO=pGp{FCdFBnlr)wfr7XZ(u|5PzFgg-vpoc;O=FJfEQ zi^ag8g(dL!Z|D&jxM6SJdWA!PkAL~2P@>^AO-($9!{o0d- zM65oYQKh?@uPyH-#Ud-$M%xuHer{@1O`NClx}P3Pg9SQCo%@^1ptt&a&mH^Tw}Hn? zzgMVN3S|%j6kV;ugpI#FDF?#9syneW7x7q1j{n(YYtIJVPIooPX$O zp1(sH&NtnZhT3`^`#i(3ZdfSDAiyaV8AuJ>_S__GPEcw1_r{_1@TP6vd6ZDpqRl^E zewJpH(7M{s?|%8s<`cK0TJ0yQOfw>ey}pegP!(E!svmbIt#o)1D6NO9c{#j@i+l|# zlRMobPd&6&QRXANqahO1YfD?AL*` zdn++E@b7hIleo z%KUy#MVax|q&=p0p{Qm;Ai0=N5m9S4TF&rYM+PlQONe6%JPITJSuaL=IBwT%qC&z* zhIYEY#-Wp1w3qZ=Rl4`aZDL0ze~-Iqx!jGXPZ703krqWSbwY7U2rrOTi*U$%nZtI^ z?ZY+H$}0O5d+@m4NTa=CP>V6k5WDf_@OXuKYX51^xJ<-};Ma%j>WSLWr#=VD0KH0L zn?YQx?vtAwA&P{K1d~>Gf!5SXW$~%R&#Qg|aWkU->)W1NUyIGOYTGp?AGDBVKNMjy zzX&Z;78RcAMJ+0tnu(@FzrqD|w1S+q2)kHc2b=gg#iw#M4fwj{cq5?am6@5jH`cV? 
z!zzFksyrOV(T_|jITe%M)lSu7f>Gb));A#}VviZ0KEGC!(&Ssge93ib0w0O#RCrG)g(o`v$c2hrH(lyLLB+L}kJ`;1AfP|fi7tt=}Sj~dCv$b5e zGo9LtFw9el$yIN4TwLUH1!Sky`(X0;R&D2i_Dkv#cg>*J(qIm!5JH}!?*_~5@}?U1 zojcH+fsYJK$DO6e(@5HlCH#dT<(Tyq`}XM`CC$Fz4d~mvy(Y-`dZM!p-4c`3yb{ae zXZL#|T2@Ux8|8Q(afYEXut9IBRa@yzN!@)O##MXEJ2RV!DxReitCQOks~IGw&dp*g zuSGmtsYVG5@AU^wmKSAig~wmGs;2faX`xjcwu`v@sx!C5Zb+Rp2WHir7Zyev%vJ~K zq}()8l92)-B+wOc950CCt{N)&_6Q=L;=-B@UUDVFf(#~ljQM&6b$2b1&zOVn6`YJp zV=bI6D7H2HGT*zn8`sOjTyw<}|6&92c=%`-mzF{KcXLk1(_L+G%-+g$b?mQwb7PIq za8JQYtG)WJ=8gKrq#UICmDO?h7DI*OgMR#+^%O2?>Q7e2UUeIY`DgD^WogA7csE=y zUCxD6EKS@dDkOMH7q79K#zVziWi@*saZ@myDkzCJ=NSSZ2qJboR>) z)LmguL(&G(nYhdN`1lQrI>WY&RZ?59P4D+aLBRkC z5l1#`7M0YLyY~amqXXBn|JCe-~>UrgSx}ht=e`MR|5kcLnS`BK-~N!5sI=~5fxCpMi9F^l6TvMEukCCWafLb`2T(pDelMmCcYk@90P%IeOD&J zd{g-iS;x92q=XJj*YnOPh#eat5qJM2`^9^$77Nsmz5?2>?+Q&PMB^E5_<>_4|B_4@~uy7mKuE|pqml_8a6&4M3Z9(_SENroR^p6v17-+ zx3#ygZ-o-(qM61dAh8_ozYYO{!`tKWU0u<5`1{=4>o9!4X8oXM}_lq>v9-ow~lzO`8pUztbf%3j?JRGA{a$ zO{kKZrb11f>x+ylbwQfP2DBQ^(9!Xxe1G0sbfYE_h=Wxfw&fM8g+bXuOk+DR-5Pzt zWHcP}B&P*z(G0kcGVwdCd`G^9IP5Edv-SS+`0d70470+AjtuFcXJS?@EUZF8=)ns6 zR6dUFHDJkqhKlFhm+NM(;Vo6bK|DR)l=G1Y%`AL-u&khONi~i`HCLb8W1YTY`hxFl zclMmibo*l!WL$?8%?n)IOiYHJgX!nY=?rLzk;LqN`nbPR22=3p@L9XqYPBTBe9y=I zbDy8xe)o{Lm)3hz6S{COBPul0C3~AHRu^UbWt@H>=iUx|jeAQv1%-*kkwddu4>V+~ zyrBu#ij2rl9@}(V7%W%+rygFwe zGgNkOPIqn%stD?CuQ>zl%dMkdU!U9^kZQ`He;GW)HstV

H%r(siDVLBe?wTnG+heYW_=F@L=AnsCYJZc{uqs7lkpfZ2YkiiPm>{nuruiMD zN}MXD#Wx+i6sWWyPcap$P#lT~- z66WvqzEa+X=%V9tJ1c%a-iMmh;j3v~_VMv?d$U}>$M0QzCaKPO>eC&8V3Tv+W(KI1 zcgO*m3cWdWi@iXobd8n#lpRZ;;z$jAIP#0Fy>;VVt1S%`_(YAZC%j!9JwAD*C5RX5`^8E z07o50ti8bH^sA}HU(Fv$#lY^5Er%s|5cw)-7XO!K4fPt}_&R#<(u=fZ9C5+cQ`x}`u>=pFLzeXLis{DQ zdh7CQ9n%ZauYjU7H#Jtr2t+OI!CaC$C{Awq-`CO62?d=R zYE`7V=>C-)E>K5U1gC8Cv3Dvqek5Z5aA!94{=wbe$~KYmHX=+SI~d8a#|{B{2?`%6SR4-x^cV)LIz%bzA2ehOFRUTJ|*;%Dx1@Zl$f16q}^h zN}ifVPV+i_b=sjqUgV8FWd5^{&Y60^uzMMXqEeEQYQ!Fj{5pH2Fq7sM}JOvio<9`HBWd2bn3Hr#0NJyT5wb1*A&XaDcn0<)s zx7`OVOaSc`DPQeeHKC$0(Xg?ZEdcD}>i9|?XcClybTUp$sRlf{!1!(tUcVcR@{{hJ zSFc|69$t~Tl%z0j=jwS>hbU4CedDYjO2Se|PDfX}l^ZQ{p+8UkoNFQ|G$g0tFn-t` zv@8A&Dq!O^`%ZDGC)Y5dZ6f3}bmx7BtDMT9p_7}yqbozYHR*I-vz)U4uFU#IoR&;B z2#@H(X5Ci2?$+-mnuz@|QC5YTT3baz<6aRarm0J?Zs@Y zGX7D_%a9Lhb^as{#t<8A*#ZiB50*^vBHnR+vK0b6x>if@2BFGPbgC*UDjp|i%snm_ zzf~e@NeKAN61Zi)vYHPfL`{MB?^}7{E%ew7V`P;+4M?B=8+lz9Kr>gqG()o}jsQ$& z@>YD*r&I;}Dj6CIA*XrH5L|Tr7<7tu_F!vNs}_=m#rN;uxBA#fG2I7Q&`Y?_kc7(U zLc>yy=H_M#(x8v-b;}|FTQ)$?!AL5lg-K3zU`Ej{bhtr9(y)qzzt^-DU*a?A^ z!ilpN%D&i^^9RqoOI5}hoDL*Nqo9Fda%LovDSkSx92PA8I9KymZH4WZ3*(7$7}LCB z?j)40!=i9|a`Aj@v#5Qube!uO&7;m)&X<>V06BD3xcyneq&tVp`Bb_-)} zr?+*WB|bTKsss-skSGVyHBmT86vx#fBT)=kTq!KSR~z5h{#bZ!vNi5q&r_@ekst)} zio`DNH8?i#XQL&jf?Am#0K@pXIsDu!N9{csxlls#v4clofpS6gj`$c~04BMw3+6bg z+sI%wL2Cd#j{pHYHS)ary(KC4flx1bIZ8YTeG^%()Is~{={D?po~{{<0VW-$dIA;@ z{v4P9PTV$$0P!Bnf_PdQhe?i3RNi8hbGb+e$@NxdM(ocG5W~-b z0Xr|{#w&dw@{#@RF}w_Q^MAB$tm*!n* zAz=ghf%DKxn~u8!ijcK@^JR%@cBk22^=Go=_QXv66k%VqLF2ohp88EC0Kznme?!`-t-Vkl+P*lPLob`fOvxs2w=uxpFe z)ZLW}Rki-AoBinqNq(!;bLxX@oGH5_=Tbz0A7X-9@N+!enEqW^_~h1#j`nE?^Qp{pd?B1BF&FqVb|_0D zF?*HyAAkxP|08}xP|Hey<^zo1hChBx7)zY$@)*fzo6hsaT{qfef;;6G?&W^lfr+Eo zwd~2Exgi^C@$_zN@t;xE|w9g!~n#2oKROEepZ0=^LQh8;T+!@es8n+X4n+`mft-Vd0t zB$2s2J&-p}5`Zc^1a4$^88IL1xBJVB&_5G$p3f*lTp$Soik_YvM@fvXaLzrb z`rLv6b+ZaZxKBM$XN}eg%yihjs8SjWXF-`r041%%a2B2>Y56qH=H(h)zqn-1wPr|NY{!3H9 
zVl&t+TosMki3{a^Dq@W&y{p;foJ5$N0+zU5_SpZ&Pa_rY=lA?Cwd?we1bVBXr?8SiqXLr*XwuJVih^!-f9b!Z|+?j3p8 zzDSdg)yxPw;aFXw4*dsKbLk`)B@iw~Fxd)3MBm>iGJd-Bq$4PmTbixQi0_M(#h^*p zV=?7jsat(y=Vj;VugEUTPTuhH-}$Kf7@~5Fz@=BAs&Ci(fG z4N6eo;)(z%>CBK;2dUD$fl_O|--id>A|mm?fwK-vgLyH<=x_xrUHX;%s8F`jY%Apj zab-S6-005w&%P(}^aV19cgALE&Ip%{`dSEjbMvK0KDZoC)mfqZ_^F8pcNl|guvhJR zn!b{nZOZ!c)5v+<14+@v>2Ea_oQHaug$)^bhM-XOmz88)2^<0W`b&DRLN59rUV>8a z6^b*wh~15!NqiTE*%6>CuMR2k6DYFlL60Uf$UKvU2K48?Veq1?@JI@pnoatrs%tV& zzJzmHhZE(g*c|9j(6OL4g80f@x|q-JZT=Lw_rpguME|*o>4k~aFF!>b-;qgf4-uai zv?VMumgKvR&JhQ($wgxSE#18r`8~^$ywt$mV9`wgdq`&H?ChM_YuxzYCX~SY3a|29 z(8)notY~{7`agHqdAk<`K}B8l_I4@s;;P-ubD8NZv+iOjyk5+}2THo98D%suqHG&K zzkLSRNojkv{x28cHpFM|@mSc|6)8yfY0O@i43t;|PPUGT__UfPLis<}?AYGfRxsYR zDM!ZJF22Rwsqq|C)6Rv5CYU)SzE|GwK+X?4o}rg<3o}xbX7;6`pkwpo@sqf#4{b@} zmUEv@=?XcS6~)w^2LUXA%L9JhM4ko1Phj=YbY6AkY zz0SN6vOwhi6wj@2?Rf$~W-3BLLYUYL#H(IDhg@d%`ddFm1emGfBV)OBo*hAA6J}f( zz^UN904`Tr@4DLirhKMypmV=q^xqd6vSR}5qr}?j?GMN(PYkd$eQj%ekPRxHy7rjv zC44hpM2eXGZUE;bBwn#G24yzrvaZW>9r=3CqS%mk6bXmki-BPD>4OXX#VVkeJ@y6T zKW0sV%NEnYi*T#&ndwd6ru8p7bpEV2Z*=>PgAUq4+Qm!N;~77GM0uyXZBtXexfztT z`&otj_C35nl^{f#nwJTrn|W_eUYY*#{Cd}yI9xPy|Kp?Q^Bg_@_R;gX5EX9ocgrj? zM?yI(9qeK~#xK8k77CG$t~f{er&DNV6-4EfE{sD9-?4UDnhGR&tJn~$M*LGRIGfyP zz5>Z$!3$AhqE;e|CIORk{YV<=GZ0jnQdLntUtYN`Hu^Bj9;f2f&eHiUf2jQR1&7ud zOr{G|KP)A@i9FlCekO8sp$MZC3wxB$8(PB<7ik1Sq37M%t*oF{%ulM+B&=;(njRv? 
zJSe;C=;~5z%=Rc4<*+;MPfzK;_U+&1>n}JO-r~{lERKf9hHxWYYH03~&Dya1?yV9j zxw>PdcG1Pl<+*^E^s|=d^E@+buT;NCl(l*BdD%w(*|KWZ!t5^LrqyCG2{8Kw1z7Rbe$NUeX zJ(iSlAczggRs6uy!Sk zZDjz>o#O*GXeXz&$}^@7%vL6cpR>m^E+DX#g(l{#3%JOj3qCC*N8Z)^CdOUgJB^VP zQwoc~*LxPTu_7NfyK(+3?``*!N>Lxcd%mgP*cc|Gsh7d)_QM1OE2fw)tuOwO)&n(E z;ViM`LTGf%=xlb)y&(NV)GoRCW;U=oj(-~3_$sGX{^4_Ox}@H?^VGwi6CFM7adG(2 zX(=2eyaVFTBX=fex%Vp{%XXb3le-ce88hf9Af4{knZkTfw0~*BVX(7mpftq`>c_uG z`LC_}D5(v8SK~8YFCy!iZAdgKUvil1&VE};E4DznWJ6DJ88wS@R`J16GLwvrP4DjP zohvX$59(KGNa5SAy}aOXV>iWtlOcO0krrN68jbdx5lc6Em3BeQSGUwlJEoclE=Y=P z?uzC`79j8OIxPJzb!QmU(Z9hco0_4mm!D0b&5egX9m|!4$5I>+kRNc$Y|e-9K9dTu zr@R*#^O<9cA!?M`PYOa}xtJ6QnB^r|o}jhkVqTBB9OraMT)VVv*I&0a z?Co#uW_{?rBq3a^>9>?#XDaD6)4E}-dC{_hjHWXyN(h^{AmRQHGP#}2Jp1^~He-Fx zRTtdc>ThH-oAoLQLws2X(HiawK?Sta_)k9liJndUyT2aX+_Y$MB{EUj-T6?R>9_Lp zjbB``@U2tmagupz%A?R%4s(eDd3@-s?T=Lbdp`vV85nqMklz@mmviG;WMJ6khTjU>3J{2fDPceV1wkS z{ALw#J{(bycQt*2V2)-{ZX)nb40Mv3uzusVW~TAC;{;x2G|)k?Kz=Asa%1E)#;wJ78G@RN(1&Htq}k=*@)z*2wkE zS)KL$M)6rMQykznle51tV5%GO?>1iV&H5d8czCCIFK=_>zGFlVdDCcCO2Yo_9c5w> z*LnT4lWrKc2L zs*gnri1(I%ewK7;qj}dN=NcIHFp&?qN0B%gTtK0Km}l2mH6hbQwhd; zpBD0%ylAxf_D(aEb*=B>?wp?fOIz0q7yT30cXq-e@fBVw%W3V3@26H8G22dKme-OF z`!YRXtO&^br#gDdYi*|-gZKf=_^qBxx7GrR&eB|Nhz`fAX2KEH2NGfjx2=8HR3xA-CNC;+dqPcwIc+EehoWFeAFL8lB zMJOHNCxzT#Se6rIw0BHsl4Y^f`gST+%EMLU@= zy*rv;_tCK{+&5(X(%xk*Tf1spKTC6*6S-_Z{LJxT`LX$XZwl%=jNTjAJ=@rdQ#vg@ z@}pc{7uDK}i?)a)8^PuqIJlS;$|sG^HgEmPYlS$cx1B?N5?bE}xs&4Uub^5&GCUWA zc2^IoF0rhN26tQq=wD!Oi3L%1zFMBG=~zRIpcQsq_~9`o$#iy4;4zk^PIV({YZGT_ zUVP7x7x;K?da7Yyzg!;rSkGR0w`MoeJGFGy6H+?5W96Nirf6m({p#V++{8fhbJ!7M zpnjse{~B?86u|%h`zn>WT3u}E!PY~;8GeJgM%%1gubdRoP9F0$qhekaGjU((_`LRF z%6Izr{8k@SxmrXkZkB&#WNA9jTuXZ7fN=19VEb%el;~5NOGO29Ogu>URK#8G^;mr6 zrhzK?Jv34|qkJXvr~2ECpQH%7k5?sljvqZ}%oei^JtDTA zUTf4haVE;!`86nfh6YiUrf{5a(#ooM^W=$GQ)hSd{dwJaKNLt!PM!RTJwp@pI7+W?N1n zgVg=lEqbv}p@ZMQ$Rsg{gmEa(OxPYIbl!H-M+y2#?a8)1uV0=Wy>7{ie{G1ODwq9@i>L)n~&jf|6b 
z<^J4=RCZuO+_~h`Lx){ftD|L2_`8ukVCtWEExt4ZHY?z`hokK=jb`>>?G(mhX#lT!Bu?0-XVpTRXa0l#Iif~yJs$H5{9vij zOs4)-P(y$hVuZXrX+Ny-3HHGvK*3iC(Cfdlx_5F~m9@jq@q9-HIqQM6!@D!*w>nYH zrUG1CSvm@)d9HS{l{O9TnKN?->^!YqU71g>Fle2em{wU-R*cLOD>D8(p(VrDq2uS< z?H{cD?OBZu>Lb61e<#V8X~(FpL(6jo#x;V?myV(3-|w~6!?tfuyMqdG_fCdoW%-{& zRr;`)ts|g#f;x2ed1PkK+k;RMi@{5($d->kc7Er&k*9MBUQDLye5|B5Vyto&KX7jN zar_T|>l~u&rT?|Xsg7{7D?yhZTt}72yrLYQ zJKbv|l$gqmgz?}dju)(yoD$5fz9MF^`!3d4{BiXsk4}jElNR$f^rH_WjBcvW!x#Du zDX2f>yxSgq!AHrpKrQNa!uPe0h12_qmHU}g8_795HD^aOV7>PSF@+2$x%Fu z3^HN}zJcAT)<-)P+|WnRh`sosOW#pH3DE&f!aFXfsIc2X4MSqV^$TvfOM*ZKS?+6W zO#l1$^S}JBE5(GM3namKI%R0Dr8_ot-8Qv_jIR(TV)fzzHY&WBit!Vq(B?!)!_p+= z_tY!*P5kYrWHg^L?MjG_tckYd(NLt|S3z8xZo;}ye0h@@Hu$IF^$f+4MT;C(@eox0 zlvDp7;0;6U!DRqM5c{TdLBBWZm+%+bFL@fm zXO9fP;c$>5_EYwlIByJ^vHmqNBZ~?-E@$G+=A^@GhnkMg@c(@@Q4ok>-m^~(W5oeX z{J*}W#s+4aXfPwTMvX&2oi z#TFzK1l;WS%|*7(|5&pRF3<}0>dV)Rxp^`WN}({AXOzgGn~cKLP&9!=g$vUeLy(S> z>FXZ$+F@F6MwyoAe9uPk02GdoC9XY9%J5O?X_iB+$fjxDi+Yd~B_{ZxMw;YHUf(Lt zpS=<qNIJvk?a;5q-X=gdhsWX_m!;g%gsAfpd#_9sditY zq+~hqVUN~;%bx#;cM6+n$_0euB(>y20gj!WT?aZbsvxu`vPo!!dQkz~Z3I_?us_$k zXN`V~HLXWG!|_Muk!f|4Jo6p<7!j=ibrf7kav!inq}HWh52w#PBz_RUjyLN6Te|tf zhhDhMpcA-_d1lpLE<*VoQvkfTKC1&LPYT;032D<-tUNj6YZOE@qHF3l5e27Cy z1;As{Lmg-xF?5QFiID>=i;p>0qe!cR)*D)!YwMq1FYX1_i|EJEm3x4-@(AG$KEE#T z+`2#t#HY!j-af zRmGqWs#Xa=7vuR}<_}jviI_Ly+&(ovKHhsps7=&D{E#=6oPvVo;CxqX;1rQ6RE?Y- z1`dF>Sj=u)55=bV&&>36z5(GoT_ct*mSX3C{RYkFC^aPY<*BnY(+>mM9}J+xKed|z ztof^U;O8pcz}IZdZ457O)I8w||CKU1d)jzwWxQ#blH8eq0|<1WN_+B(GNQM)m(6UA zN+{rHt+T!+o&4X7KvIi&9G!?w%=*dluN4}tI31xRO|+J7x$O>x16Rar2CV%{Eul+- z9Z5L!Rm5$`|^m%RnHeug_rR`dO}-2`ORZlQ1kR< zX}UXWH0PBn;mSkI@y|HoF;*`*gjcXfOM)z|8z0dqN=&tIq5WvA zZ+bvk>Ao{mBxzvjgSx z_Y_l}q#+RD{E)uBU=Ion0ut+RA1s*VZb{r!B;nqd1`sms_4B*7sG@#6xFnJhDQEkk+jiio~KeyivfA|j%gRjvNFs~Rs95tE{?%0JzFpMWIfF2tHq%HHVroN2 zYhkCPQa{gUzy9E+T|{(rWz`Ke)3|b#N99}Tt~u)q194!Snl?;GCL}InCSq`g4%I^_Q8HU23pKA!##Qo(B95ymNWwz($A@Df zQNwbmv6BD{Lvgq925~6$VL36EHA!!e1!CAxlMeBnE*d>oOHIQ2`t*0#x?}-lR4DV_au_VN 
zj@IXnut)#!?PlnD0-JB;@U?)X^8Jqjn`J$=7+>sN8!Gc>3(Co18pV>o6FCf3L>kn8 zfSVC#fq@a9k)gTZO2;%tgnJE@1(gT=VpD0`uP+|QyZ00si(e(+#jJ;f^UKxsM=fL7 z55@jGPDSo4dOVCt&>AzTe+=+AQ#vcoEnX?r#6>TO=z- zJGA@~&Yw6T58x7|+iMrUwm~`EfL?{dnZnx7PHH&LnkMzEDlwQ&&lh#oFFBA1{V}qlXRHyG0->y;5^L&V>WlI*f$l%p~pIo#{&= zBKj2P7lKB5_ZFnxT5{lCqDOEE7Q6JZ8};K!)d=GSLsn;Fy;{THpu*A%s+nPrLIBW| zc(rwE#-f&RQ^pu`eNx`+!ldNOt5Cz*pRf)tl zswnD_1#1*)*uBYN`w$3bRV*|M9R5&S!(2eR$3oX0!0q%7*$NR(UQ2Mf1dS5HpaI(L z)E+c}(0u>m>pEb*+g(O6{W5OLRB6Ws8;uDtnyT1J!Ry%sCWa`Dj7!x7JbDr|yppe> z2{0@)l-p@Sz#YYs4cvKlXNmSH?-H93r;HGb&PSz+u*-=LX3YivmNzH`u@ z9Z4@$ana~z|8*3xv3`>CG$*koHU^AWf}@ky^V2~19Ika}MDKRC{+^ zV`VtF!jL3{`1$$E8$G*fS6H>H`VQ=H_ywR^$(9QGW8cFCK6BS0K#u#&ztK$5nea}X z5-~>aQDs+d@78QAEenUJUm0FV#$opA38E?&w!p0FLQ}YHFJ2v|}Jll+3QJtu0qyU#|%NWh{k+UPpys;3v`l84V@`6c;Zq0yj^J ziHobHV6AbCDN7U&h>E#z{zhBd+IVuytGR_ToAgExDtbz#Gs|wNL&k&wB4rBI2h>j= z{7GdRPvb!e>h~Tb2VdsD6+0?BLZr0C-k5O>f}5W_;ts?4OHt1!53ELSfMPf%>93l2 zcG3=O#0GWFmhWOrV)WBk`Q4i5xZN<{(`Kmxd~Da0HniYgf~0>OzOY0G);DG zC_X0uv9>spPNMhk5Ey+{aG{Z8ZUXMLYxeLsSnIgl| z4}4qcO>ddZ1@7bG6Ao(v8#6T|alnqZDWvW%7eJxyz{7vwTslz^m2i;~EW&?ajUB-5 zteb&m(y4T5@(5BqBNj3$Dz^z>|Ivrch9pks^_h5J4k@?da2Y++BX{0O9_`)$N@BP= zWAu{eDrx{y6e%Yv>hlC;3S;oaKDc_;7);0g{KsPwp ze?m@FMC%TppLQq%@9%m3qt<+h;3rSm!a_m>C6G67LQ*}NVdrrRb4%ZubLW%~4-ZXz zJ!q|N-FjW9)N2BHr~WV>GxL3*tQbShQq0BKnd`!ZFcwx;_SD+oOTiGS^^Uy_ylTj~ zY?9XV5_#V%0lurwJY+a$yp)!*#;2;Ju2!m8q^_P`0gQ;9<(LL4f?DVSL?5cK z-MRd_3{cccRuQ?U?%2n1YCQ$My}XeYh2?#`AO>`a#bl)Q@R4?#80kTqU=f3{&Lt0r;m6clskau}OpP7We7~U;& zK-uL>JbvB0{XCXe5|U%C?{`X57cd@2`~UhgZr2q2FoIjR1{KNz?wUR)DBB5QI!^%{Vk_L52 zlOUeC$66-~+jI_o5SoC)XwnV(D7ZRX1v`m_BE;T*{HO=ECfr5Pz<+bOq(rcE&En=| znF!W%d8#O+vhq1zM&}Fn3Wp!qeQmoQqew^; z{*5hmpTGYLjtfHrkd#gsr6%{Tz2ZPb`1e$vX%*x|v=bAne<^?$p;I--%Y*Akb>F`q zQ6bb)e9@Z{*8^+z6Z8sGRFMn*4*rnw!KvEP2lzNi;04y3+Jwm6y}R1>kRQ*J39$V_Fc4z-I~Jb%e~WV3K#>wwzB*asCX0 znM_>-y#(@b79gW{pEL7eW=GP75mYF)!u5f1AE)MZSFdqb-;Cp9QOF2S8}8s*(NDU0 
zwy6L2h3hYU%v`yfcBu9Eu5`6-Z+tT=04f@ZybFtD&M?hWd-XrnAjKX6zmAEq|9GH#wsR_5cq+y7wE9EcqIq|MVq=rL)FY1)Qppb?|@HhXC0ljwC#F zejT7DW5qk^y|p!h@;n6nRG5p(tvES39WNB&1v3A@RIu`vlu!Jl`jO>?cju=*G_sTc ztu4uY26Z>G2KTc|437Hh(op4cBwP&pG3f)~q}v!{*?MFw$H#)tf5#e`;RlUOEW07n zI)mTahTc4?ED50?FnBx&{G(UjM!4)%N1kdP!=mYZlJ@22|FiODOOOIrClxz@mP{n{ zQe5LYrA6p2dfcL(op^suP$AL(*Y#VsrW`kPB2mUH)g2>C9kczCnE)=3#I?kIoV(hT>GuLAJ)3K^fDY^F86Gm5c2Z-MTW|7crCxgfvue7ekmU}CGC<0G>5^f!^Hfeq z2vHaOLc9g}X%5K?R|}C4SaAvn2y7V&U1NDDCohlty~-)D_w(n_&=XXz`WyUzWBNMY zm6eG^2LuExLw@&-tVfBN()pJ>CS}JzDqorG2R@v`8i9+C}032-D%V-dq`xmc( zN!u3l4uI@K4&Qs_Qq5cIV9{(Q{8D8U^}AaWUy6*AAXN~kYbVgs*FU4Eq(s&50ZDB4 zak}+1$HNa1Gz!q1U#0BaBYX}@3{sk4DHxf~9pG3D{@Ha|u6Q&wG+sMT(-kkYhmvq` zB$Ju{2n;26YwF6O;@!2NYlSu@O5kK0g-MnKh6aH{Q#*(tWk6eSp$rHVs1yNosZ#FU zCBAj*?>s)zgL(7`_;=k3l0MFuyY#pYf(`ls%$ZThiL}GpfaCY zT4E}6!QX^{V%Z`EO%R&oR^UNuhbN%Sm|?6XVZGGp564yhW6c{=MD3n{|8!$#XQAO8 ze7jH*LMTO|NaD!WWTHvwOP5=^A`mpH>m zsjZG`=^?Ce;}FxZPq?}K>rN5^x&^YQgu@&iuv=xNV`TH%QSj2gq`nSlH1==Dgwc-2*BNT#12wcND80{wT@@%|X&V;Ey>lJ*5#{N@5)vVu^&Ads`6)lVw`dvc!P z{f(RCaPeYWwtp$-oTgtge10RQw?>n4KA#$$q|A%53Yww`1m{_%&dwXSQ%m}8D{#=Ff<#qaO%kd5()92#;yCH%QQ9G>O<(%^5AhL1$SreX88 zjJo+PnEv?*c;a)8=x303zxXFVXk9#4+sgsy6;Ki$T7h(Fa?mNTu%ClYih|gkW1Snr z-!2)(zv%*yM%Qi%P`Bo@d$1pMsT>V&DjW3G*bfe<`xPYGukjlpyovT!Q&#xkNBYvD z?^J8+!XT`;A;Xu}kSSgIY6io*$95=7|L+SgIe(|7(=FkOw0kv!2}V7VGbP5uWlu|z;f{Bm_*`!B5!NKWY(_va4Q$;%mDv6>yO6I1DQr^&^_ z$|42(PtQOC#V;0>EsWctgjA$?0{$aEKQ+w1Nq^`+N&mb|J!-|;L+{X3)imF6krJ15 zD(;^f-4i%CqG79tJGmkEg)i;fxpG8L^-)lk-~7471U$`tDh+T`pL0Z}{M2o3+P5jL zgpq!h!v851r&4^u>Wg+E#NlSwmCs;!4*e1hGH6f0D*42;x9neNe|b@{=@o{-3>5>zDj9)Bl3M{+=r(Q8TobR#AWO; z#GiK+vKL7I6voOzrPuqf>G$#ZZV-Z{5yt*@y`Dftz=TQqp$nS-XG|jgJ0@4<)#!Nt zz>(O$KPlFfX$=ve{bzm-O#uy-PB)=wt#8&Ca0mXp^s*4NWj1~8*YZ6X`zJn4p^z!! 
zrTl}uV!l`jimh5cZQriZ8K16X!2iz~i2}Oo|As)>|1%%`^S!^||5i7q084H6WTTwm z9krE?zWw-IXYfGdpb&0nP3=g(_kShm>()#UzNy1m6Gy5|2D zZ*ccT2IC*=b#lz#3(aJbdyUG6$jD;)pLJb`gj$(-Q!@W;mOX(=108^GtDPV4{?3?9 zi+w7*kk8ZKk-H#76z3#wD-u(gd^pVx+1_Xzkxz)ifpz@%RZKyU%wm?Gwu^)eA{hl_ z7FV|83gsTV)e%B=#dYzjhKT~wP}F1pok|o6=|8jBFF(9@elxoMv|7avG1xg#m?_}w zMiyKR0*&W0iSMcXkAfC6!*}#>{>g{I5HF*F2lVOQRRDcUo86yBZ#OlWQ+Sg=b~`X@V*QqkOaWA#r8yVI6M-nI>A`Fj1R<0MI(aWa{{lEU z_GNlG?Tmg$Lqp57LG2kHHW3dnMk$)W+|lL75EKk)Rr)(b{b>BFY%(v4P?x3eu;MaW zgr1|nI6?a9ZbyKF0}NOl1+8~HE(o{`<$nNiDj;Ey4r&uvOTYU(KU6y&Eo#qCMtZ6J z|MVjR(6B8MVf}N-P?d?(%Z1j9sv;B+PLF?I0-wR2p0QtA%7TVy**na`CIVem+LxqCR`hD+EZpK^Qqoe>S=Ypgch`}Os8 zG~%|a`>U%fU>w%e3akp3J;LD}y$P|fj9K5e65|6Aq%a{Lyl{q~$Zk|LMPl#9P9Y(1 z55wnmziQ&>I}Hd5Qj#`=Df1KW2*9idPp_DOh49SG%y&>%rv>-4A0}pIW-|ep)X8Ie=lKz(W463MQYz790K6xQ2j z6A?+U3zvZTF=^|{(UI=H#Z2;Ocu)+ray~OVHFbeBfO}cA*Rm=T(};hY!mSXJ_`+@4}*BI{$ENfV9ewegNcW=s*dOO|ml2(Fq~fQQ*3Z#`eI-K!&ypY+Q+B+O(Qr1ASg^`M=)O z@{yyQe2k zRtO=}7jG4k^ndXJv;Mt803GJjf$-E_=_kN{ z=t7`}$2lpU8oY1cwtF6B0F~V@PYULLpY+i$0ZqH*^__3BS@QtfrjgW@0 zh$wskFLNO6xx0i z>Lg>oAnnRXQt(*i+j$@B zCQn@c#p`=r+S7qxDBTN=;2QSnCQc z8Di|V)93Vk53P3?(Rb~_EumsNhb??qjPb)ewX1}(0z}VW^k+!>3~Q+Q!+3Nlj#G2ytFtblWeVe$0B&tjk$5`V6bK@MZh!`xxk!;L#E+E zX!+hq7wixgcyPxsVEq9RM9KndASs4M{a4Wo(6*C>RG~;Eqs7?VfhhV9v-4TkwVZ5{ z4|9!h33?jOSlvp$X`h2yZ3%nE+7dPWwgFFk0lpjOEY|oy; z8{6|iY1NGLQU5*c%!=bdFve2e6za@19*FP2<8_ba!Hn9AeP{4|ZcPU8uir%1%L7zX zvFAq^XbDs?X=&1Glc=P?u^ic$LE*&=m($G|Am%hlYN=!qxCAz;DbSEn$`C;+7~=p^Vh~qkS)pJ#n_en9W>D2bZE=t^ zCunuIAO`DF`ez~QCKy-HVVLlro8d1APq-z9=n*f40?@_SPn)$uy7TUElJR;cYY?i# zFgjYGK>NBS9UBrKLF|F3NEOOh0*;^*#nE7QxAQME1Mn*-!-!z)nAcuNwF^LllK}2_ z%3%Wp#T&U|IT%pZy)6%fx|D&Ca`6YtX|R?QfqoK@Pc%L2#I;s454jX*p@ae6OC%se zU<7A=Aiy14IJ`vW{@rrUWHQ+BUP!Z$Rpcp13TaWm>mcZ~;9)=xo;3=2?OtZVk?;gG zViI=tt59%Kia^U+!y`aB!{4A{O&|5ybO(fsypKMvTwt(7!h#CLe=x*va!Tn6fr zLuC@gMI+BQUl^NT9U1 z63qZBJ+N?RUSfb5X96>>N1V}+le1OT(7-JV{!}DOiHoO^Cp&+gZo8x*aOFu_ zxW#&=Y%PLWpym~ua*z|^cF`@vBQo{C``&?MbF4NPiL&%UQmHK4=-pQG+kH*ti>wmL 
zy8BwYRRBG$IR`;%h2j+0ARYO|L*HM z1{el3)#d;Na8c6yaD#?wSz|=-5uACcoKV6gn)yRmPd|V5{_{X@{?6>r+1YsiAb)^m zNaA$6u%MSr0>BhmS)Ox@#+%&&tS<0&zZc|^ z=l^8qq?1o&ULWBSJNlp{q1Q;G%6q#WA-fI2K-RZ!FoCD9*{rl$K~|NGHe#<#hL{14 z`U^C%yt^(floX&=R^ec*!V;pFj8h85D;9bC%v( zFVt0e0&x`DVjxWu3xsKf_nV_bBIa)8+x%?q42R2MzC-{5(hAA4$&qRJowTIcZ=Xf| zOK{7B;Nt!6r$)=Hmw0zUo-bi+Oh#=K_!Nepp9Chd$;#AFJ@z?(s)5*GNe=s;+0am} zwwUP$YKt%vg{CfU=#np*-7e|WC0?3%3mQ)Ee*JL(p^{(AE`5OSOZ0LSL@aC66&cX~_U z*&(=ke;$RwXtbi_;c%*o@w!FZ$UeyUI*;h2oxZSl@^7dOauv3AK5YB6P`CB|PdC*u z(OcJ#DWmc+d$&bO_HkoJAk`Zm&=fd^} z{p67jdOQ=`JFKLoIvQjPKZCo) zu1}%Q-OlkjUH2o`<62AjL~V{5r5_%e4DXWn>rByo#k^#ao6dNx9bMA#NeizCpTR1Z z4vf95cJ%4?3%>QE)0NKn8C%n{G-O*f*QGm2N2L+0HUSblXMpJ9>Sg6RwmdYmbP1CxZe-k z9SA7Eh5`eWt4Ki6aSe-@#B@nsTxEJk#j#XGa2DA}AbNMJp}NV=xxCi8Si>dGaweqSCYbBXkTzSEDhL}d# zF9Ag9mFfpgNsx`qzc=ib_bV`stGtM~JPcHM{o{On4U(rXR`V;` z^H&bPJ@eN3G$KVAUc%+R2M>c;NE!@Q>+`rv*LJQK#hMM7R}gI6F;i7e)+(_?_hh>$vNX&?{^F%)C;6!uV$IsB)D< zi1!BWeCPNrh;ZU1Jdg8=qmFrbkOBz=p?#bioHTUDbA~2$U@t9Y>ijAf5O7$Wv8kZ9_m8Kh7J)3Bln?qzQ<9=-uYfO~J zNN!>^8&7p7C*eS=<_~KC>hQ~dXudl@f-HLnv~MZs7<;3Sb7FK|gf+CIKyKX|DL%UN z?p;}AyyFp>Zu=fOS%YAW)z}{285gvrXXY>ZYQI zh)<~t&Z&CYssYZ~jrnC~i9%eq)PwI*rJ98Fy>H6<5?EN+F~9JP%rXGr-51 zUP*G=o=kGP8#QbOs-_W(9x+ynb<#jDJ*TqLzAhm}Hf^F{q%zix^d-4W{Cn)3oy^nU zsVy!1@dRAK+}!pU8NcqNkFV_~y6pEt&V{(Z7HMi4c?FXw16hFx|K(J>p8kR2E_cV% zoYS&QWI0kE zSaroQn((IwND^6esm%SuW;G@>SH9jW!na5kiWU(*QS!lmm)RLnZvSnVjWgAE<`lCk zV3n*a^zjL_1^~npwQzxuE&w8MPzp&SvaHADoXF6H`)EAU$eeHCK-c1B#D8w658x~a z2n;&on^MezEh-+v@m08eIOylgmHOS5<#-=LhezBd|8*M~nhwxw1wSYJ?-#Xw6`uB| zZ$V;G_QGSUKUv(MD_FJFc85Wo<0nm)(PDX>nh;|l@%yJJ!;BD;^Wy(l7y$7NP)w-3 zlebiKj8v-mVx~JYlcFLB5iq^cw30Pe>doTMdB&0z zeu!=5+Yg@6Da?m-$i5p6S+r9%qn-;8hOipk?S?=;=vCmbU3* zxnn=cHWKLKfw1<>Ts$LP__>>(`TPltDVn_XMNDssCMR&Dc$`+5Ez$g^PjO!MZzF5Y zH0E-1O6JsR327p0ez8OC4IBeFF`);ajsW@aGiBcCHeGVzd%#QmzXrM;qSC7O1ro^N zMkQq=HhnL4^h%`~f?O|kCIlRVrPc2EcG4=$2&d_?SoKaX{sGpKbU=a#$0h?{Py`X8 zHRG@OF1}eN1}z5|b_t+XfNYn<=!A{rS_bDAcTaU1EomPLbAy$>lSZ%10% 
z;n0cNbPlL_@EmtzTNTFJJ{3%?81yjTC8__o`XJiedNjOxnV$8K{t^N{KO%$$i9!XS zF3D>_aFYgvRDXW?&v`z_14lz}?;?%AGrOD1(9WZxWQ&>N^=JcR?;9qvkZ@UpoM+Wp z-j;sR#pcUb;EL^4K}2EC-$~0x9G0Kxe8eKgzL6lzTlt}kD`79UdOjXxZck1gqu+^` zEj#HcJ?BU+{wq!A0B4*s9aB1~K1hSvQp^bYF0jk3T?5lHuaSxqK}n z^v}V5Gdt<{7w57o9yJFLGX|eQ1>&9O_Q5PnbkHMrzvNc!&s?!bA-Sl4+9o4+cP%&J zY!fdwz9GJrr5ow1q(rXvF&W9A!_7^_&JA;=3{Mu6IvSp%sUM@C1>IGvtSR=Tpp zV=)PrGuez{^oq%nC;7rY1Fpko)28byX^KV<*;np$y9VG}WZyjkX^o6Okj0Ln@#N?L zh|ETL7t-SekU;6WnmHj2+EoZC6S1~LRYQv!o^6}jn{$;mkMT-1r8Pa`cscd9r71z? z(KDQmR5#non;@sR>MN#h_a&v*=vfh0u9&V+lg{G$&Wb?o^>w7K8b$C#rig>aXfvUt zr@Z_AsD2omXjqIdWXC37L5_zZDv+yzC4S`l`BG4McJ@n(0m0UWiDdoggV8~(sMKY3 zWtxs}y+J~_Y!c@L33JUzmX9T2uFh$z8SAAetR>pdvn|ZJPbZ9xukV9a(~qx~R81%7 zZ@0HQ`XVDzbfG`T_`()BSMhD!vqs?Xy3p(4_#S}%+0ltcr$b;22I~J#^--{^ujS7t z8+3{DW!r^em+v?HVwf(f^X-WF4JrCqJni4xSJf#mS}q>1l=o`*u$Z7<@w|(BPnat; zz{#RuMCD7TmFZODF8KYKaHL|G>|%2A_*yvGnU}2n8z4HPtp3?2m}&rdiq_NQ^groa zhj3?)2X(IAoWMo@GY`LQa7er`M*)H*Xx;3L!SCaxt7kRA5vwOO{@E`d4Ykj(pD4XR zD6(JXX#v8g+& z{)2z-%?;XZf@RE$-^XdvZHr+K1wct>6=C?Nz#^W&q7jA5LNAYNWs|P6^NXYD;>njF z61V{!6A_#6a0fluXOoA!W?LD?n{HdP@GsyLwq8*W&?t2H8z!jK2Ypa2G+Zj;5$4Kf6XxII?wvxe|Ztzx^ zTP@U;_93i+n`dXBYgX}S^Rl;r$|It18(OR7^^t>*6J^5%jqq|!^ECCZDYg%$Rlkjq zb)_T{XN}r6UoReO=i&cd0A}yT%^L?x#hR%i>7Uk75O0%({mkRH`_zZf;hhKM(1e#&?j&;j~DvHgxO%!krYKtjT zN9GJY-kKe=H!MX2NcAHHQw)SoC3A$At<{=zh39nj_ad`RE;PDCl+$ZXk2$m%*W9iC zMN}NZ%G!j>SD9hfxw0rPsMnV{fbiX4TkQ^isw8d0Oy1VjWfBeeVci^dn^(vL+|!#K zw3^jHpn;PR5fgLS3RK%OELl@WUP2ZYz~ZVWYgA+d(nOGsz4^%}f1{8a_ou?VYN0pP z7SF@ehT&ThNV>TaX{XrMzLL(lUgGWU>|6^3s}%9<@f^e4N0-hPLC&u@-U5IRz<7n- zKnT5vtZZry>lxyb7?VPKuwO{G0a#+?1%OR30PJs+n?S?Ik)DQ!CrQU;BMKLkeD=bF zf`SHx?5C^@aCU&1Qz6M4#>kC47W0q*M7t3L^F5^-vYA=u!;7X-4%!Qy8qlFvuC<}v z+PmX&(ugWj5dP)vID~A!vXyz6yjbe;wcf^LZ{;rIm>I! 
z;S>+CS*WGw;*=B!Lge3_lA7(bSscZ%9K8|}6YDMkB{K8!k0Zpoi@cMw!LQ#DTZ001 z7Z5|xP&Y3)2i1nia}*(;a_%bQ0aQSNIOR_1U4iO_EI{$#Pdw^lm_X;;(5n^ECwctGdAxhxSVzR zmDwMU8ol^^n!X&?J8s_c$$uuBC2+@8Ws}GAODj+SbV#9ztlm_n*psBioyxd0ZnyQ) zu(&VZWR-%*SR}ZeZ6A+==5Zn4k7=wnAqXLs3ovi4S(%tBa)Py4CV<|ox)UTwDwR)2 zRT}AFqYm-fu6Nf9RmPf^;aC8WOCh*Q{#jJj0f?gMuYAl^Z^+cmwI-60Az_4|;er6E zNJeqChBklpXP&6{AT79ff_1*1P478mn6Ft& zIq_0}Qe}}o6akxj+hX>J`Rw@h%~B35WrHV@nbPIrqB!0pd?I^-gs9>}x_C+DDwmU3Z`oV&yUFSk3N!#Q+#`bVs0hnkHmVpShtA3l`c-fW?c>nJhdG zKqQ4c_||>$I1YVLzo@Av_eF8aLxiLCt9!a+8$~~E#70U zowJvm332afe2Q04V=_?U4sp`n;%yDzGO zSB)ap>=Jj8wV%aWl}=G<`%JSgofcJFrd|d%Bq_s&zN)lW5Zs`)ww(61M>32ET(^k| z*0^Sufo=f?0f8qM8(U3bMs?x{A$Xcy5Z6w|<5?t$+ZbHmrLP$%X z7O~*bF{K2Nj8SCH7_Me zP&@TgXAZepE+!5ZacF-VL=NG7u5Lq>_GFZwmR~j3BginEcHva$V_uR@h`+$JJZm`v((g>p`*Xx9TFV zA0EspDarABVjkY7UT=jQbQHtw4Tm>8UTZzn+Hv&^MdpvCDIV|TI9gC!9S!fzA?@2U zms)r2x$>aaJCE<2mEfCiO$`K9_C#?yt(D*y45UUZZb4L(qE?AX9QQu#E|^6|Fw{Sk z;(BR|(N_jI-G65{eTKj{CrK#eP|8_HOT4!89pT<;6N^k9)gzIex7V=T0W5#sL`I`t zTu`BB?IHGt|9WvSmyjs={yp@;LVe8Y>T2d?MtV90BclS^tL&St^>r~zAlt2GVF&t+ z80hGyBOo?Cnme@On9I>HsuUZNR`}_-!V|Xzg(mC#9G;rJHYw?5|MW3s%<3%3MS?Mn z!KhOxAdVqvcQTJ$+Gu6eEk)QIIuJfH<&s?D0P#`9j;E7YaA5oU3a6FsLv-&q-mJ~h z`B}%mX^nIl3yy9LoyD9)8ZujUy=rk-PD!Z#S|pa>_v`CQr^CdC;=N{!M@ipbVgIQc zuvDO6Pr}t+--1E|hZ<6Ns{#{263b&4><$k4h{LF2P}wxx%a^=B;VIikisH8)qB?Ww z#eUc`-{$7#nFm1WR;sbm7WeD+0EV^>fY&s(K}915tk4(+6Kl7AA2s`SmwwvSp(?o zJFhrT;!Jw4YrUkK+8KGtBXj5)i|6uNfG13Y_* zH|*c0U}mvVen!Phh@wy8x#|yqV2FF*<^ieP{_&r$9 zGvKh;Gfxzp@*gi*%%PY&v786kpAK2gE(Lpt?zR!b`mmjGHO-0G3JEufKQ3inQsnA~ z*|+hopX>eDW~hvbNDz$3kxk9jGkEK^->AW2ra0!?7D$3IQSg1VQavKiEXwRsm>dWR z#UimrA3M^i12zhROF>N9YdyOk>aR;ujw95kD|mwZgZy&4cDxt(bD1g`qhAw2$HbB=VDk;jiK#&d8{WiF3?$yotOI!(jn30#vZ1+)T7$U_2es;0+)DbVEB5#AuPR8B@`X0zzv z?>o15jb{B!B~Fz)Z3cAYZ44T39-$u%eFsMe*tmjfFtHzcn?Lp3tu{Pw``8_4m(%l` z=bFrOUK9P}#aHa{I^j9Ai{NM8Weo2^n7a{7?B2~o8`E93DarWQFt;<9d;<%GBTn;i zm!3B~`=cdwGzYAVcJue*=ewjfBSro{Te4Q9Prv~#A!WTi?B?>zd4U`DdZmulgMpUW 
z_(5!;BB9)q>%Cs6`VfzjnHF2M9p(Mm%neJ57jM_ZR4lXY_AyoolI!#=sGSv}+MYsM zZFZ$N#3cHTiS`Z?dwISxB_%SC>!f>rCjnwdMPoZn5_qpBGD=J3wpLbJ@q0eFU;_#K zD6DzFYS)Kq#f8@+MQChjh)oUK%%M)Vz6c4u@wvz6U^>KZgcWZd{CIqxlD#UBCh5i z#I)sd73Z2(XEC}8*YTJXyFDS}Z!7DH77ncFm)@Ez^=Gh&1Z4%r&?$aSdG*X!DO>01 zAtusvrSUuMp}NbT$uIt!Uq}Bpzotl$>=zR0Y)aX2lz5?Rs&J?Lp~g$nLlnPpW2$iD z_ORs(+wtwz<*Q5wjVh68!RbQ(K^i5R;cm;CMwN4#F5(QVc0pjh+!TGs7+IMZ~g zY+N4sN+0M`*^(CuBTD%P748?V(yDc)cYhk#v}Nlb=!s;g7`PR(s8>2r{RP?73Mp8j z!>N9b)$yhE_umWHOm{X7#@Ex##>?fUi+z)3d13>d4D*a(Wwp``a*Bt;lBIJUm5rRr z8NMCddK?Gz6{?n(9E4x};W(z2msg&SR#P~}_Vs+Y zkK6um5Jd8H)0T0);W-(nn&vdLygZq;ZvFkGZl%|#5S3~c6^myvn~SrnS=d*{9DCc-?1a3oQ0wiJ zjc5T*)2+Nc!d$wI8(xfZXh>JIfQXSG>C_V)xzW?NSnRQAaLjyG)<86wDcML)Zg>_r z*z)licB52qU>6QuE_uoBO%_GeCG-x#At6C1qc;9Q5IG4Tt1t!3!irK1#@jDD+pezS z_&B%jJ1+ zx)em(@IIScG{l=>D5N7*Ehxw63A2pNCpJ>k?wzlom)bCVJs49YoQz&^tR^H{XSY=? z9NxNVXmxL^U)MO76(l)r#pU>3OlfUS7X)DHn(ZFe%j7L@@06DRado-kQa3eC6!Wy* z5ItK)mz7&#+2~!TVpR{u;jA*a&F>R^>0?itLQr?Uhrq%qosiR!04Y(jW;m39Vo1of zBFJ^pUxf}_r-o44=rPQF0q?C{3D~YDAw{2?FUMs0JOJbbe_veva@VbH7=RBFDhiXl zF8mPZjDK63i(1DX;MXu;GMzKSww-n0{Iri3?3VY; ziyT+jXa4SzqoTX3FL2ks^dX31jp)Q`F*Qih3#%D#c6HHxW-rbw`C-d}N7>ry>ZbVs z-}S!IA|=o2e3>)SK-raEz{P{0ozh`B%gwW8a9U&3=%faH(R^YkCW@__GiQL@ip!Q6M3py z6n@oK^AaO%U1NnEOnGichcEkol@)_dvZD%FBag+KgL}Og=Hpj7o)KhH38KKekLnFJ z!Pa2r@Gp+BkG*aPlzSRJJoSztXbgR+hv$*3z21?m;?eF4-V@4`c#5Y7Z!PkGmut)Rh{+S4xm!~Bb5&fv4DVa? 
zffqSyL>=fNK;0$epUvk)mYX@bPo;^e29S}cLL(# zlTgIMKIKflECkTeLeKX~RFn)1=LvAmM;l>%R!ftf9G&}3m1k{P84}zkh0q3NguxPK z!lvx%Ppn(R7f{~*`p%=xW>H;qQKj7XaTyUBs4RlG@!1Jzi5Oe$++`_cI3l^8#DRv) zNUpeGo%10v5+U2yyE}LP^IJx}3})MV-@O=|a6JjR_07%UpdM?*A>OAD32=S#CQ=(c zU8{^vnKapG`plYigO9V{fU=MR^^%L@Oc!1oCEz8+3OY zkj3>6rcXGwW{2nIZZARS)=l~G@nZ04LjcwNBRx?V%A3en1t#jP2WhH!30IupPdYYN?PE!={Z(Pl4{&?~iaq9FI$vPMBm{;}SPh|w^QizKPAyX8QE z?2fYj$4I3Sx<=hcN*=D~S418S16qUanb^s&jhC@iyEO<#d2wcBI60UptcW zVK*TfG~^!M@YG1D=5=!Lu90xqU5tzNs_VBS_Ex1)T6@ukHfKY);CRivWsQ9BEy2(1fHBdK;Z3JU<&p#P z6pQd8!kux_H*UXi0cGQNnhmMJo=?%)W@OFB)gXAK2BG75?tG4$U1KyUVoz!Ed z(^qE{I(ysNQom_EW;&-yav~ z6DVHPZY5U~<^jwiCnyrHb_2c!4$4SLG$!76;5%6O{pCNeAZqvtx4+m7cs%aGmmiB& zN6W2Et`_6FVs-0&xN```i7FgDJ2bbl^DoiVb)+sW^cQT>u#y7>cY?L; za06z;ev~3Z-iTJ>j9nz9yn#g z52PeDPp2q!8m2!Ga4*N= z1nD)mGBvGMIMFQX;ok*=Vj12#>9ts8UMAxG95W!TP}a^iSKtBqS(S+YT-I#AF7vDw zb-3m;=%3oX&*{69dp?8{S;gO6@ez*gKMtmIBJDcM7+%$D?~Ywp>`Y)+oMwjx|f-u-enZ4mCesr_+gW@arWwTi`WfTe$BJxOy`fCzLj1&4Sp z=X*d6(z3g|d$}H7*2gqZVFrw}11OS0o_e6Ld-(!JbHaJ6z(J`=$i~cTZ=iO-_dk2q zcy+Rz4xg+8OTxLXv+)v(PWhDVSF5%9?w}a?KdU9Dc`obf!==}jmxuv*Xj)BnrWrNJ z2-`39xuT|27++T%^I*GcVjv)9mT(A#y1(-M} zfIU^!;MeZb=p3^wbs|Qggu5cMN_PjG1v#=f1vhTQa6Nh0uwc>hiMU`*NiL_WH@dv8 znfh%$_M6?PT6)w7i%>gYvs=a3er6&uO<$m23pCVBm#^MWwF0}Z!cvxuocxj(Jja03 zyiL~(FavzmbXd3d0?2TU0b<=YSQ;^Gmb3PbfFAW7NFd!Py1L%j6T7nY*F_FK57#Sl zTg1CsiK>=m4)jMYX{@Stj>Hm%p5frAyt^-U(ChyjzYwae;DiTB$?4D^wUgna{2T@ruFJ!!>YdOE=?NOraqF!y<=>}k+K6n zGr$G-U~&_2W^`#MWFiQo9f^kyYN`)oVxB6^Nkg+*gg$-8O#fhnopKCTy2 zGBZ^10Shx+PfH!S5{=GoPOnlIgBk=$d~@Y_jrlyqjTa8P<6Pwqd!F-jq>{P^_T{k6 z;8ABigMK=IXGH0HxvPRSxmkE_1(2QQ9x0q34>Nn&qK@1Y+xAM5ciX#IU>q&x1^vZ^ zw+C;p77`}C90@o?ykEIXn9%8@N~)_;(6L58Os>ngRtk7%sxsasq0U$G(r!m>ySAcQ z>ZM(VS#bY2r&^7sp*+fpz{CdI%*}&DHW0Y>`ZB&{64ghCQeUE% zFt_%j7gM#5T@9V}OVE<+Xf}Ow&LRDxEWH2JJU@99R$XCH(swSaVe7P112G&Gt9o7v4Ad1-jU9ad zYla+isfSpO8ny!ERIQ28K}$HADY(0)qt_nIRWKtkkx(G+exBs@P^S)+=V`Aztwu4Zlkd&Jf{OZ8t3Qz)R5L$ zJd<^mh!#zH_unyh$Ilej5HM-vt0nSQZH}jTva)mTG#!qAk>0FziywSxKE8LzTMafM 
znf%5#n4CqDBQtgsm$NNqOm^?O`u2b;S)gk$F|GZ|nX}Z8sVdjY!|Fbi_qg)n2zk+s zI#k-Yd8oHes*hKb|EX_dAv~P!Mn$gkMTvIq!c%Pu%34IDOv0?j=6&L(iBFULrj@&= zq~i~*zS}?+t9A*9y7JFeTclQBI%Q8!LmN)YjC`E2Rk6PLt}>9IANPn-LnF9T_w{cW zNZJ!LM-dYF*boFNWBtd1R!%6f$G`U7PB#Kg6PrJ@zBv3Szo_DVf1eMi26xA)(r!oh zm*QmF!(T0=!v?aAWE|`zPz%LqP7)@Dgchr*ynCUL{Q1vR zXEjihn7;wW(oKI>A{!h#1hCI^*NmhqlfNGKEG znivL0(;}NN8v56fgJks+_-@^OHGZ$efmBT@R>#mw3$vb3-^mew^fD!eJu5SXoP6o3 zx}5mRqT5`<@Yk-T;#H}^zdqn&prS!22*6S4cgQ}wsW)Xv;V(1K@(loNFRzbfoCV{N zR2wTkrt&gJgsn11L6|!J>tnrwIh9Y*AbpT#O9sU<$?#3GL2%tB%D;$`S6I; z&5|nLNOv=9X8W(!C8W=-vlv!}&%b;GnpdT*Ps`Ubk+Rv#Ulbq@_T7NwJba-eas7a>9mk?QbVr}o@Cj7)PGl6-hm7rSaOF-U zv5g{4k-9;B$zwg_jg`^$tPgap%oBI8q9IkrdTvfBB?crxfFDe0E0YS}+qnLOTsf`67l2&Rb{(j0f3*$YJJ^#Q;-s1rKNS^LjqF{; zO!XE93KM`wr0C0(d9ljEuqcmeP`dkrD{rd#$meD;__)7Poe(%Q_ z@1v)2|V2L>`VlC^{0AHxd3XC0%sMqVF*kCwJ>nKyAs4Oo?Y5R^WItYAOQ!+mm< z%HE0^Np&b>qtBlwU>MXir7GYOiwT1m#kX0lsc#Dnb_4cXYL$}#PE){ngoM} z-DnQ7oa*6U=CbD3>?(!yz{fBhUrIgwvJ8IhD-&bVd&fj(?Qlti()bh$kQLD#$U((_ zha9hVm1_Sl|C>*y>3TE4rPpmU&ZP3O^YVcMkH<9g$yMOf-erHy8CwHyFQX3IyT4yyVGCWr^H_NmgjEDQ=^se-i<1WJG}V5 z-r3)9$g}ywBy(?Fn0QBEyXh}|;7YLKeHnpn(`TDmOev>g2riCOKk<;{k7|56 z$Nz=`$}cF@FRaB92Bl_xQ+{uD?P@Ly_^fRAMp2(_>G17!r@logjpGNL#Eqepl=6xS z@5!LXqCa6w$oq3-H~)NqkGY)5TVOsZ{59XLHc075}#KxP@R*_%gdW;kpu)gO2ur567|=wm>Uo; z2|#DXV{VLtfym^VP2|f1;onn?3cD>Cpfi9Ay*_&7SvvOm{$G|6?hl2020yDv5|3gI){r0nq3j zAo<~5MNdNFeRH-W2>wfX!$3K)0U*$|ALu8Hjg3WJLanL{TkY<^gfwgC1DL?9MY|v} z55{n~J_94!3s!dZ5w5#|N;UavDeUxvPHa?y{&;57_keqy2F~VNS_#Kbk!2mRKCrF? 
z!#UCkB0v|!cBGSIK5^KOT(7{X^ zUS;~xsO9YYG1My`ZkNCNn~eXxY-qmI$!_`cOdED?f#Wu@=HQ-hACr5gKHd$dl5SHP z@4R~kZj~KOjY+Pkk7|}~;ZuNoTfe`6kzDVwZpzC>-~;uKXyZmGxeuvWA_od4a4>`ICR;Jm|d6p~9`UlL?U6)s}idgy$8YlK4719<> zrK)wlUn+hj&-fkaNXr&5YfJgT}=HBR6wK8-#_$PmdoRkQ(a(| zJ`*?iob<94oPbk3C;5UDDZd)3keKD|C5;rJ$QRKDk#iLf8$g-yyMRUeDO*9zs@BN_ zmJ4@EqxR0PBuZrW3|~3aW%B<0u_M@#A8%VuSV|%aN0+~UbFQiMy99Tp*};Y|#-}6Q zwol@HP0rr06G;Sl+a-%EcSCv>CVWqQw+EE0QqjlO2ZpCI(pUE?xjxxNQE+Av#elmw zMF0Ju*$+6}lY~dG9{zkziI`w}iKHQ=i4oLkxaYr3JYh){Uc~Ti_t2<|L^@qlK1`a) zeETMtvwwa|ic}rUl_vxZRp%qe5EB-j=?>E{}b72Fug8UBZl;Ixm{A^@HuH%1F`FW~D0sg0W+D z=#2NT4-%@$irdZpxe}s(-oYM_l3^aiM1Og2eKz3Bsq*QoA|%agw}HN4+yN%}u$c=d zuAJ-j-*WWn=UUA#)0^*Lj6($+*S#3m3^EL%KHNwo`Qi=n!ua)$?}lzC+N-ej32(9_ z-P@^HW_W_}rD^{@@z$oz)0t~Q6{?{u0>=jv$r|#E8zXwBbbV#~g8UCVq!uRFsqA_B zeJ|My6&`l-d6dq=c4Bu3rD1Xui6RKkEf;t4^C{NjxEOo4bn{l23t7k54VTvm{DoFO zD_dz5!bInIR=)EVffGHvko&pWMUYu;_j6>CsP%L&H(MTvptDm}Ut)gZDh`UD%gs0p z+g=${H@T4!#skNpV%d&xe!y&MdvTz2@`+fw|7g8&J-Nn3X72If2!fvHfK|B8s9A9Q zIAhI8F9?U^bU!6%9_RMSE$nyXA=m^oClw4Q81I$YD~OgWPxfi$F(&TSA?3dP5ZeyU z0-5Pngo^LGaCSkAyQw*2Kj!_a1Td-$<18fN=hv|lWSEEajoS!QP8l;ukQW)N*7s|I zS{dTa1zmw%FZvbUSM`F(_y&3$X$;Bf1%wt8Bd>tff}W1ihE2|?p1}5$Z_L!QV`s;^ zlK;U1e6YeKLpCpH6i>a+f+{3atufrlAo;M<)Tkm#U-q;vICZ{X8AiJGobTqApHYIr z5T>^W+<8UQ82h*i_K(eLs@dsk1JcDuDM6$-&v$SJVwmq9{8{q4_02&YRD8Nhm4-wO z=oqw5OoV^_BB{(xubkf?B$YniIyIo*_Y#&T{L@f)JXBObV`JWmQC_s`Y;t za1McWZ9-#u8b1lv{rrt;EvpTCawnoAnA#DQ-pHbdgf^@p`l?2ij%pAA&f4Gid>@Cw zEQe1{78Lju^Nm8-t?7~}&&BbJ4_M`{2zVx@v6~uDbiyR)v)ULn`MAJSq58h&c_uXy z4-ap0)c7>wX1h6a2=ahjNxj+xAGLL;{}^6{V;Xa1%+x0qRkG}G_Ux28sk1*_i^HU* zOvHdzpJ4m!gBj&jy`T51<8dyTc5Qe_zHWTTWOK!d5S#V$)^bVVjlE-nvuIeM-%S6Hxd2*+R-eC0DE)V9sp~hQq z=j6Zd6ytVS8oS^wvpH9o*V(V}XEU$j^Z}DoWfCbesIbSa#YcdoGV9>iBlWLf@Fv!q zs64F!pz?5opAYR-^Cn?>hzK_a{8*K%BWodEsQ2C(G>v(ip4s15m~l7pi^_#owNREKPYj1?R&VJ-o)Y4B0=P_$BgM4>Eq9{ zkvL{l1gRPUL(dwI>UY*{HY%TInq4BTs`jaWuVVV+Aw3s1xw;*Zez)otv!I#y=6uJ` zC}pQa@26o64kXAK`l=#^I*QK%>H3jO|m6!lMw$b 
zD1T2X^TADl+r)popQ07r*~p_an;(br1kccAtRBY@B4waJ2@@cjy1T2al~}aGui-H| zdJV1LGvf8xFkXhVFxr0e2gu`Z!v@t4`_M%ZI3tP&nC_w>j!mB_G595X?@KSjTX2>l zLC$N+(UU}o1{KR=8(Zs6M|l!x1+k; zlm9Gr0~y{Hr_!sh*3*mj&IQB(Bctg;hfUc2Eu}~@)>|(jd5q5>5F+yi_XVhvQ7q`a zxf9|l#~N5g$;JQX8rd_+{nd!6oa1T_AXQA-pBK*h*g>EDow7P#nA zt5dAE^+gi|xS0HQCv0@UPFt-p^0&ckq-Npc<1=tSzJ!S&s{%=&OXBXge6$!E!@mvY zs6IM7*&AYcDj|DJCe1BPg47$7H)J$NqW8#$72~?}2+U?MGJs{@&&zzX$*oegtL59kg|c8X!G ziEbR5STA8KZ%6g#vgSgVGD4RqWGpEckC%FMa%Ip9?r{vVUp3%fz?hUG}3o&Hrt0G{BKNaH{prAf;83CK&9%$=V9HENL|7#U!z)Do%b;# zgjfJyt(?&72~Rb_=HdK*EVn{vwq(>;R?Hom5Ul_(?rP%i2een6NMrpn z(nOT6rR~#xxO7}6V*7M|-O?Mx=o}b3*u}t)R|*%m#|mOZ4aB&D)MQ!c%)!ci?;69q zD@H}?j?*cXUflKNY_X^Q9(TQ~{J(M-{ano>t*rYI0AWbU^zac&CV`gjm|IEYP38{Q zwJC66cYqi|aF&8KqoX0_V3s9BUlGe0mM~=(-8pft3zNOIEPjGHveDCOeo0FF8)gtL z#FQc!r!}J$r|}aFtcF+|6_Sdga=^Hl=F7?2dr*cKQXX5(n%82^i;j0wrpX_MWmj;| zm}MbJwwIB)mACxZB3u}p_Y2*8xfmhkM(VBRACjcaiExi^0*yDbFt94;nIP0Ea1B!% z?W8|$U)2`5E-St^8}2&wfRv!lWUI>?ra zG2$mdKo$m7%lQeu?&)knKP4nk81iYjM6vYIxfZZs;g6v>$Qid89XaPP8Q9ESZ>crP z4|1J{c$k;5LVHI0@GU6iE-6_Q>vd2UV`4sn=}7VOLWD8CC_1y88vFPIO2_09WEFY- zq9Ne69W#~Ia-&OWqC`oUt7tbQR5*UzY;=AZw5o&{^=+zF-W^{uyJ#zgFgUGH+HbsuC65~pSGXr@ z+_+aKO2eNpNUI-9N2Jcp%(@wf)!uWZ@fNCh0_4{rzD9z4yTK%gD<1_XFWiQ6EMwXk zB2}SsL|&NaV>5gsjR66x(^yV%d1j4L5*H9EB;%laSK$`bcmLK)0oDgVz;96uk6|Ra ziFhlIy`Xp5B&6gM$|T^s{w*F1g50Sb7(E53dLZPnb|cDO~AY%^FJA zypN0NdGBXo?b=|4MyR(U1~Ut0zcM~zmlW3!Cd0&!i^Ep0!S3)1Gv&ImJ6HLqQgzee zB1(o3ik_r4Z)g*{dm4Kc-H&Hevjr7SrmteaDRS=kA*y=>WO}*~E-aW}^ zFeyLV3~$_8*$YYcc@ajmEVmgsVtf3i(hf>O>6G46oBVjUL)ZVxZcdQKj6!nX#W#^o zd^wTt^{X$a+o^}Vws%jQow{FlbTSl>X@PO3xY@O1dwie4_IMxb8Q@a?Oi6=gsyqP| z&oh4u2Kg`_9P6^r1ApB$S0Ox_b#HiOg&rT5vAgMy^$y)R8xkSRM=RvHVcsaSpUK0{ z-XV0ghziaByM(m&wk_>-*Eh4WZ0f1X0C7!daKLB&lq&s<4wG44#>lF(^#krigeA;v zxM!02BR8fJX1NJ~Ign_BS|%#vk3ReD(r>1|5z~qcmpL2sRy2hTI=!OWHL>o`pA%}z zU?VO$m04yaVf!0)X(>aWkZGcVyH%LGKVpRtqdgR3nvB(ky5j^tBUEhy-UWyDT%R-? 
z5IX%cF`Fwh|7Sp`dBDxAds+=E`YTB>5d=|298*@tKi=)HkhQ`@CBaCD5ZcUgB902# z=(*&WvP>}R@p;#C68{ExeH%T2cY|Hcp?V&mb3D@ncYeAZSiwd`iN zzvuj@8SE)ClRR2n|D@X2cy?Ml>;vau-B@_PrPAMsTMsF3Slny2z^Wy?JsgL+zs{4C zrnv9EGb{kzruGY7r=%$xli@dl7=xS;zRptp&DPEYw`9KG>H#f6P{4Tdr9OjEGm-@P z(}gkP^4|eTc)%2cK)(c2?NPW4=0!+F%{mr4eid_&_9+e*xOjZwtb=ju*+u{5wLZ>I zPx~l^9j8U-cfWL<0oeCy<_>%c*hHpEUvb2 znDvv&G?Bxf67%FF8W|JFqfurwnDh zOR+oiUs@HOIXPW!>@N-q$lm|vEp%E=Yzqsum73VVD~@0xi70I*Qx|L;i0(dLogU}2 zE)0k0z{6v!BRh^|gZLMv_|>3m$^%Ee1;3_QTi26#krxY{Tm}Bsg$2@AuYW&JKXuIV zQL6lalYf4X(y(`aY;G;Jf67Q2yuZ4`WTWOCNY%4R#)AE%9Vu{a6EIUf8b@%^^A4p^*`tqA76SsRp(kA zgK<|uKr8%+RF{7lVLgIZ`xfCuiw8YIeR5Esn9@4`gG%x zny;I;jL+eoYGxO?(zxThIl%|5qO^NYE%bjeNWWB!J&T|1r?&mpFMVLReNuYmIAq56 z$E=ab@x4KiqScCS-?7`t8>!y8K(~GIwJNBJ+oAB#Cgx?dS;u2pu&NDI90Yr+$< zw~g90X}nR?Kdg_f6-&o;Bosa5vA#o0EA{QBy|0Z!?XAYQuhWOv2GuH+UYoT?-ddSC zVi2l3iyz*9lE1pNepn|Iq)1fP{b2ZwPRi~d5*043-)B`TgLm{Zd3G2MxORRInsP55 zhel5ZTyro8Ul}W05)63Lyk>AG-CpV?c^U2hJ1T*DqeT8|ygFCwSQPZU=mi!mU@fomwcAV7BUISG8L`{2+9E zz-~A9VoZohJKNj(L(C_oCj$usSNf9}+%?xT$!#51JR6x=2c_*IVwA}h4;V8rQPXUi z@ksOj{|aL;f_TBH`+M66!d*gwSVs-Q90%#&gBM5M>awVrUrywCdqTvG^g6l{jd8l6 z5_bB@(sJ~eHDzD+64a3_!n3pB*_0P5E%Pl zKuxZLUeabr=hY74>Kjc=u$@0}P8cEX5_wUeL#bDUdBcih$uuXXc~x7bTFwN3Ri6*A znLk~W{)UYnj&h2dwf=*f=!`-bW?T1sdP#^{?|-qD)g3=9$}v&aYV}LhbL1RJoN-Zo zxXAjSzW)nM0&fA*$+usmr6a+%A^r+FCF zjfW*h^^ze+dvD|9%rZ2nir(J%diw6WO_ZqKA zE!WPz{Z^CBvjyipYF~bK4oF%O8w($0jEL#?MBbMBFIW=^Qbg3gqxK;8-jNz<5$B@( zz1PW&uA#>!>B(%iSNumFe>q=v&!&?*Xon&^L{Q7kM?d~_sbmMrzaQ4Cl>`C`9R+YZ zN1@OBqVoa-o<$)BppXASPN5#r%@_C4aZzM}y7do%S-c9xkkMOeJO|_HM5V_Ys&~f< zXH`C>2Rz4q;-yB;ES#r5+AZq)jYbW5f^aynz}cY}yWwM3ARR|$_ZaZZqo6AFe*pZI zgot6^f0!CahnaN_K*AYk6)?ECVdHtmzEu|0)>?3&Dzq7Q+HrFwNg$!ZVOGH*<=S9_ zDhGweiBf~gtmCN`Ik}}Ui`yu>(RI7@-eEmq_qFSRiG*~A1bGqk=;!zaE^jIAtb1Tg zI07*L-?P*N*3et|9Q+B-%lA_gtcG5{Y`GfX8`O^mYles82_H^d2=sS_%Z3lJYD&hvC)4}<=;&Z~oH#aH)PUGrsR&NoNSFu!jdI^7MP z3Ei{frdYdAisvg$qB+?L2@~sCL!Z2nRG6^2pmi{L_mV{|zT#JYdu8iab-dOW z$w{*4!WOaMgI5L 
zB18ycA0nl-yYP2%l_T>+v*)v^{~%)o*>?vR!mr(y9pp`E>Rd|RrARw-5Kb&O#HCbk zAg~eg-K>44ZZp*QR(jV@Ikfv)G8N4oSe^Kvm>-}86+>Gd$Er)Q$cIUAn{n!rAh~PY zJ(0+TjkFoMjer6|)gal8kGP1GF&f?FQuFtxcgQhRvV6GA;#DrC!Va^V@W ztW=8L4T>k@*ZOXcyk!WBo0>A*{owal<(fC|w_hU|T0pD)i zP8AP$dm;FT*-m#4xxWd%P{likw~8mgaLHZ5DZMd=GALeox5Smb<2d@v#AW7Fpe8KV zm@->I^zH>SrJVPTf?N3t(_-6gKeo3%d*psU*}J{`%D&4=ftDoWa|lkZ`N!vODrL5t zX(pnK0>7DKy%S(oW+sT66XHHr0f#`xl5McGU-WFsp388Dlq=Pjy4QB!yvycl@7n;i zfZs>Y@DrUJtDEMyt~>f|Ey&b3AB%pSJYeF|u86@aV$L7Ky=ZVmd1*LkIO51^8y&}^OOTHE|(%!j0;uF01gJuG3XurHX zNZIw<>vIbEFI@oBzM<^;*F&c#&7a34kNo=BkF?TfG_-THLw~M%{OHfs%FYGLpbVk~ zS38e-UDrQU4n7lK(N0tH(e%~6+0(6XV}`N#^ot#PZ@MUd)#|avYOSf>vVj5l>e!(;U5aXBq^8u?c0Q{lxiecjf0_echEiT5w(W!Fd0OgW_TV z(=*_=(F5b8C75dcLxZ_mq-&0hRi}$AS2B#6^Gmsbi$&*<$i)tg)OXsf_!B6{5@3f? zj@G`7u_8|yR4G?R-u*H@X-h*J;2Og8H))j0SIG|J#h*5n{_@Def&zvdTgb{q3MS8w zBNE;XA(Tls-RSj$KTkiqj7Ye;G%BGXgmWi0GD*0Xrc%ITJu9tr=G}gP=J)nd003{m!-wpP7YrSj%P@71{rabrlV= zhH(DNX$Y(@Y7k}Y&A#S&N!t~JX|))`b3I_`~u#g&)-9E`I`UPc9*YM!`*N_ zx1FRtUuTnS#oGN@x*4xu1En2LW+T?q+L$t}1{*9Bx%_k&8+I8wzI~HU6*)-kD7j*G z{PT5n`h$d3vp8j(I8wYkj@w>{k`U=$D~@#)nxAU0C`I;7+*8Y%DU|dqgd1g+R$-r7 zHQUD1%`;2q`Z^-YQIw`iJruWI(eU-&OjbfhWSoo_;=1oAeyu0=VX{`a-$}>s5};2s zGcbRm^2i|j?-SsPxO$jY8GFgLXNmL|e~&0@2cqv!(;lA6;hgOub)80{xQ1(4%`md8mw;GO%rl;Sl4j@7oislhJmG zR1tx{&57WUf`Tll7)Ueke|!aldNK<8X`|7c#VQ0IUZ4K`0p`p*t5aLWL7kd;`BkGd z5rktkR74@2G7Mdt<7slZNr z(cmkX5L6{QESLVX4=JzXBjyQ53vESd(GhFVO^Xl)>yDdrw_L0!^|s=9`>GI)W%oPS zK4Mu)SLfUw(`L=n9WsCbl9D@T2NHCtM3bA%xp5zh8yIU{>jvxaDheho)G~+F`6Rws z_0K*0eEndnlll9v))xv_yQ!kCNEI0@b-OvOjdNNAOd3II@r$F~ecSyGNSp03#Bi zcVSduhlvQJ{BvM0;%Qs1z51U=!+Ig-=$|f{ozkjeB5h7pEWjnOx@N&oyE^$`sV#@(}BUgU(yk@BDOML)~fYO zwR~rP0n>cSXG&{`%E9&z_Z6kj{sF$DXU@+ih4U<5zC)34B7+~mk!L|}tCLLy5O@0n zjL|@u2-Ja%Nu~a$;kMrGMx|F_@xtFE&P+DY9gNZyuf%%kl9Mh7`u&l9uN9x9!^|1^ zwm~3F@kzQ$o9g9(21)UUXGg6J%Tj0c9((FWLIJzN)?p9syglm$fZ0DVM!(GDP)3ILLy6vih}B~@Albiy9b7!{=cWchtyG|3VU*f(Mb2>_^(4=73-2J9fKPE zv+Io$0+0$%%9Ot$f-*+%gz~hafkJQ&Tws)kSIUGSU_aRP3+O){@%b6&?)Ir`ZCQeW 
zZ$j22MgN_VuBuTxI-rpfI6B!->mGLWN_j2$_84Ca1z9t z`G0vcDa@^z=*sG>_z6c-D&*tY9(kEVheb_;Hh&Kmwti%lbp<<1<5#%mH`|lL1V-5E z@UAuClHewTXIJ&D<#UJeyU~+<@zv{(p7)woUA)J+_C-&$0Zq5aAeDAjyLXUR+uIT{ zpsUi34oLYhW65ZDEZ8q;FJbEYjwT`wwB_2Q)|FYxO%1C}l5@cymw6=eeUsaT`*cs|1xS?nZdK_iX^e7R7 z%pnZ^J&D{((BBJO0-`rE!tHcvoyspA&m97#d*I8p+0=I@rw!Le893vbiRXcnk&U7W zWHqN>1=e~$Chd{>mFMIB*&5ZUpopc|+LswYI8m`|sF5lf=YMwMq1K9dBx8li^0+ZX zM&~>OS7q%%&}xvLlH#p4!pX@wNX`=%g>3NLf7MEUcca2#s<5j)J{kxqV+&6=*W*$= z2@~-;>Ve{I8}BS^KU`ePtvnW9#Ttu`N68FGXevMqN-**nSMWTDv+(t#AkM?UxiEZh z1H^ywzocxLzhMY$RYB7)UHlKS zJSX0Tp(ovXa`F1}AJ-XEPMXcWMi_H>V~Jp~z&N#LvdmB$)I73lTslnI8D;-|BSQK( z#NtLXLQX;jFF<{R;#1T5V6%90iPh!bmjFb17 z(iDiKtD^sG6IVhhy`HqMfcdz+*3`B7!|){#RLn|qB7gDReoN>uQNrrcL!JzIZWXoT zh?fEN4u?2C7Q{};3s3!fo;B_|iv{fdtnPWhiLYR_zg2oAz4#IjiEnx0SvxoW=Ucd3 zyQGzAp|k{L=2IP3lWYbT0l7*3pd^_FW)q$4|HhiwA?bAqVK;d~FR9`&~u zdo{VarR%L$d3u)k`1tY(&xYX^QDmF$K}rGn$GvY|@sn~DF8M{C=09k<(#@yhtQCO4 zF?WCP0U&FPK2tGRuymwAO~oO=#+OCjeAy*-q7rHlL`v?=RRnMjg)Qo+nlB{JsE~JV zLXE+)mq{|lZ+%zS$BL*n<^)_pCR1~+l&+3fiBMGSmB<%K4JJ&CvHW!8071~rcs4r+ zYQbq{;=j~^!1m?qX)iX%oQ^ijFJ9~vSwxc8fAPKnMTb=_(UvJC@n)L))W1{?%X2uG_PJ>Jyn2|Ye1G_+w zTQTk<6t9(#@OobfvNc|MxG}~R+p^(;M^oaz&glhv8Gqz>h*u=$wg~Zx!f@l}?-S?y zTZjHy!ugHE;Hyb)u`oZY?^DGWfKwA2#A8sSsV8vkrgN7Oe^l?IiUkEy;$d5<3+NsZ zY~H$^fiVY5rNQA|b1>F$#tn@3v$X$&ZENwAqX}PDSyyffFw0%xirGajCXpb$KW3#r zTht|iVSr-40w~CoRg6!A=L=F~L$4K$zFJkUxpBv_nn3rh(SxmzWgJ8W8BACKmma+yqQrwK zzNP!j>H(Um9fFH0VgAfr90I;-qnlPV9rcQrr4>CbkJt8wS-g5zgh#0zy;s=v9HO7@$z8_RlT zmu+meRh!yzH0zc7r!vigm;P*rRQlH@_y>LyL|XPxI=PXR$}ITMC!9DWlc|o>wJBdr z*s8Q--tAh^7ibNuGqJr%dkMC~ccoo}se+vaeyMQgVGklxr#1U+?me5v4 zb847Sip5pd$pZ~5afV$R!szW;GVZK$(k>k~60KHgP>E$lCCt?7^5x66)a4r;Ro$yj zN@#-kY)^D3wjS&sR*k68t(Tu(o)PgOpJnuk`CNt%VJf+k4(jf+7SE{SOZa42Uc z+Tt-3cCTIw#ug8OVfeICWlhP3jLVcx?jU!7`jv#dCN&A$%*~?rol-Togbms0r*$cj zhk|jwcoC;u?hR=I<0@e;+=WFwTIY|}J898W7s7kYD==<5v(hT9*>DKRe7@iAsNBQ_ zBnHXcOGYW@&UNm%dNq7z3=@3Ji#Hf~;w-GET`^f{mPJtp-$Nwf1(~LVP7*;mGryh0 
z3cv0#BvC&O&=2S83+g0S)@s);+{cm|k4cQN^&|$wkS4+cMS;tB5XYzj;Grx``Vqw# z05dixZ6b-J#@qDgk+bhe_#vG)#p!Rvi8pB-3zB0plKb zhf0`Xa3?W>l*!x(b#Am?;Sj=v{s3`6TZydrmQ}>)R`gTPUlg-p19e^xS8FE!g(>BC zbN>|g*rIF+)3a{{NX&M?;4Smox@T1F%KE{y-l$glMWu3AgRMsU)d)hQiZTYGg1lHB z9y76atxqqaYRPcSr)EAWn(#(^fTn5^50g~UN4H;klchGqm!$nu_kDb2nh4PwtTJ@o zYwG}qA@BCPC+=%mpcF_-T1MPVg*G+5b8g>!6Kmn^q4IZ86+AgmscUea+M*-5oH>r#wY_YGfc+0a1^38 zj0?O7olVT&fNa6O&-xXS4M+hDKQ#E?ejEz$P&2DS%uOGmVxq~?_BiO$s1JnCza!%U zeo&M+?so;0+wtfi+a3H^2%iP@qtL1C1n*gtz$f zl!eBT#c;GD;2u;m@}z650F170Q0O_=0zf$+ahCzCce92=oC3a**)MC<2M);U_2lg4 zUhqHW38kC6IlxDWLOjx01|J1m3>%y%aALk+F6H=PZ{19P~=PaL=` zND2+1j*2k|J#99fLP>6f(NbbTC?JRr)smkWjB`Tpr}`Gn@A6o1;T>|dNae8)(QqGY zpoO^Y|9(b}$&B*#f}OR>N`d8beJa4Qgq|De_=j!M@;bP4Nh}IMjOJMIHc)gKeKMJ8 z*Bs+Z6C4m|sv%OGLIs*k-e?iyJ_dmVmdkgPf3w0AH2qUC=@8nfIVD}p*NtKmOrYn| zWBP(Ng-jnB3^|Zrgms{inNjTK;m_d<+{;unSby7#>Qja&Jg@B%=2jK}xLInBwZvf| z!3Txt=iiAU@1!Otf4H>J7R?Vj+EK6Yw`7`35r&3_6x7toY4B@yCPj-5H&BKV8uOEZ zaqbmN4eu}&%s=4ErJaC^9hpiJ&->>l#aNhG>T^Jd%z*0S>9wAlmh-uE7#L8?*cApN zQ!p^l33Ue~7inkmix>M-VMKJ@Be_~~thMF@xbkUDy=mg_Z-ihsri;1d`3}bhp+hLJ z8r40ZWL*QFaGNj@_nWG^`iv;pNrzBkT(6679D&ecFScyM2i*42TTpWo&vGOrgm7pl zV3h4^ zt51PTzp&uB18QUy=bC~r9u28oOS>L$>N}8&42lBy;4#c0*&k>@b*OP-(P7Dmi~%9D zP{53?ZhRK(%e}|&8$BqO#B=M*wF8OvEGT*MM2jCSF&GUO6IM{@1q2X=ptVZ5iZbL# z`Qd8NU|siz!?aCTkYpF-l1_tW8~a$1+1X`M-U!7Lz{#Q{6=1&xNd;ASXx%pln=;)9 zLWiDVD3~2R=ZEZdzE}`*5xv2t4vN5zb5UMzrGKyup`ifs}3ZXiew846beGez`pYuJZ?JbE2iIG*YthiD>vj$%l($(7oisMjN`I%}HIV5sx+=k!WBu{tLP*shmU#w0wEWI}X zC1XAJf^)uhcJH$;JREwfhS6>wK(t|W4CJ^n?(C}scs~`eHwm6-YioO}zqraY+9H;~ zqe5s2Y6EY8*evbAxVR4W;ctSkncYF}!^6eA*cr#-^;km`<1I3dNq!V6&C8OBsECQl zXNM6YpC_iIh!I?wBOtx_JVYh|*OSwce*xg)_HTB`wX|f@-x-lUEQm0>Cm%+=2+9Na zjcDU@(K7ULH{=t66|?I!+aTG#Y9rXns0TXwhd}hT-TpZ+`T0VT`sdW^x9{rKx;v;@ zMKnJIdhz7m!u(X#^X8B<@I1>6vrti(O2@B49|zy6(Jcf$)fe|ZGC$u`Epvc)&-TVA;`^)B>kUU`G%@>2XO+QK))S(^ z!OSvE90FBQ)Sw%FvH;sxv_giIbrc2hH?S10_<;n$WCc5wo`lR+cqKbfbz{ZcR^B+| zll4?s%AmHZB=rZ$Dg1ZCl1#6tv|==v`T{pH)O&Bu?+X<#pK3%5C^7UKJJl;d&Guo; 
zU{e@LcaYI&l?`;r*USuN!;ab6_#=js3~?fR_k)s-+2>xO`Rv`I*KWx{Y$?9lvMuUN zN&Z;c7xvOrIhb=j>78?Um!IHS%0?>Sba0RkjTV~Zt0}r1lTPIrSQXpOeUy@vGGrRkdCVH+zA~bu*h;s$Gz3?d($JS^*-}r-%(pYcV}l+TQmmxBq2hb zeJ-?WNyF-=Lg>KL!x!KJrY5ncTP-TQS)lHqJCRSbW!IS2eXrf-B(dWSE64RPcgg?A zFN%;kPfp%g!$4?*v4_EB;}yeo zB#nPgFOpWKib;6&t;sNlSl+$cF_y2tZg;H#@6Hv0)(LE>tGpLFp?eZE4S%}1tGSVu z*pamt7$(1;sjI8^mWJyzjwK|&vep-7o#D>yPb=cu-J0(VwMer>}}RI|cZ1uN#$$U;9-+LIuz8Ru%(mmOLhI){=koa8lUT~F^#XHgIFSA`Ue2Fh!C=*NV&w&in^6KVTetq%t z0pMSK25Fpt;9vG8*DEr$=-K8*t5{hCwkrw$81!9Z|Fgtaw@{e=ZGl&%K|X9#M7DMz z*_O^}Yk>}V5CHPz4d2~4i#o2f@)8q2xbVPRGmh!8;+GU*aX!P6H&t|%>eu_7u1;X0 zo7y{$6@Kl4jfv-R(^Ygb!y7Ud2p%Ee&bpJ_B8Y_WF0kwkNP&kD;;_W}6Jo;>;!ZPl z@|E-uj$psa5{$WDd}plCL<7o8d16;6E0739VWVS#aRCg!7v?E&Bm#pBoTfJl?diKa z%vl}>dDpL*InlA2WlZ9d&;2@m?YC=KxODU4pC?xzP!L0oNoHCbxPO=B;S6R%$hqd= zKIRe_-xmX^ol=NI>$|}I+%|oB&pD?K>uwA429y!;JzdaLR8-WsbDNGz(F%di4hOJn z$)1F*_(Bp++FA=+IEw&g81-FC%XXrRqE~%7pD}$D&7W_XncraBq#wm2XS0F;IwzaB zxDUXrt*0NuZ+A;eOJ^IP zil7klB)cveSr8hoXDr-wOrp@hhqRL7A@1jose!u8a!v+6z`_FU@wD1^kCq1~pnF}j zAhg<2*V3Go4IWzSx>H1KYvMl9H02P%!Gz@xIGK^Gh55(IT?|5nkTaZaAAQ z$;ikWAVB{@O`c#{4ez~YQvHgw#klY= zyqEksoF2X?d$FT91|DP0q4f*{Pu|E%lmzbt|{|6sdg`r$2 zEZjh}fBpQEbdPL-+yDCqs#9Piz_kC@&$;LIuYbcK&k0V8s!P_@ z|8CQH<01&R|7VlX&{e_xG>9S(g*W!^cF9;}5dZt%@OJ}LwpcrrgYbW}^BAm8 z8+@a4jQihqX3z(vrlwxpDR^GW0T!UAUYuO04y2&akDU`;abjT4J#XDFR8#sWSW)IR zz#xO@0S<9~13va9=0&hgh3p0|dxOPm2U^ZTmaP#IAJqR_53DdV4hpn_PHv`Fw&K8B zwO6+5Gxb8g`x}8M(^?58S~VM(tq!)|Kfe@m|BWi}`ekbmc#ZbcRUhvu)w-|d0$=ZO zB4M(Jx=>#^_~Lhd843NTvxQ{lar`#D7xT1pqIOC8i1&~2*eEa?o#n$`OO8R*S{OX2 z+FfCS*QX~(x4~Y0*mzf~WqDlvw3vie)R~d>^5r>0tV>ADi^^!ycW}gr*h2n}H&DI? 
zhlU=u%83{0?2{vT*bH!s_s#iRh>z>+oAVjk6Jy}K8-y}>Wx6=0#kM~uO5Y{*?Z=TK zy_r67)5uZ?+YMs&ab=3q+1-#KMZ7+p7xQ{*Uwj&}v~;e!1mn7KA*igZEH#l^|9jz` z4Y@{tDEl#4jxz5PQd4<&1q?h1p+<0fES=Kw^75o!9_$iYU{)J2%QZf4w%!OR{Q?Bt zjqTDX4{uJR6%o`kWUZ<`h-XXweK`%dpMlC*3+IP^i6sczh*w=S9hiG++6t02`{MD< z2OdzWDebH0QldVYXE>z<0fKRk2l1L3*?ge|NTA{#nn^An42-@v>z_H9dBv)JO2x)m zvf^L?Kne^Ij1M!bK&w5*REy=OF+I-HSD)QE@A^QraP#V5aG*D)#1#R=rN@itpvvqo z>`{4MsX40HH$-PZC093*T zu=P$UIJFUFjrqtS=p!{;M`Apr!C;D zt>oMAc&>&0!RN9}z8!@%z@aYokCwf_v>AMHk?Y?so^94QR@LrMa?Uw==-_*g?|f7E znID*bZ!OkY1Cok+*yn3UvFo4fK8>x^39y$t&w5JMlBE;hM z)$H7+1gw=c-=1Ce=VJbJA@*lq*!Q#Z$;_zEn8V+ruBMiCd+n`lx!J#b+BPyjToALR zK4^tJQt5W!vXXKT`@=<7*A@imMLh}BM=By4<~2NbZMzDTy}1`CJL^lJIkKz*Fyb2X zP>N|_v1|)0mS4|Y> xxx.weight + TODO ''' state_dict_reconstruct = OrderedDict() From 9c4397e06ecd98cd1416f2e0c1e2e4744ec2bf87 Mon Sep 17 00:00:00 2001 From: Hongxin Liu Date: Sat, 6 May 2023 18:07:09 +0800 Subject: [PATCH 14/26] [chat] get images from url (#22) --- applications/Chat/coati/ray/README.md | 58 +++++++++--------- applications/Chat/coati/ray/assets/2m1t.png | Bin 54081 -> 0 bytes applications/Chat/coati/ray/assets/2m2t.png | Bin 171753 -> 0 bytes .../Chat/coati/ray/assets/2m2t_quantize.png | Bin 134715 -> 0 bytes .../Chat/coati/ray/assets/basic_structure.png | Bin 41408 -> 0 bytes .../Chat/coati/ray/assets/tp_ddp_hybrid.png | Bin 111006 -> 0 bytes 6 files changed, 29 insertions(+), 29 deletions(-) delete mode 100644 applications/Chat/coati/ray/assets/2m1t.png delete mode 100644 applications/Chat/coati/ray/assets/2m2t.png delete mode 100644 applications/Chat/coati/ray/assets/2m2t_quantize.png delete mode 100644 applications/Chat/coati/ray/assets/basic_structure.png delete mode 100644 applications/Chat/coati/ray/assets/tp_ddp_hybrid.png diff --git a/applications/Chat/coati/ray/README.md b/applications/Chat/coati/ray/README.md index f9133b049446..eaddc4f9f4f4 100644 --- a/applications/Chat/coati/ray/README.md +++ b/applications/Chat/coati/ray/README.md @@ -2,17 
+2,17 @@ ## Detach Experience Makers and Trainers - We can completely separate the trainers and makers. + We can completely separate the trainers and makers.

- +

- The experience maker performs inference, produces experience, and remotely delivers it to the trainer (1). -- The trainer consumes experience to train models, and periodically transmits new model parameters to the maker (2.1, 2.2). +- The trainer consumes experience to train models, and periodically transmits new model parameters to the maker (2.1, 2.2). - Using an experience buffer to overlap transmission and computing. -In this manner, each node will work continuously without model idle time, and different optimization strategies can be applied for inference and training to meet the needs of speed or storage. It is also helpful for scalability. +In this manner, each node will work continuously without model idle time, and different optimization strategies can be applied for inference and training to meet the needs of speed or storage. It is also helpful for scalability. `DetachedPPOTrainer` and `ExperienceMakerHolder` are Ray Actors (distinguished from Actor Model), representing Trainer and Experience Maker on the graph above, respectively. @@ -24,9 +24,9 @@ See examples at `ColossalAI/application/Chat/examples/ray` ### Setup Makers -- define makers' environment variables : +- define makers' environment variables : - ```python + ```python env_info_makers = [{ 'local_rank': '0', 'rank': str(rank), @@ -36,8 +36,8 @@ See examples at `ColossalAI/application/Chat/examples/ray` } for rank in range(num_makers)] ``` -- define maker models : - ```python +- define maker models : + ```python def model_fn(): actor = get_actor_from_args(...) critic = get_critic_from_args(...) 
@@ -46,27 +46,27 @@ See examples at `ColossalAI/application/Chat/examples/ray` return actor, critic, reward_model, initial_model ``` -- set experience_holder_refs : +- set experience_holder_refs : - ```python + ```python experience_holder_refs = [ ExperienceMakerHolder.options( - name=f"maker_{i}", - num_gpus=1, + name=f"maker_{i}", + num_gpus=1, max_concurrency=2 ).remote( detached_trainer_name_list=[f"trainer_{x}" for x in target_trainers(...)], model_fn=model_fn, - ...) + ...) for i, env_info_maker in enumerate(env_info_makers) ] ``` - The names in the `detached_trainer_name_list` refer to the target trainers that the maker should send experience to. - We set a trainer's name the same as a maker, by `.options(name="str")`. See below. + The names in the `detached_trainer_name_list` refer to the target trainers that the maker should send experience to. + We set a trainer's name the same as a maker, by `.options(name="str")`. See below. ### Setup Trainers -- define trainers' environment variables : +- define trainers' environment variables : ```python env_info_trainers = [{ 'local_rank': '0', @@ -76,7 +76,7 @@ See examples at `ColossalAI/application/Chat/examples/ray` 'master_addr': master_addr } for rank in range(num_trainers)] ``` -- define trainer models : +- define trainer models : ```python def trainer_model_fn(): @@ -88,8 +88,8 @@ See examples at `ColossalAI/application/Chat/examples/ray` ```python trainer_refs = [ DetachedPPOTrainer.options( - name=f"trainer{i}", - num_gpus=1, + name=f"trainer{i}", + num_gpus=1, max_concurrency=2 ).remote( experience_maker_holder_name_list=[f"maker{x}" for x in target_makers(...)], @@ -98,22 +98,22 @@ See examples at `ColossalAI/application/Chat/examples/ray` for i, env_info_trainer in enumerate(env_info_trainers) ] ``` - The names in `experience_maker_holder_name_list` refer to the target makers that the trainer should send updated models to. 
+ The names in `experience_maker_holder_name_list` refer to the target makers that the trainer should send updated models to. By setting `detached_trainer_name_list` and `experience_maker_holder_name_list`, we can customize the transmission graph. -### Launch Jobs -- define data_loader : +### Launch Jobs +- define data_loader : ```python def data_loader_fn(): return = torch.utils.data.DataLoader(dataset=dataset) ``` -- launch makers : +- launch makers : ```python wait_tasks = [] for experience_holder_ref in experience_holder_refs: wait_tasks.append( - experience_holder_ref.workingloop.remote(data_loader_fn(), + experience_holder_ref.workingloop.remote(data_loader_fn(), num_steps=experience_steps)) ``` @@ -135,26 +135,26 @@ We can deploy different strategies to makers and trainers. Here are some notions ### 2 Makers 1 Trainer

- +

### 2 Makers 2 Trainer2

- +

### Maker Inference Quantization

- +

### Tensor Parallel

- +

-## TODO +## TODO - [ ] Support LoRA - [ ] Support TP & PP diff --git a/applications/Chat/coati/ray/assets/2m1t.png b/applications/Chat/coati/ray/assets/2m1t.png deleted file mode 100644 index 9281943570d3686a190f1e03f1b8884ca90b3f97..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 54081 zcmd>m1z45ox;7vvAxNWuq;z*F-6FAAbV)ZzcPdHM20s>+N z>S=IBJcWf1{12iNL`EE;^bP3@0)oh#vy`T@t-G0}jVS^Rm&DP(Xt<=nzfCL+%^eMG zX*k&}O=!3zX*fCEJRPL0_>`5+l-(qqWq6F8*;%B)X>f&|p{?oBHOhwOrYsz=vxg6{ zb(e88ykT+8-o(@fTxa5bbQK35>^9h)CLYIU*f|dG;$$~>gum~^2~|ta;}>wSAKhr~ zVrgRPbbRyCDQA0o8)wTKzdmVfZ)ay}?EI?>4ILfr-F|(UnZ3>NDv#fK11#iMH^EM* z8XMaDcvi#G#M$ELT7I76&x2hpYien30e>?G`|&TfhVYAzPdHf^n%KJ?-G6xMP;!p; z_TW~jL#0TV+Q6g-m&?(8(*NSu@89oeYIpqde}4lP{5zaI;EOXg0c$-zw0Cs2us652 zGqjQV<*c}cv#kyIWDXiGaZ`6oXU!A;)&l=#XXQEmMG~fRcK88I+lL2s&W;|io4y~M zxC{2%FSi{Y!S6FMbh0poDGK&tCuc`{Yf}k(8+%7^hK7@iOM+j3AIL@g$R2JO{vv{7 zsW{oYI2s>oko(vKoedq$O^+o7S079OhtK3Zyb*58ruMd`;NyTU98GNuoh@BYUc||6 zcx+SV->*A%12IQOL)cyL3vSq3+BrMHzVi6V%CHmQf@A;Sf`12W-QY5g$uyk2Hb7|NX7-04Zv0&f zybdlf%LO{%GBaZb|A8yNpW-!#{REzNbTjyrqx+A}AF0jh*LJ92Xar&h*GY>rw6Qd| z1ClWY7H$f3FYapU=xhmmQ|$N$TT2rYm@SJtnmSo}8X6tiHO$nG1rLPEBTmC33C{dz zDsX>*J=FAAA;;@K4kpJB1h*c#&auqCi}br1*;&~+c=?Xr@W%|s)*Y^4xXIX?nK=O~ zg$2!HEBJ5dk?Yvxf2Buoj{Stp%^mGsZu}?}xCQ)ap}@s^Y})(+tOCa(a{DEYa>B2& zI2l9XUICZMNk99QRQ`=Ab8#K5?Y|*{@6!A4%izQkIRuX5?q9z|?*Dp;K*@F{-y^A; zg{8Bp>J7ug(C7v-HgJp+5gz8F(uTH{HZYyZn%cOU!n_S!1N-4b{FRnI^!J}S6ev@chLxw6%9V4jZu4buum;9@{$`I-mF* zluM>3k4;T1Pagg{4#6Jqqr-kT^#5AoCnUpcl=AmXkuu}Z}w9$@*4A-8kzlP#0U@Xe}@?Pe=tT>Y3N@nLo+io&VONw zyhgk{ynm4le-kC)hku6*1%8krjF(>;qLu*jqe$R3zf+VO+VU_fWB>n4%XJ>B*tA7Eo{~jKXPyPPl`DazZ zFTwdw$I|b?^T+7<(=7B?Bq45i8T-e846LmA8IXbNM-lKFM{4?gxaOasz5kK2j_bIv zf@>eH)e`}W10KXqR&{Vd@MnrTIO0F)ZkozJge<_e02T@6K0ot-6E1gj=SiC3UqXa` zS>}Fs?qh*)AIs;2Jm5?J%f)eAlb;AHCyAfqcyVIAe<#!P>u~n($sM@VPqI63J3Gnl z!1e#vu{*yPb#ng>NcNuw-rVqP19v@mje5e=IN{joq^)tlPyQ+2`>O%f?~Pu^vHNF* z|97qY*AVHr@P|v{q{#kb7?k@*#Pma~`XwL3P~d+Nul{xn`o{&#cU%m>ZS;561|0A- 
z@~2GnM{M;EL8yORj&R19{aBFj9CE^N;qHIZ)!`xHPl@qIZ1qopsDECLaQpl_1bb!QWcsJY z2ro)bnB#v6;Pd`SX8t3Zr+-|A@bdRN(mV?Fe}@czq(lD+&C@?E!oSlDf22o$+B`i@ zMkl%)|BUwAk*%HV9{mC7f4A7*Nsz*k{-5DS;G>Kuk`R256ISE^xcAd3&|?DrhbhqC zG*5w={y%G;{sDC#r%rfN3$A^*R!;=1|5)>s7hb^sl02pU+UDuMgb4q#%>7=%9t-4e zY@YHSL!lF4<$ou`0hjtoh67%}pJX`T`v3c3H{ZV}lYYKI0qm-{0UK362A8lc0^+~M z9REX&?O$zSuz?M?iyInS!)OCL6Zj;fiK&^P%i(za51jD#Jr4gCS03+|0efnGJ*4}~ zgH7#Bz@C7^eJ92?hE7gkg79Z8Qh4$_v2Oxy2q%pLp7#HK>aKEcB1qm%w*i%KeLA`WJfmFB_AzKyZM~u)&5} ze%WmVJAeu3@R#qKl#X^Y!4ALQ^?Nrd{crSecp5$F;ata{@2~Ul!>s;CcSiBTr!9`H z_?WKyR~w_glP^EL9PFU^ll!86tcl=N?7yps|JA-Ikmr712@F%he|TTie=DB-l3soU zH*g>S^U;i*m4lu47(f3r!q;(R{ky{#EW7{l@O60C|K;!n?>?UlUvTXD4+~%1e=vUi z=K4P$zZqo_keLlX1!pbACJOve|CO+_XNOz#m25S3MurF^}( z^QPfd=i}FJ%qlw;_ouv;V&+F37neF{53Xz`3o*V#L83tT3TFx8-wQE?J>`L!|{LHSZ(%E^FQAGdqQdjeuB(OaXB)2B-Z+ z9gwgKo>IdeB#MZP9OXHUt)e;TV{O$>HB^_BuR<+;NevMz5DmPZkPacr%Hc}_Htd~b zwCUAOG7|RB<+%NxQz%$2XlM2pg}5$WQWHh+)%cu?h=_fIAL(1u)D6~_Krsa5SlN#m z3;Vr!A)w1=;xePl<$#^bV zjVCceN#?;Nvy3)$t3WTN7ns*j45(u>1J$E3wZ(wovx*}NdWHyuWyt)&S@Jjm~W7eu`HD
TQFknXWZIOA7rooFHqc_a&w_CD}3de$Ub=o%Utwj0n!2xW=l8RDY) z`-h<6hd@hE3;9vd^AygKGzK6nNlUZZ;PM%E)ca@Xf=@$Vy*3?)vR)v%u9YEQw-vwq zSy&~(1E%Z-1)!idq`0!GF;-p-$-)Qe#+umfq6mR%52}zP3t#Ty;#Q>Bg(hpaNZ<0y zX79a@1zvao%|bDy=WQ4pu3p{t7_*W{_^dA%XUkiP24r#Std@LeA1f`@Gm>EXFrbLX zyNK36xumfOSgUTzEynLdDX@tvzD5tdFW8K7$U0EICB`mJfe;l+f(SDLd{G1{HAF>! zG_0%X*Pv_33Bg}OurNe3Fz(2zB^ZlmW{TVmLrp7Ue{%RG570!j6ECpeK7)w;Pyn`2 zCoKzTIV~?0sjo_c0qhcBkKoVW6ixK3*8Qp%?8^y;C1V3KT7 zJTLSv`;;-e7O-Q}aV{V8FHH%l4ARJl7a|}^z?OFIl_ILWeg4JKDpJH?X!|2p2ipD9 zZa22qkVa0^)RiK{7$P0NagYFU-h{C$1-n3fLoTUR3h%WXS6m-=zW|>PWtf`b@QbXM znwSEe&OP~NuN2N-X=yVv@;v^iht^0LD&4(C)Gy5k*Qso69KbVN2l0CIygWUj@;whP zGPcoWB12Sp!sRD2A(l{q_cQx`ifO5~rd%9X;0p%c84H9j)A09lzo=Q6oOHsdQg zscvX^?lbuuaeOsO-+&88F(*O{XxYQ}wyfGcC1CT(3dH@Akb+4HA~msy0EVM$T$K=p z_u8BuRZA7tuG}xy4GR=pUn?fqaPI5BSugc!Y3p=#evhtNKrm+ry@s|^InuGFupgoz z<>D6T#*GS7P<+&)qsyaPU0Ja*H#grQz{iiB8>w~sz&GZ-dY7faKfF>#v_xHKd0dZ` zm38y-l`AnLA3l_RnVLGdA06%JAN=h=tFnqw^DI10Qe4dz6cXAjh2*q7ymjl=TS6A? zY~h35RlBast9LaTZ(|lnhZBDpA4hyiaUBOqKtRBGZ`(!CY4Yse#<2Z+uZ{WF0YBl@IE)QQ9Ti6>{(2zt5U|S2D+K)klAjdc`X` zFE4LJ{Y)5XqNuAYukGkFtvctKo<0mBR{Z-+8g|_<0S-1y97hdLEyL&R5PSZZdM z&sHziTy`NMirL{ z>-WgYM=GG8!1b3^8?B>I%f4Tx^<+|mI6QZuEsm*9$ZfIsSzH4Z_C3+-1EqT0Caw3l z&CRD0m^4b)XfkVYYch@(m4XLb6e+V*cIDU2&4c}kcmvmi`6pdSu~o>L$Vkn2=lO43 z|MVtB$SEc!#(*Z%xH;0F?D*vfNRyYY%VrBtO-@$Yz6eB_9dEotbmehx@@YdG?1%UP z6YcR+cQ0_=URhbG!RhY0pGdTN?CuDt=V_G_WqsbffA6sgbe-k7-mkHhK(0%1-3__Y zZX|K6NP{i!<;#~ul+cAYb(+{P4am~^FZjOBXMXqaQq<6g&Q6xaqO#|V19e_*=3!bl zLqr?Uk)Q7@3e65z*(>Vn>!adatO`U+z&#XqLmRMoQ`0lk5A@Sy*Vi^TMXuNmao6Ke zOD8yo8Dw>TeOI_b-$0c_^>K1inlUazf#CD~a+BB5cOP(>=hD#7@K8}*UTEj{w!nm0 z8m|(}J6OG^*?c+^dFsz$U|8>MO{KLnMz^)Lrjz@A3&0{5R%nSN&kG}BgE%^JK5A${ zL2^X>N_0^~M5Iusij~KHGgdFUiCt;Z?4(8_D{M4;UY1N`Pr#`&7Q>acePsqB#FyRKD=d3+JIby4@}ur2B=Ap< z)_IM^dSE-l<0#M6_232-?AKTgLZPprg`P&#&bKe^H?MYEf-%STkgyhJ_ z-6cD7Q8b+yxu>_+a-_zk+<6A#xx3tA`L0;&Rpb=U#!$5roecD|Dgk( zGYvAs{Its@Q+8pr+_JZyPBHl_SMw{xQ>T>`6|sPqzyH$zu6Zz^k|I44A&s^h*zL;K 
zujy>hzrCcF1;%kz&|~Gxb*dpNs0{b%!vOeD4&=c$rz8rVi|mpvXJ&^n|jn}w?5d9j|7D^L-n#zFap8SM-s+z>>+HQ)K!u7&z9Xfvggy-9{OjiAcaW6K;e0NyL zOI$w!ab7&jD}7w9B+>f6WSFQ{nwXpOn)vwQW(AqoJjcy<%ZPg)?B)j_ogSu5)|IZF zA!_A&h@$qjKthqVS*q@=;`K9_eJItj4?II4cBIss@{!6Fxs74!E^}{1+pY=9Kd;!n zrxAzk=op1RU3rSJCqA9EHNAM)A=c3auX?8{yKv#%h52XKUm$JoEY%0yJiyfZz{<=# zZzV4;Z!2NDEPND4Z1F&%NIig(k&!*%v-|LMWqFyQUr^X8G%Y;!-n|ACnw-jO{ME_a zTLsX%eT+7pN8&6s9*Z)N>TTH9(2sxB$$qVvJKLGd(5F4|z-9H-S%}BBN=0Q_oRuyF zf~!`f9?r?hc`K_xz80dYy7eWjI5{~Py-kOM`AAvpG>A5vNzu8M%@B}v3GfLC1xbkQ z+V8WJyDn&NkKZAew@rND9=n*l6+O$nWN#t+tW4r#&+M7b=Y&t6KhMM{33KpEfS_wsWj{9Bd|iV3s9HTNSP_K-B}rctjMbNLv>O|SPAvy~fs{VWpj>{A6%gp{7%6G#`3Sm_@pzK%wzVyFoMR~%ug&qBsqx*bQ6A~)hDs1g z7E+44lfz=j{7nS(yx1w3*Hrx@BPrh)F5B7JVHcRKQA-wHehQJa($^^3J<@L=Dgr+~mMPl*s!h_7_{;VCM$R0#^m#Il6_aiy({4yyPZcO?gj}4Rb(m=DP(Ac?aLIsv(GULpX(hcnuK&V6ygip27Dzp zcr+}WbB`_wCu~e5MubrR^QmP%=A>)B!jqBm)X}r<^{*Zp9fCF#n5ze7TDCj{_9882 zPea=~FgGcT&(cp=9Gt!Hc533wWOw;i(R9l5k=C(+Bw8uftVr6kM6WK@w|shXV9kb> z8PmiZHfVUti*R^xK#_odWAg&>zF{nd{?_c3ZxY(PeB#sg1sGKQkZg`~u=qAebeN^n zVDc;`;a$q*8%?3w9h`r(EfuI{pjhbohOx@+)q-z?t8a}1N#}V!%~mPX7w;+8UT4Zq zuWc1s%p?aJ;V3UG_;Q3YqHC!PR(jM3RM@=J49;dv-3xeukEH6$OaZwjhi42+ZT_FJ zVWw2JAfIjZxnB`cR$j5uwLaA=GAqme?o+neChN&F61P|mo{A`FZ=WuEq5t zKCL^?G9m0@g@R6{me$I%DT6@O|KS$LVTc~215#t^Dg#Rp2yD}+*JmTdnQWa7c2MEo zYkcw2x>b(FcQs@ycL+&U$KTPqItm&Lo$&c+h;y822hkC0u?w~x6}6t0jy=6fpDTW8 zeSBEOARFpcyC1Ys5+)@lFiX5T?2yR0tw+q~q|%K8&6&s+&mL+kLmWgq22gF(*NLd& z9?edsSfx3IzAU(7f0;1e+frk7wToGLz_0e|;G~kg9A0ReVzM9|baT}@m26R5tiQ}} z-}qq-IS2^k?f^Bw3_Lvzp>ikA&3rIiR*h`vv6jh<6QtcN$I*+DPObWRfa&2iCP?(j zkW@E{pSq-IHe=T+j2l4%@#5D${&1x1^3Qx6rW*(uf z7V@t{*A3`lW%9F71EP&61HkfepF+;K!h&INcqr73sx7X{aqXx+L_pl3y6za|X*VW> z|Djl6G8-!5vny)^s-sPG=GE4S*x*wQyu1oJEW>WA)v9@5Pd+sfr*G-Vmp$eC# zF1U!ZJoO4Xz@@0DnBwE(W6<&Xwa1smk=i(2U0rkxboAu*c($~~@o@)A8XD6o?UGR` z$@)Yq$lX(iYEu@3nc)%^N40FY2yfam#&6MX{bp@CB`;azYh41POXukh;E6qJz4aQ?9W>oYd^DfPd>)9CMuq0yBf0mk@yi10&M_bKB9C%o^ zo|gh{Nv}(2Y2z5xramUT&M;wakkzow79~Gn6BRVLa*6mFN6pN` 
z6=t2p#DdNifG!-rL8Yx$sqz-%&`{I4VdgWsXGj>;*8a@Jie=>5W%=j*+V|oOE>fAF zn;)_)C@O&DsH6_Ss`39$=s#Kgq#+~x;WmX}+|<83tLFb}rp*k*fju1b4*U$#Gx zc}<3W3g#a$r+SH&a4mbKMk#qrv8A-ByX2XIoD?IvxvAr-CcTG#*oq6F#L*>Cs)qYTon zu)1KXVHsYgUnfsVNeQb@&&x;_UE^gx4d;wdaM_b&M>Yn`ov9bl`wKqA7J8hCDSDEG z6mPE=vx2coPr1OUCx2NkV6>uO__IC1g>v66tr}OMuteT`#fnotuSW>B3Tj5%I@!{h z_#o3?-X}qygc7YU^@%=~=%k0}0Mh7SqlMg$9{_XR$5$XEWFo%FsopDcEo zk@799e-xs197`i z)6I5!nZFKKJBh5wn%{3gfoeURF4f)Wb$6CbU3kUF;0B9MrNYi?mxv5*cYO1Pl-FUg z5O4=@w2Vuo?A5X(-&!Cm3&2TIRP5s`4Www?wIOkNehND6@Z5VnwhEa3!u)7!(c zt(HxJ6*9GSLE>WK8l~I7%3)|}>#V<8doU!*c7<4O;z}FC2Y@l$_@>BghkWi*-MM|9 z`^{OZ3aJXKeyq`F9#6!fA~)vWt9C>H=-Capv1@X2a@^kEZj6s%5`R#d;rH5L zUSAwZy3e8`4JeYnVZX_a2i&G@v6&o28fB>u?%l(dmTWo8dL9!FdYe%bbhFVmTti-AjA;xR=`cs*ag&hA}We=ONlvPq|5MGL2U(vY;l6dIL)|gSh zgS~<TOICdRp4snORvu<1ZT<3$7*c2J-h0h|tl=SoGHW9b|F< z)Z()Qu*3s7N%9NYY@>i9Kg1C_)Sw_{j}(I?hiXYHi@wZSNn*Zm=_iBzQdeX&inn)m zT1vH^(0+;2(C=pAbkU#*RV2*y4higUY|MJK|HP~_Q_y7=Rs4S0QvE^dsMk{6)9mbQ zWsg319t0#w?V0a6mm^*lGenzTTuO0OHphVqQ^1-#k!HxKLOcjP1W;J#@^z|8FGaN# z;a7dz8XM9%*rqDj)~|J6rUI_As|fy|+Dw0WGO*PXPmW@Qr0D5|WIede8evnJa#_n= zS35waX-@KsQue2ViAS1hmfTm9_4=HQ=oDmITU&2*N8Fm7%wBl}eOA|H`wcg%VAp&VjP z*xaYC+qw~(06~D}iDcml={Y#U35G^#&s7smM-dtD@@v`Iu?kc_ophAIENCx~&7q0b zEVf#a&Dvt+HBulCpL~uL9UarQz|ZC~^(I9;E#APlTGn5FfsLL$E>t{=Z~_-C@=_8B z36AhhgDG1ks_m~YkX#JXzI|L55L~$G>CL6BWI1 zHb~pPp8NoIQOZEaz}b`ynmXYWZs&WlKFqE!2OoXvd~3I#Np~)sWbe+4q*E|Qm=A>o zw<*Q%HBELX)EFaAQ#VLjDi1q~Z;qLuV9AP`(N}vRxi5JQWy-AUjl7bQ+xpsC>?Mdt zkskn|#xzi*(UsncFGa608z!YZ)W6RxytUq=c;=%(SDJ_tEaG*vwl-wctk@;p@w_7~ z9T}2>{rv5#MkD?sZ=(jUBSvh;y+#R{P~nY{b+O`N6O<`YCy18k65^Q>abVP*TUzq7 zm}rZ$^;(-&xdd@>f>bX&_t~*no$M6n^Vup02ngV|8Dy8Eashk>jB0NLv1s9o3LMA6 z3Mw)H7!s5Kx)pe(K&95yzejLI_gyfGtY`7v^Sm~LcTL;x6EJB;S0(m%og){%Y!3*$ zWcv%IZ#+mn=I^xzU4t_AuMGx&O^xV4v_69Z|&Q64BSj?wCa^4tw(j*&6 zDiV16?gjDcY7cxKo>0YIk3hm=ebE;#)uR%paNp-!eUfxV&a3oBd0!o5e2Gi7P~{0{J# z)Qc^)hpfvU?8WRaRE_n#&5%%{1sp!tjtQ2qm@IS+pFm{`2ont}0;&BBVlAw%Fi}0Q zD8)!^Pei8waDJp(mqx-DLYRr%W~bzu;j;iY 
z&PNq0-J6`8e4AWgJQFuCNVVy3u03aP_>$W)m<+jh*5mV;V{cJEJ_tVm$gd5$|&F28R4G4IWGPxu4;QL zw4RtPSW9Mbk*LX{Jx>CJ(Hg!~dfvP)J$aT?E0$m-iQ&D=&3Dhq0Ky>`a%1Flxy`su zp-Pcrh*QG0S=D3N35(e>@J*hcYtuWQMo9-UrNXAOz-O^AFw9(Vx*N%H%;$@uAG+T4-RZ+ZtVzEOb?zT^@_<4V{kTw)epI*l{N!P<$ji5f)^XXUFz-B{IG{yE?|A z*F1?-F`GCvJ2Xnwfn-ei()!o^E76YHo$D&f{<0G}b>mZ0RV(1d6C`|gHji^s0mF?l zI|BM{l#**XuTM9No<_SR&9Zxm*QjIXh-vii#1$QnUC}ikpo(*F5}sYq3Bl*!5>M|E zC}4)%oI~lQb0WeR-h1!%Bxu%;!>u2ua_51RoD}4l)EHy=&3iFc#j;YDDyAibUjm|h z1#q}*>bJr=*CsaB`*sT^vFIRI3m3;W7UIjVZ*3d3D9D9puXIC)7?7CVw}^$My_0J_ zM3B)#vO~k>r{?E(x1M>dCb!(ZpgI}Eru+0>b0oQerl#gMS{z&YRncipGo){y)y$ly zQIU8h4vDw8kua$An;@0p`Vp}wgm1Frx{GefGKN8xT{tc_bW;^{NN*-XOW&CX6f()X z_)R|de$HrM)L*YT;y1rZ-Ir&rGhWy8MvteZMa+;YE`D%uY|O8szWyK!ko`q{c}nE3 z+d-=ZuQP#z`U?5J zlTd$!y;uJxl2oCEz8i=HazQ~sn80qw`!XcXM7EH5-TlUq4;b*+m>3B-8r@hLOM7D4 zPFV5e$?AJhCq)KZ6xD5^&4FQ-!)i#9;2LpV5oiZ=wFC|=pWojE^oG1#mRl`1hs7z$LNQ4T_~?~74UCLgepA2JI7X5W zdI%sEnDjTj@A+a?k+_jIDjTV`D1XM!Epx!YModu=^`qQ+hIUCtnJamg@I|r==?WiA z*PF6D!KjBoqagzoHkq-=TP-_5v$(y(B;O&Qe*Z;RObohs|GnXUVxEh2yLf6Fh0-bH zUMajwB1%<|)qGW12}g)nKJjE;#uW$i{K80*^;zXEWv6kUV5nTe+i66Y2GqZ6V2~NO zRrO9YPdcIUAFaG@4SIW!gWjQ@j zez?=0!kuf9lFoSxBDdAnLPi?o`?kBS`qFbFRlKp~c+le;`I^bu?$o2iVtv%2vYMEh z8jvngA87CF#B6PEUsW#ZeV5*v9_;0-)eayji#x5i;w$vtEK(Rvea=Hs*EV!i5xK9;?{B z4^CZi_U|9(SnqAGGtkoqdwYAg_V@SSjz80D8zAcID-0UM5WtXtd5(w(vMY(OTs?Nh zXS>|IDU@LI5*=L`W#&awJq$@Jt=Ac8*FfK`!gBM4JN69|q?>hhb#w#8TD{iOZ!<>0 zoJ$^<)ez!5SF9I+jEje7Irpxl+^8wE+$o&C z5;AFsC=oe%no=UK?JSqeEOZI$q9A|I&?9vc+J}AzzKH@(W<#%I=v2XfV|=ua!TpU9 zcV zWu%PMfIpbVR##7N(|5UvXkvS9<}=0BX3mS@4WRF40b0(RE32zQ_b)*yAr}`)8$3~f z2%q8rn1e5$9(CtV%My_DdZ4IU{+;XhU!y)$_5r;_fz0jL=OupNZ5hp?m6G6X_n)EY zh&4E%e%sngNfLCix?-~|6w!6jC+FF^h|{EkFSe+U9-z}AeBJn`x51nGmQ8he3wPR4 zu}1DvHK^?ZH7~8dH&1)*8Q~6EX$wOYVJD_TdVm=P8??ZtL!MSwtB8ss^q1(wC-T`V zf|g|tn7hHNXPxq#E2(-6`i(VORkMI($5LCX&;o1Wof9lNAMui*=(=0ETrIRn-L|p6 
zNP{W1A{4N2et9rT3C2P&GvEEj+z*(bz}fyH8PF_`V!1@xNvXNii2N<=&BAci%|8HBfD~VtluHWPmuOqjc4udD^Q&Qb+pDdc9f=v@ntnfX{8cdFGraiFF59W5_COZh;UbcLYz|D4BI_C&2F6t(=jpe|%Q%J6QdVF>g8{HKjgO?2La`{)FCl4^ z>F4m;4!s54+p~!%QC8SE#2{O?Ui!GO;Nt_}omjK*EcfNhx~)PJEc?Jw+RBke#2UDf zK_piesbAvgXW36I-DRdW@hgtA}nERED^e9LViKmALaSClq@ z6;M!npTH}nrBuc|rY_1WDiZp3wX+_acZ*xUF!xYNWUr5295%Zm1ZGzd z-du|#l>@F^tlZ6Gh$Lee=kSQ)lPwk&Q#@^RtYL{|3HHF7;dfkS9rW5F^}=APijt=h zn@BYbx*zoVbuY7mPPqkTuxW(minV+V6*YB``~V6D;dyS*y`!M2wNoDAbML>I)L4pm z-NB6RBdET80IjEY`|JfO57zw=&#e{H?fso4noThY?1!fVH1s`!0&m_756uTNPSLbV zsn4hd1geWnqH1i1dBGgdg}t?2rFWWnqXJj)3XCKl`5o-zaSo(=2ems6J+iHr)5Gx74OPqV#gDq9|-CAB zp&?V3a$!1?e)f=cJiTCHBBx@=5#1i=4Cb|1aRW)kCNG(lu6(I@>^@L=&FdO~F$P4o zrcqi$o;^K1x9pdwNV)}Tx=kyCR3*!)>*Sw$r26f95DR4c2p)4PW?Iy7iTD3%6 zWFl6wp$oMjbk@xfuo--qX>qBmapS{NhtHH5bm-!~RIfp+DNK?>Mn%ipff#AHC7zes zcD!}tp{BWThg`Xo&hDJfxiCAQ;cqojTKf+c_}pfLIQWdk#gVS8v{J{WrYeF#&(pQy z*~F421DK&9eG+$q)yrtLpWJTkjj1orOn*~aXCKvcWf-(`>~rkaSI2_E*I^(ceM#4T zC6e$88JS-u?xuU5VoJ2``oZ<~1D@AZaSJ8{9paHrs&O~sVq*#~mY8I`5&`R02=;Yc z%q?}tMIo>H{8>oT|B+~4L^GgQiSWFJR3r!+hHxZQiXBF&pWs$KtH`SEP%==omaCkZ zd>*>Gny8r&8W`&&q8aP^^~D*fD5$N0C5D_zwxi^Ot>9rW@c1_RU6$9m>A5`as5aqh z#|d-?Zt*shD8``Zy-Eh$0_y?Xx>c<1xiX3Dp|aYNM~`0{w?w1}`t5sbFvQd^H??;_wHFS zY(8L`n)@Y{=id>@O+asLtxjPWx<}-O7M!{LE^E#&XDa)#{T2oxxghg;plqCcoA5Bf z@ZyvOqhz6v;E;ULr&GI|iz6f*LAkeCODCt&8^0}o#^h<*p-7iS0<0h2tXDR4#|gUx zs4PC5uwa)Z!pZ2Ve<8@Y>L`Y?*`e(G;;k5y)OX6m0$;sMi*|%GIU~K1m{{14rYaZ` zC@}gGy(-(Z;Oi3wB~vQCHPwgYP?Wsz*+AN;4YB@BrW#+Fst)$bZZexrg_T;yMS~$J zqf9Why5>zUqmKWId30@J@O~wk-V{@H%>kf`DOnRfC7<8trGu|UEPE)=Anq~axpR>n zVB**P0%PK*Pk5zh<72(IhZs$5AD%*XM!O+~e@391_SM@*mLDoz7lyQ?!=R~aPr8Kh z&|UZ$H8R9eiaav2lcA$zQ!F;6&j%txA8UTLpq%P=jG!|hg%DQN3mB+X)fh0BGO|Q7 zN}D&$`aYsEzz^D%WDKj1F8o$uFi_RsHZGvs|?hQu9Q5u<2oCtXTACNM=+;x3<>lAP6!g0@RP z1~N;mC3|omr{R+%MNqGQmd|08jF!d3z`%nvr21@V<&>Us4e-B&w6n2*7g~Le?6Sqf zI-96AE=fsA)NZJ+ID&n_CnQ7~?2l|zyU0WlnbJ~HaqlmI2WCA9i{S4VWBri-MM@U8 zAX%oXzL(zzg9vqy6Y))vSqc3_EP6~Z^!7TKL&h+73N^n=EFWB9+VD{CJ@V5A|N5L| 
zIm}(nK`?8rdjAD>!Q-#*z0HH6Np2ZEL=;qc$E1PDD%kP$fy6 z#TCgy+&ZMB4*7;cDm5?D*)?$Iko2)*C|nD0jOe3iMvvDHbncIneMBEt_6>9%_KDdF}1OGN(RMZomMHf zPH*O{?3cW58W{_GGTaPBsg`r*n?cRd9hD{PZ_Kxvwu&JsO^_{ot#KPstl=eWqsLj& z-T;4N7}~m5ua#c-jDVqZvi4loD`8uv_Tl~ z>)H)v&zjP{wOgRuJ?Tbz@_qn6-gp}_p&q7Cvwkkh%3u>!-B=1>2lZQJ=>@}*Qe|Oa zi&Z{F%=J(oFw<1wPj0`v%jn_3mm@Ex$HJcEb1jOjKIL(bBIKjf*aI6w>;hVekN{HH z2Cl3z#La=omkb}ym{9=~K>Kj0G1Xv6=|u*~y_?#n-z8^x3RIH!&!s_ck8alj$bFXP zw4*b4bA<01(d`VYcJo!{7^6kMvLH_tv85Z^!A4P}BJNNs(>h-4dTF1(|fQio5 zt}Zvk9#1O)~LaymIWG7i1(c}NQQA=9H3!*5z%u6F*|5O7^p zI7fyI3s7FB*dO!qT&CZSgQmRvLr&&E0??jvl?Ip)2`fl8?#%l_D$tTH^de%>Ub6w^ z{6xviX3wcFUzFY5`S-Tx%GN-&u6pw{W~h2Yq_fZGaK33UMUPKbq1EHEF|U;<>?63m z`U$kpP4ECpCY;1TL(2fu_4rSwK>gpO&&B>U`iuemXf1ZZ7};PZ~opiUPdxZK8o&HS!BD{ObW;{g}ScrJ;Y zsfyd@W=zU$_k|&zPt)9CBZ?`4I2hL=Dw~^{(Dv-do?io7c@!SW(sXedy+SQi$vSOn zVuD|n#{}`X`}VE!3+V-myr;P!6Qh+GR69yNjG->DDH8T7x7Ty^VQk0c!ql5)7v=->1i=rlNv17W?Y`{KT$Xg+WzpSamAzYF>Y03wnxy z=lfIc1licwF!R_9DnGoMc4bIst0q&&t7K>fd&SOecseCE_Lc4`c&$ciFuHA{m8K;t zQvMn0aA$Lg4LAE)|G^uf zrMHuvNnw)aa^2D@*~066smR!d7u{dFNX9%dA< z36KMbnDEAV*oUbT6-f5wsANgc0Ub=R>+IK1MFd%aA)qyB>58g>_x;JI8>D!=mkGC% z?4L8(Le{y~1ZXN8S>1`F(5sl0qCQf*tG=+e=mK1Fw-%3G2Axf42g)R^K zE~&s{dhNp6=h5@sglS;xbSfNd{h~<|aGGSH8lqhK9DX=3tV{*M5XJ}6ci06_G7n_< z>}Viscfr8#qWUajjB2mtt5@`i!glOD`8w@Ll6RBD(Wv6QeSFG%Ry+B41f8d^C$IAk zY3saNcKwzu7jp~Dk5VzBzar9i&lps%b}d)Eo9*c0XW6Z2D_Iluwnwt?>Bt+TDll0Q zh`?ffsp68M?$B;TV$AkOjH}*`j*gflb#;c7A%LYHdnL+|EDMV?n}In}v+u(+7(Cba z`d)k$+dGS{6b#+tLliNQr+GdxF~PG7kX_0K2rbB(A5$M~;}21(bu4@dq<%C0sQ zjb%6i>ie82TbK~jhVdzrZvTOkSA~;k^|>&~eKz~etDrfQ@X0}Qs7af&u0I~&Bh!fw z1E6NDSm+iO#OUAKv~LFl(1?KF{_Z>=zjs1fo|2}5k#W*}LUtmAV5n16Kk+~hl}qEh z5veBzHJ&$fZ`JFpabfiMHq!N`-mF*7d#JuP6Ue}A-F;xuzg^N-?BKKQ*{@r8K{M`U z9t*bqXC~L-Jp2_CE~8~3tCg0Q*32Jy4+o3mIuw<0qM-VwqM{8sphMn2`~FE@%zF*n z3AA$1^h&;jE=OpWDQo(O0bPpsK@c;B=$kzgjzS+W@EfaITVwEHmWXLG9H$^N#mE7F zn4+Sl9&nzpVZb|uJ79G#H?z`Wz;>F+UEmn6#=s^Q-&7P1K3bLlh9E`>xL_;&trOAL7#m_Z9%$BO=K|VYj8I+R9qdu z(l*sjktKDDp`F8`JFzcq}!AYT3IeNb+=FL8F|8OvrV< 
z0c_XXp&wBnJcG2MczxmwiJ(xt6Q;&gQ5pj?)a(6@=Yp-zVk+>l8wfm>>!|%Unj@L* zy%xcCIeEP$cNOGiyV3>gc`|OZU91SkB)#ZkEV(>Y-?4i=_~8WkW(%5F`K(>9br>|_ zxuYOL6;W2shdjj+QL*e! zMQ?OHfs0B3kB8o#J{`cl?v zt%uy~fq|DY#~4)Sx)p}99TQ(PdX#6Hy966snE}`wHoFaW>3JrOJV`)6E|&cW(m?A) zVPyk+1^6t0x4XW&`a3R3=jPDd>sCRypgNf#ZA9v?N_4BDOZ6$voG@!rn7s5=qrkWHlm!66?yg~H_$fp``qVK*; zOk8QjacdXOq5G_(J+avvYimuP zIAc81+H^J6i_IY4n+~E<*S25odR^o(RFN_4>htoVtu1rHthxFT8k<$RoNTlvB|60v zyuzI=9;I_`8R@eFA_p9rBj=*Dckg2^udEDhfek)Zw2H~o#*b<*;05@jpmwAP@%aEG z7W?4A7A|rjivRFeeC&ez4x@>3Nq%-8R-z)r){C^v#5;^Qkd~)#>96p_=zABHXie^J zM;e!}H5f8ZeG=b9M{jNHkE8N(&LQNn)>Oz&G&Mv&54991sF5XxmT0@^fR2h!{q}4T z0w3R((+nR|1JF28fk0Hop8McMk_tpS0wAUlJY9Tu{B1uXDu}zBiuydhs%AT>UG9`; z`Z=Gm2w+U*H`|HG+jq_z_E@sf(O`UvEfZF%Ob;1b$QG|i6rX*Up;dNcHvsHP$JMBj zCAhCq>$-sX;K2jE`@mq{gMPO818ZqF?1$(9;Us*cBm#~m`e4(ft3K$OxD=fakOT7* zYq*-i8%&*7?W@jQ6r%asICP5J+1DVZMDcEF#QfU`&k7sXyTh)dR2eJlb7J-?E=xOS zMs%!d$4oF(2Ss@)A>`~{926AS9RO*|)cJsm?jt~f8t3H`P9bfh?Se{ccX~J*G&^$W zX#XE!Zvqcx`@W4MWvlFxeP70wqU`%Nc4Lqwq7+kz?7O5aNtQt+#8_s8QueJ)LXs_| zP%_q#edj$#&-eTM{{Q!V|Nqa&)2D~r_dVBj-Pd_u$9WvbDRNgRZ;ywD!VOZ{cu@5d zsLy0}Qd`EMXD6p(S3ShPjFi70pJ0xgBV5E%x@sy`pWw@{3L!p7T4}u(+NHaD^2FG6+Q-HICu1o8;cNR ze*lDw!aKjYMbd7msj1NueqUtL3Sbb6(r|X>hpK=D27_p6ZS4iI?j&d)@57`@K>i=F zw6-?F%gft*CpMf?he1pNxcpdbmiG=HD)E==l~Q7CUr%1qEd{<5Osx6T{I?2&en(q- zFRyw3xw$zrpse*lxpAdAVBT>9P!`2Ya9QU?1`q~Jiso9EJOb+SBB;?T;J>+=x;n@A zA3mt0V6m*)I&06T;Ph)u@i@7g%0=|1pe;&7Uweq3#3Ptp>p-9ibj*wd4Q4YlF8%%NsK6aoyiZ=ypdL`;>dWE4@?VXV zGRz@{%2d*o=3Okhw%alm-8*CiS?q6VIs|rA;)Q}>^{=llo_IPWLoW;|+drbM zOTwQ-Fh4ChP#$*@smrf?j^hgbQxV%YAK+QACU{F-$Mg651v!jCjcpu{>MoVX(C!hF z#7rcN>7_?TBR?{vop8<Gy<}^I6zJ#bwwyhf_4jo79?sE{GZhn>bUvw*K3CNV1L|!TJJ5JEuKSPRF~jDX zp$MwDZe?KI{i@hkDm)WSD$iCLk8YHN?$J_8VXGL0RWZ)B8n%+zrXUxa|^ zxTj@Jc0rC@%`5%pZR?(Lf)8P?&ZWOk0ITb;ytpBEy@!jxqL_Qbq0#eTP;h$Dn`cy$ z=HE4Y?lxfbk5>w&e&0(a(oLHBFjcyJzTp`DSYmeDJ0w}-QdG_PHkGIjQHzdAY({;m zoA$wS+854C!9P`J-3nG6c7j)AI1(nN7v6 zJ;jc~&df)(cJgx4{BtBI 
zB$LmsvbCeF^D&;+1oy}J*Ug3X#s`{}#qOz>bQyc5`zhQn^`QxN1%uk%-GIxg9O^1~ zc$Y68EgOs|M7Sl`Rv|Awt`8pq^1Q4#`padR^+xD*f1Whp1Nh2>BlzA` zYlkPRiRC^^1=Wcmm$ZRzk@Uz77FU6%D7z}8eE($9E<$rR!50+xSeb< ziq@oTgC(}k$3@wJgo8q#pfs;#5Z)$=75)- zXr`MOHW&BsM~O^$p|V*Zjn{Q=^wZ{?S6eMxVT=M>bkjhdcrNi7X*bcJkNQ+R`G`Zn$t0>bS1@&W@Aku+db&=0d0TB! zrqgQ|)Y4*2yKQFQRJ3z;*^U*lU7A*|1fg=k0X2s~o}OY}eK&mY^Icqc$ynrzcfjzg$fjM~@~c`{+<&F`!b zUtxZ%X;!UfQeUx7dB78|Cl8NQ-Tp6SDEBwzy;Qr?{w}73G6hz;;t$EG?H0(Ux1r{2 zk6p@$-4~kazHNp=VXJ+|_Jxn~(EE$9mRuY~6RbbVNZ|t$uG>_WEa@_~b=wRLtPpP) z0_{)nh0Cx<2b&qI)v{3=65esIwT?KP-6>J*K@8@Sn(9cM^H)zts^2TU$*0CFU-ge^ zu?qB)dx=OMhaI(n(tJoJXMSgTcTb?GkT5q8V2YTyG;KX{MgMrvjM9uJ8~0shp)7iY z^{?lCgSFYC*3F)3;$B@dde)UCTP?j5h6%X%3zH?I17&kEQ@<_4VCApIcblnsN4C0g z4rW(8pHD}iz80o@mSOY9`qhuv_3so(7k#Rxl0l7pEDX_z+%}8$xbxI|KsH$FiM0ID zRr&jXXp?YB(i;7tMxK?5*fXVmRDG?!{p|!Uy&q9~qvXkwl8^e8N2zs}TAR^dM{aWr z;78xDcf44&%-Luc;j>cK0*U-0D3^`fX^PxHn%YN@XS$T`6 zkr*wWPmm(PoCDr>r{4{=iut%191otEr}+1`NjrXgHuEt?HAa;W`Y%%i`a?@zz87W` zL*n9RJN?3^-%m!%O|P83W_+ARau;z6!U6W#+M6NJCEj8>@Yg`$Y&^v70t5TYJlX=- z(vz*J#Sr>8IR9njF+4p0c#8jzx2SYJUEH*umA=25exu##jDv>+tk_6c+(4S^u1>C|l*8dse890uI@%d}jcz4anmFTW2 zDnMy$)5-z+OrRhAfB5GwCqmxo-5iNfV3K|gxV1c|o$g@=>tw_k0xf99cm7pWW@0sf z{_iKt`13%g63ro?0c|zWjK!iaJHY*Qh zFUE!E0nI$iY*uS!LUMrP1~`G#<{$m|d20%O3%Zdz3G7R?wxX|oyufT1eppq_S@2o% zJ#a)>@Lt9v{b%2jdhAbyWcs>>mJ!&kW|uC~Jy;(yMV0jT)p>G1SZOx`Ak`h;Cg*=P zB3|)N>WPT|((30HJ`Kt^sTd`wXCr&Zr3?Gggc|o&S2tu(S5bjXRM7@lYT8cHDt^g9 zI;kC>z9QnWTkGASuXL!t{L+0I*@PDAw>d_+%YDYd3#C<;?puc1VIL;Y5m{O8-7b;P zU7a+P!hC+}CiFkZ@@HpOrJTCEM_i0p2m%a$x$?6@fW3dOU}quaCdS%;Gs$rsNvv3m zKJsWtMfJ4*WIrMAaaG_*74=)st!OQ$up5}uPdt6zc)4Euw0Pi|?^us;duN9z)?BV2 zFBrSyKKnA9jTOI1>F()ywDZ1Fx?|Ank;R3jeA`q#y2A=Z{(V%vZbQ>8a!M|TIg8xSU;IwNtF_XogLFXcjd}{P2KFn(^q;87lOheL-@e0ASjd$#Nh0r=vWU{te!Ew+h23%V z^pui=S9LWg4|m@~DjR5@2n&t8q&jzzcMLomK`>Ust$p~#MIwD*(0dXns~Hx_c! 
z(RdbzYH}%ig@>3vg5@Yzp`(}pzEC87fiy2<~Hj^QB&WNP0fu6?q@7U+~h`QJEuBbiMXdQ@#>{sZ2em+}0djbx|$v z@imEFVkyCadptN`iYGZzG0Z*3XU6`oQ9_L^|s|1AF>K#{8wykF9ZiD@^aV$q7VP;6=6%lV4nnX2VDd3Ef6?& zJQx2}l7BFSPkyOo=6$p_)Z0kJa6WLm_;&{Z^Njz;55FZCgr&#YL1DVmqNIPt=lb;n zV2P#DhpBw7q5{>a0_JN_mGZrQy~8^op!wI>n6DZfYH!7c^XG+~r=lq2BfSOYZ`p(Q z%Mw-lWr=iM%rSNK<~1-~QIZD-vaQ;QY>5#tfEw6bnXxoCXV_R@zc&*-G10DlbC}Qk zWYqR{sG*5T!TNfjQotM+w`#E59#H<8LN=G(yW&|T=8&eQj@mlPFZZ1uS^gHefL2cR zl}&g0Je-N!fZewYa^>OxU`F8Kguf^TPDTeQ&QA932@$z3+MQG3&nn;#eDy0sGb% zZ7Uf9mG2+w-|-(cAW9t|RhaglN<;?(bX#=^pspl=EfjjPJO6eAYyKJPvvL1Be--=T z<%`pLFlnd9PkfUll}1S6h01nHpfVY_zg5!b>L|;HknT6&eK z&_)54Y>e`I|ITQrH_nSZiOp}4sp2!EpFDQK`|`y1{KWbs3!tT6s89dmSb$0g~ zEswkP|4*9h;iIvK_|6+qvdK6agRj;5_O`YuW1Ua|GHHfSMr8dvHL@Q_%v0m@mAB75 z|K&vZJ+GZOgMRk%>Z*JT-l$q!RvbG#)ARNs-6^br$FJ$>x-~!|#RdRsN=qOT+wo|=)~w#6Bn zkS>eADdXdqOD}fZ!rO->@|!cW%65F^;U>g^E%e zJ;jOHB8yj7Zn^isyXK@M@j{N2?O4=|$zOUL{4-cd{tCN6WyuDgz7@UcN6p5RdxoMP zzvt>>&?jp(h~lns=(16$xrUT?scn6*{5X3rBSRm6x4bvE>3bJ`wN8z;B=}Bzh(8s#jk!n7Gt?OWpnTe ztw8^4(;uaKHNaJO3FR2-?5zItyFI6@p?yPS3Nq}bM-Se^amySp8<2?X-EE)CVdKYS!r^C5a3V>8v3`qg(%4l7Pz z@%G{7&70v0`u&07tKZ=T$ZovSU#W?+V^tTaJ`{B_0S#Pmqbr}E&`87C4KV6F80{Wl zsjPDuawuHm%hqx|GOs+Dwe5?CPu~q(@8Way{tbR+P_dMKa408#6BNSdOifJWo1S++ ze;i#3qtsj)*gpRIra`4*#;fzSl%`Rvjlgx9-ySlktdnUb8ssGeU@EcBgMckAfRY@+4r{&M=M(?t3hOd zmIP20NmP?k34c>Ub@kc`s5L2%Zx#3c`E!kbKCOD)zUr#`H5718j2IMxmy4gSbZroz z5};?&Hg!NXcsbV0$f(5mKgMdmsDT#)TVZZZjRL{;d7*cHzC5+qotMDm6%!J=S?1L2 z`RSxb@?Zg2PdH^Mc)u10*ZY-L9uq<0e8O7O#4RU*++A7Kn8{0dk?~8l7cYr9TP!H+ z%NmD<9eNhHzH=upzGu)5iyxk#o`x#R6o|YszEd61ge=c7h{#HN^yq0sUY!ro@Bvt; z-Gv%k+2^OnuOkvjzDz`V4^rC|_a5Z8$DI93FAbWov%UT^^V>z( zL`WwTR#D&$g|7_-ydL?^8?nu}&BO4+jmwG4*}4a;I<&V7@<@tzb1TB5&n~MZepW<+ zQ`m}Cv`DPL%2%NLg-iIAA|ei``%R~f5%#F;cgq>O z)nX(MTsj*E9D`EI;oTyrW3{+2f1F6=Fs=yjts9*Ozf3LiGftA8>+By+UH-_D&=B8k zJ!>!xoObb3r&7Z=r$m*!M|GaP7fostzpxz)%+Q;%ieY!Qu187p#;gOsnk6C4hsE!0 zETdf5rZ47GZq%oA57w>0dM87BUaAO%aytxTt{6%*2{_QCtDY-(83J66;_#i#;?WEW 
zIu91z$OwEEs6m>x7mbkq0P;&mfE(tiIJYX?QMY{%>>cAhkVhMokvn()nzY{)a10Z` z9wb({cHH!cuz0a3F7qK#*TvZQ4xbp@#rnlimjE_Ks5vJsO(+({lMl$ZmQb2tr3z0M za3d1)c43b@HtijFbv9wbH44fID9umPgU;V)ot;X9N^*h>N*Nh-NE>fnLczX^qcb0bDy@t2GLIHtc^;4Y22t|+s(>~1hDuash=v2;5OjtqB;TWw+eRF>S+@Fz<1zoBqqV^ z?x%DX?*7CqEvVAdT-e?9khv<}X;{9tygBj-GxCdrPBtsk=mHmmp3W=+EC2!!9&`3n-I<`o96TTfFr zax#{oI6_@0cCxa|^H@TruP#D;Jtn`fPQa?#pv3n1C9UN&!7^c1NB$JTUh2i)rkB8e z68J|AKE(Kp$12Az__&Khnmh(kx^=D?Wd)h-V+xO7BC8a?ZLUI-iSuL{~xRhp~YZB|5OyKO6luEHnF_1#W zru!?A`Ji}rRj>sjI0Kv#`0I_0k1N&JrDvxgD(mi5uQQvK7AQ#_vRg?7k(85C)m_9P z6}|g=ABujQ=JfEUv98?kF?J$+$0WZhjllQEK&Co2H|Gdy+jCFqP8fUoL9Dk9h-&0G zpJw1nt#EE)xAeSN3PB2`s;U|jAJVi;gfcF^P7$B=ygkoK@;w6iGOwPT8>kjzHN0PxKevU|^d`WHYyhd-C7 zT9jyc;ZEhj+1Uix0A@RKXzdbl&CPYIqgg(0KoOo0>U+$qw<}XNy5?gf-2;UpG|PPl zg#t5Ne2a~XUvo{rjP>sfF#r09+?l^C)uBjqWx}I!0=>ao3EcR|-)l;}@zVTg(fFs8 zHW>HH9{!)M`MXdZ*SDnU6*EL-^)s`ENCT zYHMg{NSOO2Kq>Qg0VHTOi#K3RAiIA3`VrGrzH7H8?`xt~dvh}}3cj(aWO1LL@GmZp z30ho}`I1zEead7jfIaq7kq{cY1L6FlUXM+pNZq`t56scsp(ovG zJda1?NchQP5`F>{Y?bJ~EjTV^t6zNH7pT35UbBy2y2^P0I=jjeMe1mb>=0tuv zgnJzbAUH6D9c@Eb{fwx8nqv0aw%U_h%C_Pr@NY#`5)+m}iH>@})!wzZ4RTdB(s}A{ zC2LgX7sn{|bW6gT_itd2$8)}Mpfnah>Hb=O88N&$LF-#4IQ8l0-s~fqKo+ad&1O_u zY$iWc4kNVF3A(dJKb?^h5 zkd-}(4?np6SYC*=E7FKY19dK*GdhTg+<*}I^#%@OL@Q*)ss7>?ZuIyyarmZ@_k|N? 
z0za%$f|u8ArjF4lSN8VI11wHo4S&5Xxoj8-q@Bjat&0e`k|5muhS8~`o20mVA^}I zF)xYR(KjxXQr^`*r=+4c-<6})du%rq?9a_Cq#k@46n>6(yeJi)qvW*7;q=w1!K|BJ zpRr@Uxb1pD`9gG+()B}AZ?c@Qdcv&De_k*};xNLos~8&477j@x5QMS$`SL27xdY@m zeC7@g#y|pWUee#jZ)Rk@ms!dv$vq~+={`&Ke4dbwh+)O{sprhU&U+~5H_Q}V>10OR zd?rn#;9}1Q8bP7^WAo2KkB07grTA8SsCaucVj~9CvD02ku|?1Qn9-}qaIxJc$Jxtr z{5K`Ou|(Q{P-1X_M2Rh>gqnR>w&L|dJ4j z6aD zkQ);QiyPbAuVz_0& zdBnlA`OkL97+l>ol^HLQ~JElmj$SehslrEVbNqGkhF zG?kvKPY0PWB8G@SRuPcJytvQOPRSct(;^z4%-)Q-te}EO?e@NhcrS%d%v}-upgLs`L0Y`Z30JH z9Aq76V)EAS-lC-C6#!xk!lqY(M_1ug29D0c!NB=6vO?IRevOV6g8&u{8^>;T0S-~} zI=$aC+aAEDHr7S>0TPS0Ul6-4lSE3IX-+1+*=c*W)qb6cl@%ND=f|swMlF=0d-|3s zc3!dN6~3PLO1jaM)3f;A?Ve_~0k1-Ajz>!R#*yZ6quV(?M|o3C*OmruL{k(Tuo{`) z>YkelHz1U6E4@+yP65L3<;ck1I7o_0)(3xzUrA}#0|RAoocgY0bSHVz{@YwL8O z#0Dn|$GOW$*W_RGoUpAK)+T{d8PerF)^0AT6^JGB)x5GUI(S2TmbxtyGZWJ!C}8s; zfEvzXQxF{@NpH5|f{^MbPog*y=6`)jTe!n{<}P>{V@~l9(+|!)JSZm>0yn|j+$f}svUf|yk zMvB4J4D738sb+=X^!?`YQFE+5Dmps4AS@@;_FF2(1SPeGL#)Lw`ZNz3B9huXs1CF9 zaaXw@rLLF}P-R5biqES!#4ii!wXMymT}Y?K=0WgB!6+4lt0M+je2s=!FMKeCkjQIi zFB;jq6L>PqB+}`3^VP2vhZ;lH{8!Djmc7hSuD13S7inbndO#f+JMyTFDn2cx&FJ@M zAuF{|;hedQj7=i*5-}OA$hL8fsO$mw*atMdeJ*`w4o z31rRwl8TBOHQjDkU5+0R>OW`uOji28Ph9Cop)iyr`X+|*0oB2Sv$jp4LGl~S2@VRU zZ&5(%^dsyY`0TV-0&cacvwTXZzhJ#Xon5GM`$L-4zZnt(rOVFFPIdVD&)Zgf1g)_H znhH^5bkn(-(d(%NN{klimHg&NKZ-!9b1+q(1R;ZhjPEne4`21{!)F*`kI>Q%rlq8y zju`EE<|gq!GK|bv7d6g&$=Y|>g9btM;~G-6YSs$#C78jg4AqHeUVit{Yotz3bazX7 zXvFt~)zQOi@5Gh-Osf+P`zqC|%k#KBRlaLlB$(QUn{HB87xkaF3evtJ5Dc8A?fZb< z$k%)S(whW1)eZ;%9k{r-vYdc)D0nE^8T=F?)Qf*JS3`|CP&8^hxN$cDBn0}f_sX#s z?KYljYmzG&9d!Y$e=-rkjB$>{ciErQ*Rd*G()fwBt^y3dyqMgmrqk%$z1ndnC7pM@ z_Tnlp7>6v2ZfkEY7np`CH%FBbd2#PQ=6rip#VJ&o<5-@91_kVvW8lw&d(epCkjnax z=(qDjzmocYt^Fa6kCX69H=x&@Q3w6@D7HBIbvf=ck9tY))AeDVY6c4vQ3y(NZ#+BP~JHztLZO7H0X!OJJB^gwU99YPUA$9%v5LKbBC|-I?-DExWzGi1&xh zeau1Y{L_BCMO2(iyFYsdc2(^s>)+@p1;}U7dfAYx(@|(*y(}814+S^GWb)C00jjtGzBS# z9Gi9BT*Tm zdGw#czS4e;rERsr;5(+3~ANch7C}3K$$u0CZFD`xs{Fi z+J#;E6aTDgXIZxL$-)$9LQCl#;QoF!3 
z*P1<)yKoD7b4-93?{Mo3UhW3ETjkQn8)d%VWMo1{?dz?+-?aynQYB5p7&F-S`{uek z4@Z;Dt=VhHbM*nTR`dkTl{fZ*!Y3}9PT%=?f?F}*7Dvo#5!(^LEX6dN)v)zG;kv8u z^ZAGD2Os5}60#~E*sFm^oK6htOdHbh`2MoIhyx&hGrjzUsQA?=|7mKSqC%5yMypQK z*qq$5gL8jvjjvIX-!__Vkh)NOp+oIUv|ZHiKRo@N>Y+MAJei5A#jHPCL(uBO4LPXc z*s5=XG%15h0s;avP}u6fqbIlmzGYQc#T&`h5s3KDW#^0zMQa8Nv+mVGc?0zXyk!f( zftx{w2<3|pm@V+EdT`P;KFAXUi=*1;XI~y-&I~>9j5S3nk51@wK7ZGm)Z54SPtTjQ zgvd9-ew!I@ir3zRuCv?dQa{=9Qd$WM^wxclcZ9JZB2718RQZdmg(ET%dG5qJofe}2 zVIhag?u~EW-rkm=Gi2d2LeRPTzVLH)Y=o{VuC5k#gS=j;@kvQVU%s40nwC~hfCX&G zp`%=GbJxLY=*+2;Cj;FvRYNqtZqYKvn%aIs74-_cioR_&e|+>4$vg4__4!(-2DB#X zUL48V_7guxEeLJ5oyG2$NA>yJpyMnJfiy))eAB)2JNs(eocBCpQW* z-;Yf|h>i1gH?%!MLsQ!T@xdIpUN1ocB*QQ2t)}J^?Q<_Hk^t;UP z5*d@_n?A#^lQR38ZCGd1A(y<`_H0-wA#r=dz>;K;h0BSbo(9l=o#F8zW43cJjN!yau*=Yn3dTBbwUoE{mbGhqr%G)-5W9Es7O% znH1um_lnQkME9%TzBM)U?k<&oI*yQ;w!vJ4{yuZh{!g6c(O0piFccaZ0+Gb<1s=-s z)IQtPOWkkpGj~!v$jLDKbzAfk=R&#VlC_i&vT@U%+%K>$+E&lj@zlh{Pswle@e_2a zp?WA+&Do7@vgz(E-k#oC=OO(U0RdGe7Fb^Af~R%%##nW0&op$;%!)clSDy$se;WCE zOlE~14iKXG-RqIhTebFJ1(Lpg9HnrS!5>~}JUDOhXlvTMsM#-FsIn!xbL#ulbvc(% zu7igTG1#0RD0dWH@EWYZcNq}m`K1sDoNe5N$@wn&+!Kx9!W?Wg`nu?+e&RzzUP+Up zYNkSq#kUry75UY2vxm(eS*_Wu*}j~)y_zqm&Mo=;p!tUG%x9qD%<9NoVNoQ{sbc{t&xed?Aj0a8t>mq;^en-2*T%-FJ103Iypnv zu~TuUDn#g=gKlLgnd?{M2ah!ADQ43jib-#hnvXnYzWv$8fl@~|p&0o#Q{*cjVtl%A6;a6mpfdHu0a7Di~LZy|qq zzRUXSko1oyyaxk}5R8=~c?ZCJtPo&o<X?(yXW%!r}O> zQ3Kr5$80)Em1=V6#hTm4Q8eulI(gnK7bG!3x6-Jq>_wm_P&&3Y|SOc`>t!47p_eqO)Pcj2V4kf_0AkQu@V6aDVUv}kmW8=#S^h;G^$hm=K=uqZJejC7LUiqBW%pUqxsd12k*EcA)D-3MXl`%xxs_bRLOZJbKFS_R zw6PSgrhfWoQQl?wVus4j4K#{adsHNk2)H25H3>1VnV699OU+61V83t@p&-p}_tCe-Evt{D$rs2Eqe+@@Ce3^OTC2UYvq1ayQBpO-3(Dn1Ezs3U?TQ@7_21tdVmX? 
z?GNEe>i=w~jAv2AxP_^`%V2GvQ|mWl{Tz1To5M|E-!2s{bDU|6Y`gI+nDD z8;bDZkG4??_4BiL@3+8Q=OUL}OUUpsAe>j!6#gF|2O0G`f}AUhNoMID`jro05a|pC zgwx?}``xdapqhek9OuX1AW}jhlpjLO_TRu0iY2|o9V$XrPqP5^5{4fGX!AFoMMjEI z!U=V$>dxc4Qd{P^5mpxCld|3{tWHJ$ql9~Q~o*Vs|k$e zFgyJ7Ny#Xrg#b4A9|>#CHI8A~zT6`6SB~_4)vaK-hoe+Qd@Cn>|Y5EuJ*R}RlFM!3-NNDUi+@e?Nc=X(%`;d^tERJPvwsge zaAu`dAC3+l*Ww{^wH0j54CmXK*Gd==0-NtJ>~B3asW3r9$dS!2rm1M2yBWJ{kG9go zWwUXwN3V=_4tFvh+_1^7+tll;8fNy5=%kv9AOADuB=K?56Ltu6$dbMJ{C35Go)*qP4=8*h+K1PUw5vA@R%dYveN2ffTSRMK99G{_QZc zev-V%8Wnb2a$Nc}{NT%mrtf}$#88}_3y}BT`lX4QJ@uN~(Ev(zG02v*{UCk-7Gn=t z7t(|uZ%UEi*&+hDjk0>|ej7*!6Zn^y(bd*y+~DP#$_@9RJV2KkFTUTb>JVIY;Q#WA zaAiIy4u+<{FNpTuMgCogXezP$ z;qL$ROE88wU9Xu`We90S({-=A3=!s)vY4Lb4N_hdCH^^~J6R@qOhK70IF;;Z-~BKM#NC zzFB-jG#N~k3L(9(OMNekWX4GX_fI?*GeQyLM_)!AC@46W-)#VAIn+w;`4a}LZ=qYt z1LXi|afxyt?$cA!*|S2%j17iRC)@Z0>;rtuf>#$>Z%3g_j7PdurDz)}e*%H!Qru;v z)hS-HG<3L83mi~mtC7XEe0Yr5PoDj?5W2q>Zo{)3%^?}j(bT8IHphm7zNVssY%?<6)?#YHw)~MoL`*^GpVrUW=pbeCd2J#w zya0_5wx|__KF*73sG!;rJ7MgW<(w*b`Sbrrqr6! zZoj>VJaY7C)0Lzrl5ZOZz6|nws~G=K^luHW$J;TzkLmhHrDIfK$2_R6!Rn&#*Kv&U{YaRP=OJSyWhz5+M%TZ2Vwey0=DL%nI6d zh_bTwz_Ox=RR`>s7Fdu(?FdrG^Rx*7|I=rR7*gz{9cn;f2$3Oue_K8mBZMt_RrJ{` zN6ln!cdfbQA@)&h)=Ic}(t)3Xt z*)e^bz|A{NLru-#=~G2<0~c&ekUQAUiXJ}n3hP-}w84*ma4%nW|98MM|4yU`5-UM~ zzE!0!8s_Fy2}kp9+*QdYo7W@~cfdY5Pm2|OoD6~I*%Ldzf1u5chsU&X_L(wyAdJl? zsuH$6O~XZtx7!S*A00e&fczl2T1(gM&+ET@I_jxdv1k>;{V4x%A8%?<)6>dkb)c;WoQDV z7S)T1U(-{`getRMUQ^y*lw$ii2IQ8c9D{-}5=WbM_~yP(fvFI@Or|&OUzPFWSt)N{ zYSM&Uf-~ryNv5W}ZPiH2yb0Uz8aDkaLCbf1@TfCJ6z)Uf{MNU_tr<~Gg=`A$>v zypf&`MJuNv|Gq}<56hnTuSmH5mZWC~Y z7H92(`x%CH4xjgN`cA)QGn{ZdE#SlAUH;7-kPbYqsi`SX)Vpx3`+{!h-|c-6GVscU z>v~tz6y$@zlqU`>iC`y4Tgty)e))TckEA2z#3|`{YDv5qz|zytp=0!? 
z#nY=}Tdqi=$5F8G&IK&n0<=ute#<*+BN>HktgOOFf@CNp6Uj}|9S?y@0x4D6!-q!h z?(V*G-xVQeOA@LF5SX7$y{u|(u+j(<@ehlnASB44DKLv94zRD5W@TZ?Y`!dx|E>?L z>%L20o;r_jjVS_IQD<`#!jDkfm?5GSBgNCf)qefFG*^B_-b~v@nZa)h3yTdZdY;hi z*eWZW>fqmC4S%Au$czxsPJINDu~1egB^6=1p;KKd+6_-SI#wITy0qROlko#n+Iv7O z5cZ&pO-#hz#JYMxyDYFgmL3w7S%XfLXX~8AD=YXAMC_X4U3S^gGO0s`ND~8t+_mA5 zB*4nWXbMp%*RLNwd=O$o-jxDsND8vU`|L%iNa2BqjB@xqntbrB{)MDXiYsAPBz) zR;mSNY8!7Q9~p}IRHhQq>3skX^g=rN!Aif(x8g*5zol!-3KU-pnl6`kMZY@VG+$EQ zbaI}a*aY=v_p zOe6*>5`8$+%?d6Krl8BFE6)1(rORwEFn3%43XUxbs3KC7Ks=JOF_N0NEWtyJl694A zlEw$5Z&ERK>n(&|W&DdqMds5$&Yd5JnKw%ZrkDapOXLY;zk?#NlkI@1}-)C z9wGNyr?S}g8(&};P)5Yh2z+Q=^Ak)F(`{Gb7m`S9HQUo4*5!`pm_Ef@qdelCm7+H) z;3J<6UYiS3gKegaN~N8e@$ddX4*Ss2$jKklA5z*cUxQAQZE2t{Sbs~7D8RLcC~v%E zoc}o%mRlu+C3=8O(zqd^K@_|6`gZm@w3+!?@HYM1Q=!GZ31T>)^>e+8xG>ug6xPam zcnbrBl9N1=Ya=_%{Sa8Up_FRMmnVF5H%L3+?~8YXiL}4RSr{S~5_U4A>_6v%-epvG z`bTV93WC=(MK`6zBpt1GO(;R&_Dg{diuWPvwt83qx~&zlMO(yz@Up)2qLlVuZQkv- z^VhFn%+_w-ylLK*tvatXRDOpF+t))0_^Q74t;4VAwz;>LZq5DO@ilTAC=HQ_c@x;d z{!1Of4-}?naL%o1QGoR%U{a02KVKi*rKI|;yYiar!hXOc)h}M6P~t*D7j?lM(9z+U zi|*42;{(YzBK;}A;i~BwB==^tw6uq=%pXzx`$`5f?Z=VQdLgWK-^@xYbl2cq+R^}i zpA5r~wP6uaQQk8gi6)`WO__rG@n6H8LP0@znAzD16;~W&ZvH77ggQ1e->S<4Ed&J^ zuJMuYp12L}^F%xF9t{!L{2FyGU*hk%lb)Xxcr0M51QS=lafsB--a4u{K0`rgjGaQV z7#u)H&C;iJbZqZVu{Qqmd>s4F_pCz5im5+0H`fI0j{2W2%Q!AFB%h{ec&qOGnzAfc zjZU-w9n(KWDvg)jl)sUPqydNWfrxG({k#X=Bc%;E=yuEIsxw>@kNSU;HqMR)h=>2R zBY~MGP!}ho@q%hAPO#YygUdZ(j&9zNRRVH2u6DTXE9KxPfe8#%-jY;C|~#ChK)pMlwspB zQ1&PSEIo_CPI6neHl~USu%;5a z-5hSU4;_+KNHhX%QQ{zbUreJY|DkZ9y}Y>ztB@k~fxnx~3~5&j{9n^-Tf#MIvUTW{ zi&l%Y)P(O|`uS39!7MG0A_4GCP{2`Z;f5t$(~~?_F<1tM1wJhRnxSCG0Up@{4aI>c zUY&zjPn?_|8WJn2BrN#yM2gYESy~Qx0!-k{18Q?u0~-u?58t)X3J)xOlCSdVcdYxe z*J?(wS0=8T=IMSgWzi|MT}zMO3o%#cJPaKTW#HH`2iORFdcvDuV)Cv8o|zeJKQFet z`MCK0#QoC7=?7=0ED}+gHD|6t)xOz>k9>fXUJ4Jcf~NN3suWi~ddeaQIVb~|Hg?Pa zjzR8}*wOV+VWpPB&QD_Z=641EU8m9qKDh=M3k@1-s&D^qQa|!vRObjN%c_e9sr;Be zCxO$^0%CNW{{rWrpr^MRuLf^c?~#1`b@JB*oK8Wk-oPQM4J`_w_0bTt9W=njy8Er; 
z;$ShK8>WdS@-q&xg1ZFZnJzovwB@VKD~}(6Hi8*gW`LHK))PuA0E)T@ON*dezmI_0_)KuO}yUcq*`Qt99eC;9RT!>#bs!zIgL7|Ez$HNekqX zZs39_cLg=o+%10Y{%NX5zJJ+uvhqiZ!pbJq51p3pP5#aG3Y*YrcYCV9{(uc11o$|9 zurN+Od1StS&euyXOMdKqDeTXv_R^bWQLxySJEnyU8_U=?hD4;Zn8B zJAf_CzYqWW$W*%=5D*mNs4xY(I()hKWM;-rh9e<=*cm5jW%S9-pM3Ad?vIQ8iYhj0 za_^q3B%f`aF(b8}r}fw^-2?{~_#_RL&SS?r{G zRXeE1^M^*`3`_1^vwU{7pLF?KYGnmni2c8*f6<4IOy?k51z*Eru|1w@``@K3@{pan z=FBXEs%c-=AGpG*|I*_-=kWac@=Nk3U<6&`xU=;R@Pa3o~sMGS-YG?>GKq=s=u9oCtvAeb+^Ac_?$wY z9Vjh6$OCqbY(S@2DKf0^uL pVE0D@*jp1l&koA%JLVtw&mNrmY?GO4g9HN*c)I$ztaD0e0sx-_E^Pn+ diff --git a/applications/Chat/coati/ray/assets/2m2t.png b/applications/Chat/coati/ray/assets/2m2t.png deleted file mode 100644 index 94ca1c361985197a56966342cb4a340983b46240..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 171753 zcmeEP2|QF?8!v67LhGA0%d1ieGuAJWv86(mQWQ-L#x~Y$SrU~*3MpAa+NG5iOK+=^ zr6@%xktEsI#CPt@y>RdKDX z+L$p6HDkt%O<|k}j;uI3jUD{oSZg%}+?dPh^UKDJZJlN#x5dWH-hgOI7{h~=p?u=O z%7M@NM7*ID-i!w$K-A~K%JN_^y293*&4|j{7xTlZA5@Z3c9hijTOm+AVV@GS%D)w7%WysOk4~I21j`z3w);xV6j+} zY_0U*A;bs>!@$8?84_SwVFY1mKq4St6+`|h0%rori~t@F42KoL6mLV^g+2)*fQMma z*l`{}IXqP0gPA>fENPFcptum_d3NDVZQ;xEU_?y8li@7D2Sexsa=?HDMi(QX*YWZq zmbMULzzJ9b0|D?KIJ4sr<+tEPC|3hdLiv5yYl6e5`#{sP8zeU1cY=A0MGrpSlxS!U zXGF9is9WI4 zYRe9oQ!rCd+83J0@^~|%DKutl38uRU(DVc6Kz~5#MtOO%QlLG{5XpC!UeB`$vBNM@ z_>k`?DPju@J?<5<%4eIZ}n<1dT6ZDDbuU$10 zbO9PuR=oEzN6A!Bj21E}s;^Ta{n_+`ojKI>lh(5Vrj@L2P?q5L;7K$!MWN8ZfFPo$ z2P8seyY)rIcLKolRaUUJMQ zkRp0wgq;R-5+tJN2Skvtc}59RU0$PKrD$Mafaw8_B0EKdMfyvMh<5xNa0Jc*eU!2U zUMu94qYQRe&1X(Bhn!;&fp@4$_)zaXYg!&cr;_fwPVGP~wHBpmkNU$LVFZMzaEpX5 zKZ|`JCx=$-gN%?epdu&()k7!*&6Pnnet*Zw!@^EAGXBW8qLjZNqS??s9ugw-&5R-* zeP=K%w`C)h;sVqNpnRYyL~9nHrXA&%U81HQgN7d6-i{d#i$n;1NR&j7m+yapzz!n{ zoM@T`)*7fc9!OrMt5W*~6@$nfO~wd{!!ErInv6jhnt>-{Fao^@)aW0M!x|TX8?^$) zAn_C$Z-U6dzRke?4zhZy#b6yyD-`G)mR^Udh%8WZv|}36L1@%b6qU~Ew9Qa#_zS%? 
z;F0Mch*-2MOc9%ak~mF2K@c%0eS#@+H;O$B8tV+j(vYeuCO2xwSbl*qCL?@tX~)`!Tt@s zI-x&egRkEiaU{!y_`b+gLZ%2B0*LKHLjXCiZ&L{lY=(TW(II(su+fp^?GkJt!u$)t zhTw3C?S4nxFunU1#Em3=dqDT_t$4t~7P1TOh5q6|P&-Np$!7sRjKGSAj-rIVC-Vn; z#RCRI?XOWeXY{97D7S>IECxv&qO2_aN**v6TAo%n_p}EIbV{!~69QEQk&E7bSsH_S zN&1yMU@+87F4Zvqo)6yll7~*ocb8s|+;2yT0}Pdp?IY!9pGVhOGX6$peO(DIPS;QJ)1#BlM;l1&5(#j|WA`!yl+i zB$`6PSL)OS-K2;_z$hYfFqAy>U5En-MJ#po4b74V_%u`~HCRg?enxp!e95tqIT8-sx0FVJEL_)++y3NdOEV9cv!2{T*fXN6kaWxOA6pkCS#_YaX!FVH6c^ek;Hq_|WwCMNF0Hq`Wc;rZgRe z{s~;M)B)FE30!|)l7F-20ZScl4VuWc?~+6?iUK8~PSF{|en}c57eN`1&cyE!3KD)_ z>WGv(VupdNc|ejkD6w}5(*{k=!*9qH79@>mw?ljOKYaT)_UeQ_T=Osjqp#l>@j+4Z zAOxqQhJLhxNWM$7foO`Z(FW{D{Do*kXxIdIzaehe-S`XQ7KRmE59p?E%>z{D3>pMj zIcx=sl;PeGKk4#d4KQjW4Rz^&It!TMt*yc8NvOsS9F_+wrNB|>ilB3b1=J_Oz?5JQ zpWS6471RnrOpic&f!|IsVIg2)u(&QQB`8jOmsKKgDAT2J1UzfHP9@mv_utr(!#nU| zcn7d131JrDwh5q!5svEbWW8v7bvy%bk3CMRNH!=9V zz6Ka!>MXyk>_+fLP`3n(fEGbbg(_NvWH{i>CNRhp>3ivgz)ArTIO`1$13dc%MXL#n z@Xu9muXRCZmlyK9h9)`a1=HEO1^NQLKQztA(|-%}h0+?PLmMD}>-<@ljxSww0ldfw ztq3UB)~D7M1ap0~uIRB>Oh3(kL2;M``OEWP7~y^p;88LdJxzZ>Bw0rDbTCExI{T@a z3M3Elvz$gxEo{Iv>Rl4!FKG~@o~Jug_uD)uDBcnf8^rBuFv35t5dK3QLUt5>TZeSG z7d_~ZE>oPUQoo~XR6nVj-uVzhutn(VL%;_InGO-^>Jw1;5Z#qXxAXf!i%`>au%x=V zvvhVEAeDvcX7ZDsIvw+)@6H6I3PeR<%>t`wm{9v0?gSXk&gOLBOP3yhh?j_T4;!hU6123%o7+Hr|0u4mfHc`<1lEk|AJn*z zW}iWjD1yb-(PvOd7>3elpFuD-KM^Wg%vc#@#C!RJu0Mvm-gOQ?$|dQa3&PU@9k=!S zJ=XDUGL*()v`p7yl;!YhDoPvhVw6^w7<9u3NOh|Z`PPZu3=qwBb7*#nws2^yHlTST zWj#jk@rZ^O`ZHM(AYJ!De7Z-8O%G3_W!}*g*{s>T<%rSll*vh8@-#$O=`238DnLXd^-k6NsxYL?J<) zMfjU#dV?JkG#tTUhzV*=_^+*%8+3r^=2;+LQI=yE0s?tU6iG7h>;4R(auqn|-LbVx zXAj=Nj+Bw1I%u@I>ryp~bPI-UD=Nt}m=-q<9xQeEBGk?Z%~cRkFbooS9Ysw;%s~}| zlaDAxgRrqbD{6rIno{k{gP?6hwMQE~;D!Baq>2;4GJ;W*&HTe|*MmCP5)`2Jqv*hc z4m`Vpw4Fi56K0`M!-?2B^kM>}9BPQhNgW97qM9I8i?U&zYw;kzaXHZtLXHht2*KI(Q3!$jK1DnBJA`1RM9ap&uF43|-44x&`vc!V{Fb7?y!Nbrzh8mKiYyh|(Xw-=i=Tgpi)oePo8r(i!;JZOfR_NQKi%`UhxPCY1imY0Q8lmE9JrL;Ag0jQG0agu)M&QBix6PghcMAk~tru#EJi3cF 
zybj&S#thkb3fkpn3t0J%>{hY0b6cBU>|KFuc!T=YPxr#IBAF0Sn+|krS3w!6pLXh| znN5b{gm7FV<85vW36B>9G<@OfOPIP+)XvWUs18p zpYF>&xRt|iCCVX<@Qa2Tf4C#~pV$ft?EY+Wpw~ZiTk`-L3uL1yGy;&_+q*2IK~5a1 z4KaQzvE2&<4BB?J^u-I+KFJgVhJp{xUSUJMo2?*q+Ion!v!yRA=(hd);5IANspp~A z(RN^AfoKnux)_EnOld5K>fA~k_P9`C%24k{NnZ#dkb#3)K>^s6LqUk9#i8&gL$*65 zjTKK_nw#Qm4GaiY6yuA+QYWL|3m3)!)xiyHvrG`w7bb{O^??I_5f>4~;zht;DEAvg zUM!3Xq;~)s6v!+@d$9{Z!dBKl*C>SJ<@v&i}gt6=7e9MlFkV~%Co_7G6vaq3>8=m%iTpW)OJH2itrd5VC-$_^q`NwN9bOoqF`wew7Ll8 zF;NjReP0Gan?j?jX+yGjh;o9JJ}N3gtR1?f2q7besA>|W_J65Hvy)yM-fJ!?$nJN# z4h0~@dqM*UIW|-QMC~n)0_YE_)_+3r!wLy;f`&PssD15GJoPW0;81Ra;E7hn1mwS< z9OF>~2juY^|7b5IMDR4S)KxEP2S(xb8`uIUq75&kc7K?gzg`qBjN++3@id~;l~^y} zH6(*~swEU7B6=75t>T#B9lcYaJsQ1}9l73aN`h1Z{EW;SDdvVw02)#DMI26njp9j+ zk4|_RQPI12FOm(T`l&zM;Sm|Vi;reMI(nGWS6z*a=v{m?88X0l8d=^|FLEQpB6=Sg zTak#5rY1VHg*X}7qtW~CMen1@2YLf&WZ9Qqu)`xI+M<&}8d=f%C_PPY{nQOlBT8Kf zB8`ei*+wxw`s%BZ5xxrwjMCQwq_7Y`jWC7P3p#s92Jqc#=SN2TE+{ZcA9tt=K>;$h zM?BPEEZms08$bq(*5)Q*_!3=}0|x02hjK24 z<#wWo6NS?LKUt&Riw2~YMzjFh_iMA6OS=2Bc zFh&k?Mltr2O&*0{YPbhIM&Ip3$$OBR+gLel1y3ljQW!K62U}xXImklB1?nI~`38AP z&^K$KRBWWJtVnj?fSxJd+M1|`9F_#}z#CBo>J9ZCgZN9flIl1zC2J@EGZ&X606{KLwCp};$U z1Bk)5AQ8=NtRczv@0~hs2|hrri8ey_0r!O+7E#3EMxrw4d_a-O|J}#H!X$NnKL%C? 
zyc$LHeHSzNVyGPf|LltJRb&XzjQzvyI?e3KKgS3V^}$RB14y%nJ2KR$+=;C0MlvYn z!T)Lz)Kn-lyPNjgfZs^~&xSLyF+-FJ-jrx)4yhfWk_cAdZxG!;)We%f!+$U%>gz-M z!Q!k4)TSZ-k?kBcawzhbxr5#oZrPcgFXHg(Dp*>{Mt>5zaP$&>`F zFtGe0mbN6y4n$Z319HUz>JVjfB5<|dZCV7W%pqs{h3Q&%12&Zx5EFx~3k-Sq+h5a1 zoj(1j^;g<}7vmF#leyj8Sao=BxIeW1#ztm%v@9w}Crl2e!Vuyxm&U|F!8hNX@?xog(I*A`Lhg>Tdt(^%x`D0iZ5JfL(%a zDvr+98uB(!xtbA=VMGp_0J}CpV08!3a6_vw@@gDrkLz0p8XWiiCPD@CG>&M4NSZYJnSI_dsrg`QiEZs^GJi#$WmIpSgJVcOOaF9zZb=z z()Jy-5XfSM;cW+zGW=2ZoJUz$j=Zqv?6yAWFaj|H4MYZT?}Rw ze>QM*w>EIV{kKCd3W!I;cCuOR?Vg5W)XONTIJ;2*{hqthE$=n5ED3euV-!yU(!U3w zZd86ZJdLP;U5t7y<|v-%il>pKt_A~ZJ-q#=ZgCaLA4f7C=u8@NY=~w-#0CRpJ)-b5 zWYrZpHD^VFYCnIvh6GOGDOlkxjFd?F(7Fz)^U3Gv6Y(GwY6kroD}#LLTwo$DZB0Vg$}3_9pPIe6W`F5s86t_JPv%K4T)rGL$v665H3{dav~mDTG#b71ClAc zzy`jU1>O+3BgK(`4yc25Fw{piiTXB1lpBa4+yQj~b$lqS!8h(!MfKZ-qDX1zRxpa4OGY{?K4~u#A+PJ`^=i*93ov^w z6s4f>w<|np3I`76yDZJ5bs~l=#@^$x8^Suic8g4UUE%r1ixufp&p=Z|lyT`otWk>a zEkHHCA*Evf*Zn_wD(MFU&_hXo$6?fcy46t(`Z9l70;Lb+V8a9I12Mt~iw&C|SXJ~I zlzpEL9rFTwfTqR>UgJj#0llCa2YHgATXG0-3Q#c(23cLzC8$A890E}dt%2(Wr`Pv^ zqiOERkm>ux>}ID><0U9g@mmCj(|G+BX9l@Kc1A=Sg1QA>4|0O-fM*Ymq1KM~oZ0!^ z^71aVmpwwQ-v=DA5=f8%TL`d03B}5z0D(aghg|@HtlS);6Zp4Su3i9QP&D8jIQXF8 zL9^fCQ0lNtOKQ}l)gW%LOX~&528v!<1F;Z@TS=`mkW~X{m4UblLsVl>*GUfM?z(>) z6NrC@QWV1w6V#QB>hc;C1B&8fI;#bGVMw~SwK6ck&~9s`FS1iaSOmBwa99qS*AQT< z4##q zbWT>nn?U&m#KlHwjiJ!RNid+}Q|$)M%i>5CVnn5-iVqDT=1ADU56hl@E>;X_Tn4lzESzOWen z8{W}0E7bJsP^07r78Z#1K&gviNXjFW4~jGC+*=CvxKL(isPk;}g%ARn4ub=`awrJV zv^YqJJ46i*r5v;i@79I39n2fv(NvY>;HlreSQr&FsQeC(1MS5Q@eDA;#P%Z@+o_aB z>DbPqd75+!eRbSO*zk^)Qoz83EXo)TXU>iSqF#`8i1PpCq5(m2SkmuWbvUthn32T` z(*a}TAZHX~l*MHJ0h*=7SP_-z&yl;>gw7FS|x}vMjpp{SQ;6jHe8IGl{VIE@HS2_(m ztT_;C4=WBtkD!b{EmRC~G8{`yh7Yym>wuzyMvi1SqJp}{cSx;>qAw&6%`mbQ14>KBx*nrF+!9CbtP|?~j-?-P_ z(((PUGKzw`exI7Ky+9T`p%E|~S4Ig@ldFA0z@TN{>5CVtLUoE1QShM|3JrB`UWl58 z9b()(ePJ;&S4Ig@)38I0oF7si&w?l1Z6rDf%b3ae&j)Nl`a zjJ_+Q$jhnBd%Zc7HBc%x($IcoU;&PvDc;(es7G#d3l7T@O~EE_1d9PX+^1 
zf<1gTu6xg9F+Bq9p2<7KgoS`hhQ)PRhDBkDe%b^M364m%R^%NadNRkz)^5L6kmOYT z-&2r%zjN0jdN34t2LMN;q`-niG`F#aB-_7t>U!7C4TWHbMHF$kk*EwVb@(9$-X z6~UV5h^H)bgA@sjbFlNFFpfu9796466S_dRcI@!}jPQ@aX>hg9FV{U#w0M6Eu$g_w z=wbw5tO~3Y5P=J_;7J2h%Dr~B`u=3-sfC_Sh&CY^7+4c*$dc`kDKzAej+h^~n<@|l zx{9FJX1sG>$E^VD5PYF7tiRghaiuVHVuv9|zI6T!`jae#9q5W(%sAZkm3$1g;pDrT z>r)$gg1J80%-dLzOb9Y0QxcetU|UgaN6Xf~Zo28yO6RZ5wn1XKg^`3WqJ${gu|K9e zTDzb>qcsgecp!x-^nHqvZMUgYmPpCv&&oG@+BXzjt#|dBkjfl#wqKa6`ulESyDf8t z;SM$l6%ZTUB$P)$n(Q_LmDB5g1L)_%$Kmx%p!zg(eZ<`B6AbXSuGkI9~_N7|B88l7X%Nm8t%3X8S{6HfnfJgn@*20 zY>WZVaAcwYHZ3<&BI!e1H&q8P@;UlMyrC7|4Ei(t8<0o?B)8m!&qla0 zV>rer%1Ue4>wFDmvY)x>^*)tJxY-Q<+Ps;fy=E8BdBB}IF9CU^(|Ll+PsXvbK&B} zi^+})4RMXz3$5>_=bX(*Byexz7{h@6=b~`$lO6YCH|a6s*P#9};ud~Z^-gQ+=2+iUuC^R#67sgd>NUhF~~vc==96nDIY9_{@M z2G$zM|9q9fGqs*r)Aq=_xvnvJM%r;sFFUO}0Y)#lBKeazv^^)^ify@WDcFD;zi@YX z%$4?}nsu|UfX6uQKW4tKc!ipgMs-$U%cPP?wr6%9ysBDo%`8gSA(Z(>NW<$wGkvG< z_Bgg5g>kB0&Q<>t+p*fd-TDXyquTOr*0%lA*rqO0u5)cyh&i_MNhw<+kIB1mvpC6S zhq#v_+68F=s<9HECw-ePGk%X7_ZUu+nt%Lp$+HcfJ1(twO@qZg# zU+4PzwxUC&s-*J`XN@9L|EVkw#*Ss>h-Bc5J@7Df$CbCU*8j`FxFKZAbS_Db6+Vi= zAz<{MPRX{)eV+Wrlt=5#q1_G#pYGfD0m!>mdd>8j6-CXC-)`f!aTH(OKmpPrE+Cqj z#JX_|96RPRG*4prlqJK$7W%*X(vWY-i_X5@^7#Jo155Wbm0h1$oPVPtzxqq6)PdL1 z;s7l_7}unfKbg+A2VjbEtOV<_HB)?ucX(K(%o?Udw&wCiO%J!w{$~QXkIj-XoUyCa z%%;YLw7mWdq;}#$%&cq(qKmUG8Y=~wp;Zf-}%cu2e+$M}xxo`QO=dyV#)LmOIIp|?4-WXyj?616*>Jp4pY zNDIuTU>f>g0bb}za~Hd`d=*ubPinpJbe|_Ala7DK50M22E>Ah=CdGCjT*L(T?A$}e zHX(yUv&LV-jlmT>Id&+#;k5u)FiwO$6jKxE`s{&Mc-w2KrH9WP^?4k9$dPd7@P4mv z@GQN@GxoH{PQ062B@oni-^6U|RASb%H>T0^Wo;fDQ^~2}Z4dD=4XibrsodPmQ_Gu^ zvlTTmlb27ySQ>e>`f7YHMqCud)Rm-RG|oK#D3CIa$G1oO<%! 
zntvF`Z?fjdct5SZkH#`20k~JLYM8LvT0=5r^Ov12lAbC2xuyaBL9U4mk=2U@SofjS z%i@0l0;WeTjjCz(FiKE-?vC#ei<=vfX-&$>heFTJNr2x8PTn_`td`y%8>43Ojtd6{ zkR3c+Y{aI*szs{Jc#)H4i>*Gss+E~adH#<~@3YxIo`_Ol^@FZs#P{ytv;7G{LE%|* zZdWhHOxedkzTC!t!9`X~W35^+cD3j3Ra}ml=JD~*Qd2MbhlXzFT(V@z3V!}rkIFOq z!^Z7dvvFf)nxCKFrOeDsZ)s`iWU=|aX0mmlLj@0taq{1W0 zq<5!e*6+z0-@5tzo7_C?t7XNj@+unIPMkQgV4jeXMU2e;Y0)QzO=fS4H{X6i>*g{! zIXP7ei!1S^ZVlr?)|i{Gj6Qo--Pm~9g4K=zyDD-6zt`vJ>aMo=ywU#cHG(L(`6Jmn z2RFU@b>PnCY;J}3*Hwv+ZQ=_*c@HzLN+X5k(4j-~H4`1{56scsEqKi&=d#AtopPyb z{*%2V{rc`7&!iYyat1e(kV{tobKcxkPgoIpmZEvds|G9GD7IEx* zA|&m`=)!#Q9zVE5bM)IOi*8);J+^`=$nY$w|AKQJGR)}r#`ceuj9C4kjBoA z(N8J4s2O|t>GS7bFDjfg8aIB;y%6vq&xvymty=)z%r=1`=Eea((vJ8%sfu^Et)GLp zc(6Wj*({?&0=nL8@83NfJ8t5(@2#~N89+KKWo=S+sz0<@!;uLdVM5E~D{opq#IG>e zxZvzZ<&^lz%*@)NwqLRjM9A#_@`f*%VQk@y<5QQmX-rziU|kH5jn$ix&edDgOx6}dOd3Y;pVU&m-?Jz$aA(0b?CMN(l> z&g;wD@3NnLYZP(d4#y4&a5HFT*@CgjuEr598Pah{)`815Z4UXL?S(^URrK?3TQA|; zcFS+`r7hA-OiT|A(?rd31HNRRmjP$JToL#1?7pL$dG6^>%m|lKNp7uSt|&-6*>owN zPdjd7tWKiQ`+Iw`6T}Tu&LuZ#xUmZxZl4Ao3h&N#X)?dU?(3 zFPgl?lj4Y3249ZxX>+|&wtNvO_prH6@A07{4<%NzR`HCTT^*a;_CL$E_ikgyX|V8c za&qcH68AES`10%=(0V)m1%l%yM)Ajj$y&p%l`0C1AvZU-tM+vRrA^A7s@6P<7q4@u z3Q^9wv=uA7Np<%41o<5~m$$10Pc6;J2o_$yNh;(*R*ZIhHkjJR7nVf|FuJ)#fKiz8 z$R|Bv!x)#;R5!ldgcZfBM9tqd?REa%aQ@)q#`2uQ^QTRU(YiVLcIE8t9BmSO>55-p zL``~Iu}kZkzSo8G4<9`ud_Mp6EC0{ST^-^FKZ$3Dna; zFjStib`k)fH^*I0viA4bZDC=dY;0_N{@|?TlW(SrSBI>-uJ>e`Y?0@I2tR%%3y&x{ z@7Zb>e2mg1NH|D4N>y*lJ8>*2M66}vSmqgSh1afF3JRBw=ef8Ts=r-Xjf+O$w0S zmDwv)fb41(zb3e^D1I_^p2!N~<3n@*}Sac6}L9Puqm-!aKiz}$b)gk929D)<^=;CUexHFZno>L^MeG3+P2*5hAQ#n zKJ8y!J-0UbN5jdyYunyGdz2VZohN_in9Ff8qM|YHPa0S(aX9K%gdI0Zp7Z@edjV~ zQ?f%_Z(j;eI zz9C#P)OWr(m(9Vnu+>iI05t8z0<uIUSmR7HbV%!;B~tMse|wu_iFE?+49ksQ??amsJq(c^|};dVTM z-wvF-bSHF8OjWY;N8Y887qkU4PHq7vMLB5YcB{LpCsSWv+FA>g`{omCN)9^gb(_m% z|A2JDGVGrji~?ZDF6}L1eB7ol!rl{E|sTMWR~vYNnO7 zHLs9Roq>*yj$-Bn;8n4VP5g1p!C>DJH#ZYV#m|*vd!Kf>8T@C?RDHX8(*+I7lbikT zlqwxus=bG6d45Hl(#)kBI6k(j6pLLw63K0%e*1!0(zc40TLCR|w?BQOBDg?uW!$p% z4Gs-ow?GpWm8~ 
zPY$02uEi|x_n*(WXq1N;9+^@3AMQg)v#zWC$#0i(=7?8^Ri!FN&EQQur(AhsrwJ+d zhSy@1unpRArj~ik9tqmAUok@G@e0cQTsnF&{* zzW&rgM*BcFKfgKk+Byq(*(Z%%0N_zJ=0&6uBXe_R=EjJr?yNPrn*C12G#LP1w8@&za#a|8LMjoK7BtyI;;@ z71#gE3?`}{Q5uMl`9lXCKTI*`Z|5Tf}kGT!SJ&g$kTdT(Qh zvh{~#{*Q_u7haj|{PI@t8SoI9lS8gS4HB*YldTX$IFn766_UXKAB;?c&4E(kmbL@#wvyVy%~bzDQk5bql-n*3)Q*qD=#9P2_2Y ziy=H7tqqk8iubqAl{U2G_qG$^^++i9Ox#(~;x6hN{y&+IGs5|sM7i28zytcPQ7+cYI?;4m-PV}I|8qJ_yt9k zm97*sdi9<3ZnmxFK}Xr*_HFOCz7-e3KgNA&w3%~odRW;|Rl`rNkF+U)`b^H%E zH%$OzrwJw>xRtu07p+Nk=6^LQ1E_@Lj$wvuxtU`3{6> ztAkh`X0!VS^EX$i2Mg^!6)-O&GQq6Xc>-I#7k2i`gdBxg2Tw0oHz_tsSkXYJF8tEw zbbjkL8D1_a!)J`~W&d+@DJBcxi{b*aJ#HVs44b-UT6X~i_-O_!pD_3Tl(NRy$J;hd zwZU*=VM`IW?o)0Zzn3O)?N?N$H>C3luH(#5-L0JXp~fmZv^@I)8#j}KrtbKJAJx+n zEsOSjHd}wnnYC&e6igN+z7hj7A|hH!u6O(r!BB4L;apWk z6smr;=cP%?5&ncLC9QVRjG6~+R>lZ(v2ZThB0A&w?D12esCJAg&;))!Vqne+G5|?k zwtBzy9shTW&>LfCvo+_<*?aN8U5xOVEM2Xc2ji^kT=E*7h^?s^SyjbvCp=M^9mLJT z$=|>!%L}5hSK$3OajWZ&#p$fZ5~hL+w{FxvBjCz%uz?19fdGel+0LR9CC-@O6w7GuUW;Zi0Q`9GFZ zVoK&&x9rL@X`rVHXy_?<=H@j(Kb_;{ES6K8uvr{ZeDkOj-#^ccI2WB$VV@(+T+U5paDnKOF*0*`i^nx9evCZh;Wvk8)|L`TJCmri^Nyw5p1V1uh}*IQD6@P% zV6nbV<%~_^0FT%d5!XJ$-tghU{p$lJD?d+7b9(j72D<)C0h*6nQ8>Q9*|vTX&$_^` zYK>vRgykB1Dx4oU+g3-6neUu_bN-#s3 zrt9lxdOlwsa3WAHBHBH=ZA*3rFV=%yqGqpVi>3(sXCS5tQ#ko)Nc}rM2vm1^>;73@ry$0>)+xz|REn3>u$pMjN*0yEv_9IW5J;o4v?=)6Af*Q;uG1I)K2wt^A{ahqnwbmEiIG1^!eG{ao#P9 z?Xqmemb{!VWLLYHcXDBE(qf@cANfBU=q$W>X}kAgskQ^zL8@#H+m{$bJ_dl+pTf){ zXU7@d=_;DxO$$zFJ#G$9vE0cmr||lW)uu(Y%1dUO*Qi zs#6x9@0)YrvrPPqn(IIjw6L50cg3|ts^*<;o!QKo6Z`JD9&?f5Pq){KuTD{I;xx<4 zR8RXBm}|D$IWB-J*?5ia9qwyjicP!0S#0ZeVDY=@ncGuzZ{G@9Ie*)_o56LKdE%32 zF1Fa6@m(YCe!flfy(tIH2;vo=Y^}rJGze|lZ6OkPx#3D)(v+Dsjj{>~$@6B+PdR>Hmm32{8*$nKLj8+bD7_|j=BPA7!a+9>2}OJ#hp zQhD#H&@Lm*8F;Q{`?#`g&&E8vdvu-ML#181Y&`!mwC9P2xpQ4m0}&5ScFOb`XD_2N z5d0Zso|49WwAy6U*4hlBvmGww;#`gyANOt0U;{AL159>mmT62GXYqEq522+ef`U|} zrCq-X@Au1zn@(~11Q~SLYOWnm{v@`hL@!uPR%3uAc%tfj~<nQQ_RKXt4|1E>HBF7&LpH>X^W1>51z~SB6IFl{TuH+N^fGVOi<^ 
zw%z2TYD<0m`k9(i`;0exMOFzgI+R9R@|FKDRN|!a_i5n~t1bpcCEtu)p5U0Uw76L# zII+;ReRUa|mt7dE!l%mQb{CfucITQa&Wo_t++V-x9EV2&>*8d-)0rvtEJ9ed{h1d zXL0>?aMWeR3$+*HZRXS@>xiu>XSsBGmrvcQWy?2Cxm8xma$eO_VJSgV(Xr*j;yoC% z{7=mv?yRoU$C(-bqg`NjQfGRN%9BTr_?oJpEaJ|ewnlG{)3@78^a_)l`J2mM07J<> z$4%W99s@ScSk0Lro{}n;A*W>KE-luq&{{*dzieaL=Y8)lAHDbV#$KDII>ieM-g>jY zzHs`Q$iYPtE@3NAN2|A|`@Fkfwa+g;uVT_{ew}w~)7bpYCuzx!l?=WH*kq|y^L_71 zm^}!p2SAKw;f&>1xqkDLa!UDJPcS($T7dq!D!YE-xGXY&Gk}k z-U}1UrxrhSx_8+6lhn54{b!Qiyfic8XKZ#_w&}tRh&*PSwtuf<{TI1;BBo1)d~e+n z11!ocpfMDmsMZwLe|hEqY`(aahhyDm=@1e1WEbb4dVha^Vrx_N1hdVTx1A78imQ3D z4Mb=0>oqowRq<$DqI)UY_0XLkTe9&o@-H*zOy5)=8&`EXQ7SVxqfJRM>L}m$c3zCyz1JWt1^5U52^vrXkR_G^tsH&)zO9J3Y80T=lwBWtN19 z=G2)y#ffa5nhou#*NhJO=0%8Q)cm!#j0 zE;-3?=j`hZZ96$^?;d7(b+3eJkFeD9h_FV!s?*E9u)YX!HH*6K#bmzqTq5UV6(^G| zk|is1C5^L7xY%87tKR=N<5PO*JT7L3*?^M0)24QH=cCEFac1+1)Fm`9c4*O~3Z> z`^@tpH6&8{lR&@5%`XK`YxrfoTE&4nYgF1#1!l&X(eaZQ!!+Jv;{85s`wTQe0D z6kP8by@=c#vYDH!n1D|&3gzFLz2DPw)>hY=;-?xHH}ffet$kD`JZ|EYH^+`0d#yE3 z_@l`j#^&htUM=<9EqR3;#omH42`B5%#`_sYtTOp;%DRQ$@63$YET1_iv0cP-f6y6{ zdQwD8Uc4X1J@(M%<=pSKwTCksyp&U57xps29p2@5D5M0;CpThUz{LaV<_fd&qJ5_b>GtmF{u3V$Cxo76)o3U4B@;E@gSC z{rgN6PZz=Wy8Oy#!y;!lKACglM^4;=vWsc)F1G&Pn+xA!CmeApX?1W>a9xcHk8QuP z!^K$Ab{%JB#nJ?;ZJMsy3Uk`NW5R?kKi=XRu&MU6<%>@~tTo}g%Cp5z%QnSxvPG{i zY=7x@^t;qHY2Z7}u2BNcdZ7jXHBHFBt`7DiCnlwgj3rBI_p;R1J{4Kd`#JR(pA)|E zTmG!3clK3*YhEevy&SvZUmbWMy=Bdm29} z*jgoe_PE7u>(Y#M3@1ISZ#|7&XDh~Vd1jif`8I_b{&Ck>!!T`Amo3_=GyM_2MB~wa zzXnzi2!^UJP4t#lM}JUrYSpQ~Abid;>X!T3?+Z5VTp?g8%MPyW4b5hhjZzt#Tex__A+xFf65Q&|B&lAj|c78+KtL3~u-o%#wukE|Pg6tD7tH;v;S92^ruYjXCpKDp1TfB#aU9NHN zr5n4iz4fZ$4gY*cEz?zM*ClQ&7Q1Dxs$Ke#MPe4G0@ghsG#!nqX^pR`kF@$4#ILKv z>JwBR#HTIN_;_l&lh&395QPI~=NDa)e@ ztp2_2d|#JmPim0$S}EPD5inTaw39d8&%;TYWL#Dh;c{`bk63;~5qo;o zLH4JUK-lIyCh^uv5J=CJ)kw+toauGJ?(^vw?9uU?%>B|D-o_o_xBJL})s19xNcV3q z^5#GNzr&I(Mtj!IiQzAN^YLu{vkIO_D6=na7*=#PE%;u$NO^gleA0@;J}I7lqTidG zA2zeH32$7fymoEU{kJ_#N&ze?24*Qg;*EymwW zm99cD(7cVav%qsV#uP65;8nQnas{d7Us;{x&22lLszomos;J%eG_xjNYH!JtS*d>9 
z{Kg!M0{8kgvNc8C@$}0vU?nj%+o!88&YRcVtbsG}O<0lLJf7X*rw`?dd-XZW^8qz&+4Z6|dPA?zuY1+Sg)E zAUA*MiR)KNcQ;0gyt7)FSd%?>tC980!e(2|+htsA4&rBZ3npb=Qml3NHN{Hg3B9b*6Bwk0IO{*t{FW88JGETEV9;?ruJQ{2#}% z=pYY|1RmuWJS%IB`5b47SgDgSP)=`A6?al_jbmvVa^m^N4ZxQ>0vglFmL5k$o-qrz{_ zLe?)f9E{n1{nWcTCP}AM_sVD8XjU(KVSc#fITzrI4%tka>U3Vz?E9zX#PrqS3)biz zHB>YD9OJ(!AjeBj!6^OYQ=tz&;sI?=!KumIQHNG5{pWC~V0sM$bL9J{ml+7QyzWcx zesO;>-N%0Vkw%*$)|#-RwFzOdVPF5(Qc+P?C!8yB*D;w@qia{`_;0f8#)?~E zuFF*~JKDZ57Z-IcsyE4TkIx7eKel{A;L;;`26e}CnkwBK^W~+sXQZvVqnB$^TeGx% z#as7JQgxMw)jgyg&z~ysXf4;%Ju4XN!^>JWw`{IZO|)v=&LZaQQgeoWr&?)J zeYw@NX=93izPZ#D`|2Y3){RR^x*?mhl3m-w7fgGyz2IBwyox#hb9s4woNV!{8KP<_ z7Z)usHsaYB;1`q^m=Tn->VH!DuV=Hj#A~y}Jctf+1BA2LR&T%LI6Fh{8S_NV&gEa0 z@i1~-YPV|7yqwsq672aL_2lp0v_9Rw?oe^e`E%Z99+LZ~ajeC9wFj}mpBaK%AFcVa zt8ioZn{e)$bX?e^nsn{Tjjz}@o&UkT;9t(ds&5l*S{@SFg{%ZT_w8fN(Z0LWN9dEI z&zA{@{OovguV26Zx&Ac1uE{@RV?e&L|C6K@miX^13k3HR=nC!ESt#sv^vRUK$flUK znjH5WakY%dFu69Vi4sctT?=$gJqqV}%ho;Sy%To(-JC~AHBY;P+J&` zdu5!x>>5jAPQquOETH#OwnRlm*(rsHT`fB$y;t_2lZFArCs-Ce%qJ3wX%fcw#s`8h zNtgjdgc515gAe!xJ-Jkxr3>^_%y@{Y_0~$RC&WLI;y4*&_v-t%s8y?4H{F;QQ0sQ@ zba$V-MJ`@rRd6?iyZw!SXxaCGO6JHq0Hm9mqPbONL1UK` zi`S%c<-dA#Ka{)XqwYyQFDo`q*VZ8Jx~B@M`DZ>EY2AFJF+0)nLL%pde=1kY-e~+@ znS2&hCSE+Zc(YAudZ-O3!?6C~wMs@A9F5Zj;xvklpd4h!$HMI|LA+nzPomWta|tj=d|kd^^~ zC=NuVou@6n;M0FiacaH^TAYUu?Edcwe+RSUvG9n4??WlT>Z~a8Z(?*5USx$m z2CQ$+GVrW5LfR%_lO4kLANwrfkq{U7emN&UQyVFXsm-C(PU_h0sC7~6d|GQZj5n)t z)>u{C;$syzX<>@cHK$Z&2o}KqnPIxw^-AzD*SJI9%v?zKn3^M%W18!tN`$QcyM|NP za8SOudCNM&|E9U}zBkES{&C9IoJdjS7b*LmrCm<9{+fDtRj?>&A@S*n(vYVA_D-_S z%V>7`S7+jQ&e+N)AhVzwW?-V^?WZ{-G$S?j>?sc7-eenyMxn^_rVF!pO{9MPa>J-o z@|;S+^Rr{+8u_;Bq+l5ep0~VJ2+t~xKUx;Ll{xx=1hZYUt$6+7Os<;ekDKQ^NP6y* zOSo=f*Cv`%Qd`=5ROR{+m6*5JttZv*zI3xr$FlV0UEQ!R>kkx&y((aRb@=qD$E#nw zcv1QI;lu1O0X*(>a1?Lzfv1Z8iwPm()}OWjUbnU_WJcw47M#w5gI21u;74cS%&=az z@_&W+*AibcuSoiQeFONBvlBSJ9G1OX`;b(9Uf4le8vCv6)$*kmTEa8Z*(zp~JY?FI zY~`uE`#M4Gorl+qMbhMJm7JGTNV%PUKfMS0A 
z8N}Wed2wbk-;%zYAN2ByxN=3{*|TSnSi|&86{#PxCq+r=&5g}TXl!)>=$jaZcpZ!%{^v;PlX?&B=kcO*HFRtP9sAmU}Ic zkq!Ua^6?rR2FWvxRw?feKk_#RTN;Z7tXXb9S4$M8>9fIx7g@NE<}P`Rlj+qD{8vS@cRW@ zHq)%2OO@K|S34eC8LfL}e@nqS&Ft?-qiQ*M)~lua%$qrT{s#5CtL2q0Xkvx97NvkJ z$IbJbIWvA-a;@>=H5fE?gm=22jPdYMzIJY| zWtb%_I55R*oFm^Y0=d$gd=tny(!6A*E64d&FX#u|Y0O@s9bxQ%OHXKj3n~Qp&$KBo zVTo3I9vYayRdnJRIj0u8B*n6RS*T*hDes`zm(iC@X0p%Uy*{gS#;bT?j7G|&FCdAj zdAg!_g zGP6oeLp+W_*s)4E_H@?F{d*sMZ+^bve@y4-Q^7R9UEhE9v{kN|A55pL#+9yda1&*)|Rk$zvn`$JjIp%D!Uz)Nr z7c1>I;{MQnbKIBg;7xaDZ#;2jvIh1}M7wtS`?(<-R;}Hy5`H@ONa8s>pUk2u22~pp zv-1D@SXzeD`d@wJ*9MQJ?tYEFQ!|$_{v!)zurs0A{Qa(9j);@Vt7+%bE_X8dVR6T1 zO+B7=CS2;Y{H%o$0o!@X%J!f9Co}5iohFO?g^rjA5#d9LHxBY=+7KwoHV|0iZeeTw~QK<(MP?XYVb5s_fcu zVL((;Qd%kLQc}7>x}`xyIyT*@ASET;(j_1uAh}UNHr*gC-L>gHYx|bp`QABa&YXY# znSbVY9EN#@y`N{TXT^PA_jO(COI4C+jBB8kc34j5k`thzfm9% zx8iMJ{^FFKM9wZcBCBzHb2ZQ+a+YrDyFe|>NS}>Y~F7zD*MH=GkurwvdS%fF#0n z+4a1tqK{3Zg5+VvI06@Y?zs;soIMdG(N}EZlA11n2WW`jbgA)!hvw43u9yqL(wvJaq*>P zPtw)|fUA@;gpzA)#^+rv06>M8g#`HAfOpB4GYh}Tda!m4ddS;a5>W1lf&n!(D~49~ zEFnMJjVJ;kU;%M3A&ZU@*Ry-KY5IhjnZxLnQZyg>`TBsWd|XDxc0%}!#LS#Mg4X|F zZzflcU=35c$QpecUkFGs6x0qred1=qH>|PI_1y2_lOz3-xUpw&`VQh)X}>(CGgaXZ zc*(BfQbry*aS$V1No%g8yhtdA&90B( zry4Fr5TzBgeI39`Yyg?fQiVzTpmli@y$^DcZdLw+b<>!7?^8!PBo+usC$w$~fCLW) zI*AMRx-&(Mtt6TBPQwE(x{!9Lqub7R=f34ZPv=*6K$|fpV!JwwxpfZii~6P+0krZ} zA@A|G1@Kxz7W`R$;nSg13uK ze0@%K=OpmFwZCMNa+&onbtSHqlsCT{)Q`tI$bO}^N$@zykl!BYvK!JUP*-U}h&;j!7PX$sq{4^7p&58q&s(u8Uf z4RjXzIy*a4i+>=5U(kt@dL-Q3+9-!A1N{cm6ArbR!lZ%{iu3I6%W2^p$9+yO_SnOZ z=oAv3EKodpAq9rTDV}PWtuAbzgP>bhIlv(@mzalgB`TrB)2sn+zc02sl0Vb{7futW zdJ7ec0nnj)R%@{smWH!fsCOuG9f>dIS_m;cjkhrM8%J zv!0Z@_%po{SIOoXOq#(OFLYSpN>zmi_qA!pwAg9GZB`;mIEp>Clr(Vfu(lK)>ky51QtW6R zc=C)lP~H&W62Z;(_20zegL>Q+kJH_Ym>?m(o&b)#%%!ooW~H@~$AiY2_U&4@EUL#X z{N;EjY{q_8A*>bSTHj$P<#yBcL4jH2xZ!9X{N`z0ju_~fFBiPOMPCcMZ5vn`Ioj1Z z8kJ?=bd&;UQX0`hnw`2qhDfcUrj*g*1Jae$g@R09d7zscOuQ8t!gzdx5t7z69cMjK zOed|r$W7_C?@-e)?zDJHoR|ZYvwn{Vny3JWFpC0>L|#k>wqMx2Sc{dnn9Ze)=V5NF 
zX|OAMqynlSK5wd`-DY%HZs#msT6V^cE>hFb?0A|2anTupQ;DAfx(Cq*AO?x!6>yB3 zNK6!@<|t6LKSP(zB0@D=4RR;o0|ECu&$5NH5k2 zp9T^uFT@O~yL+d7eNb?`yWzdy+{@**o#{9F!Jw{EGm-;TT7-lyYSMg8^Siw$7f+d- zuEJBJslD zJIxOCR{K&ouiPXad9n5V)hzt5V5#ql&A}=Hec3*d;7gHJ(q(ql$Sy`WSsT&Ex8Vs{sC{R7q5lC^h1J>cPPRsSr7HhQd`}1ViycG8JRlwxhp= zg=|P7=1)tusdz$6FrZ-tBDIHD^+?EDs6zPgsZ!_}+ZL?23_?-gD&$v)|JUV0S#%3Bo+)tRoP?zC~h9Z>g*o#p|0 zGeT&Ah1w`B78|`H{srRE#ceT00HhEZs$6A}OF=?8iyFu}t&r(@Ezd%ApaNCgN7Y$S zjL7ho_RsV_zB=C_proDvI=QvA04wDJCab9s*m-C6Dluvp-FscwH^}p#(-aV4m9tUJ zK$nzDne5gtcCnSK=F-=J>TmFMSPHMLJdjvs$;UIFYOyzc4pkd`f|4c)i#oYlN;I|r zY=FUvY#=3xiQHdwCL0JA4t)!x4uSZc{($rCj0cAo&edYo8n?KhL6>IfYd6it=DWX! zy8_{;`qc%1zSe$Jm^kmvOV5uv1o#u7G*;A01Yu}4xZ2hO(PN&_Nie1F4;Ev50DT02+jH1%IA{8HBqJ*R*WO->R{$RoK*(%jD zelQFYxR~g334mNDT}*(-WL?7=FhAKdg*-U3U$lkIBn#`^L>8rMQJF5$ty+%%@Zkfe zufR<#a#U71pq6`Q)|Tr`1^VZP6tPm0XO*?4=I|aMh<@j_9BdV=k}aY7S%7-9IW7sJ zVbPe345KcU0{`c){pk?m*1-=+ad*J5cJ^O|Af$zmB(S6!dmU}ArH`k68u{>nRbEjs zcc=M0@@c7_G>D~HNY;Q(a20HI07yvG0KsHf+7}RrTKpLTS59uFn6KME&b`vBv5^C+ zCJycl@RQiH$I#qSf9g&xgJ;Jfugwp!8@KP|>QF6X`xG2Qy*E!N*-xoSz3H~-P zpic1ZrC3WJI%>-7M6-`zTYG!MCp~%juwCJdN!=QoWbN$@&ohp3(OSTM(xKeMLcIz{ z^X(GkfPyuPjSg9BCMS|OQX#UKmywnyMpWVekv(GtA6PJ%u4YS`We;#5_&jUEM zT#R{SC5LXM6q&PuLE3}G_*wYrA&C6trUYacPj7iGcat(rIGi0=t`Fn{9|XNOX^(7u zk(P5rfj4)y(wA9q`@Z{o)t7z-?|EVbw0=6VZv8H*Th%+`Sa(um*>Z#a#E>{W0-mf_XU43 zu`@PpiHw~skVVAf!Y%2%p(5TV_E~7n6xG@?i{{-);(*VRU*3Es57&@4R*!@3=3l>C zNr;P+a(vU06D7iY`s{>bPX+BF;V}yjT@U*r)8ogF8F+ZAHK}af1Pb@OA;cPYq~k^9U(U8AML=`2lsJC@oE|>e&~RqDJ+TE*d?iU4B1Xz}2nI+qc)R zjK0rfMcB08D>w|&2)RI(phj=WR8c>Xy+qvm`ihEgAN)u^pUZr4ddmoq11$~8zUL^W zI$XWTPAlO&e9|+2JXYEGybz3(F()S{$qSb|Bz$N($zoJzrP+Z(w=I7#z{JUkjy7{n zntQHH0D`anEaWmOP4AmTMi4(VGa-TgrswB~`HLXFOX8^4?fDdaA$YWfKvlSF{EG7B z37fd;eL}9oA|4^ihUu&EI3^7N=Y8!-CBEAl5Su))# zPs{Y)Po&+DUOH+Zs~x01DaaEh=kbdHLvCv2PVmB`K0PrupiqI}qe(%v;-WdWMM@7y zL7jJU0V%2wx|?wQVm24+O~?M?Po16=9?Vn4lk#j)tzG?@hJ~cF;S|yQ={n#jK#8jE z8L=M$5Frf};U&(m+l>Pu8g}llS&b>$PR?hq*)I={C#=iw=Jq{?D8AeHD8^w`72{-u 
ziRsiFvq!fAI7}>#%(&OgEo%ajZ?Kp`>IM7C3-=glw?bgo9t0-~D;?&ct4=%9Wc{2V zcfW;#yiyy65AO|T|G4v$@b2lmA+xlg#tW9pUr#)5eA~Udv!}H3+JqYCn(l}xU>1L2 zQI5$E7fwh@dO8V0<6@A_jmGky8Qxop2=|kx5On=;z?Dm(n+T!l72T){gMlcjO}Z!E zK1`6OMHkjp#CAdVUNM>T-IHT(8V*NwWXDB_ZWwgfWn+|$hn6-cWtPnbiv$%VMC|=3 z8y@w}MVFPsA0geV0-Rf=^|H zaQGW_j~wi9eUL@btLZB4f7IOsnNwPL?<>R8*nCdCETL){<&qecS8BA2OAejndLCHW z1*36n`UwD)ahxpT>D+sqO4HvrXIN#1eYi7Ai4(y2#hg$Q2^kM1V6j#zun~U1**vx% zYUBO>#I_{mkRu;`DXB+wmm@cN4j{5L_(_JtORz# zr1^xSDW{u=S+wUJm01?t(+mX|F+b4mbl?>xmsisU93foNdQ z1$mW;G}wf{>x^&!7*p-X)=>^HEpS;3RwQ|i-h{Aj*-h^1=;%P$jMO)i-}FQ*n{goA zDp*wQGX~ozsX0HC04_C5#mZ{4GMJ}8yU6f?G#FDjz`p|ZvEKnOm1uYuA3u@?h&7Xf zo;Q$j`$NH}m8k~ao+3VNP3y;B{XJQ}5A(LLXCl%yE5GM~ z1*kn^;q4fXTHFJ2xPA=ZF0qN#PF2v{H}mYP-2gXN$qtydyZ+>~()%c!hz+CNlDAn` zYE?@VrLoJe#UFW0rQYE=Ah+)NqB--%L8zK#TRH;l9b?E&g>69-GvHN=NHPs zi;(_y6bZ2JZ7ldMBkV-*bKf->i`4OSeWb+NULc~*HfSKg|0(#S9(r~&DG0|-XE3fl zk5@BVr32@PtMShc3x&D4xiB!X&obC$kWf%iPxeMY?~|S%@2G%(*SC8^iT&TuP(8y2 z-!l5qq@@SNw@Zk=_FZ&h(VhRYS{WqxVISb1jAroJJ^+|!C;;0H*Y9gX1?4YazEqqG z-cPZ7OM(eT!gJ6nED+KO(E$*ViGOEwe+(2z3Y~#C=MsznF358p&q03l*!9RH;gC-5^d<@1}}WR`EZhbL@NJE_04v^3Ju+glbL8%vOon5ZrB z2|4ZtvI-jN&70F$=;$37*w{@ZgoLd)F7qZ2t`($Gg)DJg#E?)?-hiplk~CpJ5H!n~ zC_EKGuNv4Wc*mXcKg_}|!u$dMZI5N1g!LceBjCQUD%KK~YUtAPiXME`KmS{%06N=a zOZoQBZx=x{jsFo|#Pg8NC(WBolvJkWs@jR_zU%vI5LE1B2lpmW` zxRDBZsZw|-5Oe+ z<;C{)cH(tLe*R=o9N}oH@i}*8-oEUJWzbMpSI;`@O5`X27ADiY?!n*0K@O8QX5FO1;wNDP}2g4vtN<=bkGrD27oQ2gxbe}ON@Z*S^jHc1@cHxSH_?`kaAS^-iY3l}U)78l-Umk$cf_nP zUIRvw#bB;nP;0f2IMIAHs5`~!8*5$(%O0lhAI0s;zuKveLKZbe$a3d=b*0NK_ zYV@b19@?XBblaysfStV}1)?8i%d6`@&KHdE7Tb?nY@O!zb z!20?!SMAh+P2LMwlM+K6OTo%HF5GiaO8@uoPC(p{$>{~>?-v6ko}EV%<8W_Hmd7-l z3PfJkQ{{X0iEnqVodU(de}4riUMFxe(BJs&A#fSNdVZsAFp1%}e)G>m0~@kJGC?eG zmi&7|Mk53pDj8$KcOl>#_4oG2Dh)P7_2t%=e_sK=pag*p4V_TIB;)tTO`!RI@d3o% zr|AtvhTQ)&xm5~>!`FdxkT$HN1HG%Oiv^GI&37RmNh(IhRkJ5go;2@#m7u|Qn6Y-%UZ&CSeQZTjDTslIbhAkGe0 z7ukbkst7;|gm>>sfBEv|%9QQx=nmTA^0K<8r$Aml5dG%JT7nFu&KQVhM*yx81e8Fo zufNUdC`x6XPq#jN2D9L`v%3iu_!zsOL 
zYHHUuQ=c6)!P@*1KpQ=B6geo!3@Ct%m=6l-2qdTg{JXw^=gEqPi$((CX=D>CYb8Pu zPal11H5&s(>Sk?_^VxyyfDQPa@$L7wJpsnH0X){D{n6Bt?8Z%=E=p!*X2y55)CfUt z(uWA>lfOJPNCe@uH}v6!A&l?ic?~tXliCBwfS;=*3!uP?x)tB|)XB8KhT- z5ApWo_&$TKxX1(_yPm}5VtfUDfh4?v+PT`^rJ!isdyON)I06j7W}_v`seYRN&rX3H z-(>cwe)@pB&zQfx4_q^RyWDS_Bts&( z$xX&{_;CAUz`Y_L4#d^{O6e$}g4N&u@d`8{#_-&~hkRAX33g6OdaW_H9I-wyS2FOy zfQzG8KV<=l=mP7CcB-LAqTk=SYc*A8M@JR_CkAR~eHDN`Kpxmr2in4kLyg;SqUfOo z1_r7IVcabQ&QZ=F*Hs5DcWZOOl=QxaT_d%SS!|71TWp;z(ZCf$PvxS2e&pDZ7uwZsM2ymA)Yk=!Has+YY^mwHwH9hD)?L$R68M@j!Y>IoCNv?!K3aAWH<> z;3VcscgadH8P!i(a9pynGRG!?o`PU)ZEc;UWE=Y~c=w+`hYrf)9{Nawm#4ir za=gKjYWV-GTc9D?5hgX|UjURN^V<}bM~~DxU#~c)fKqT#QCKC_Pac#8*R6MhHjn*$ z!E#Z2Hm8{mQpP;9moGInwD5w!f{F$5OjAZX=VgQypV6%kSss3mtMi=zJ6QBbEhMwp z!W%>ausQqwVy%+s^Rnjw?~C2g?3G_c-4}-0)eeCjUeg7im^cw(_Guo&QjS56@fwg} ze%8LtjcCRK?vtiyW+UlIzEve4ySM3#V@fx6pSwBN;qc(<&-xtNn7BBgzXSjkM%oJ# z6D{fGS&x-_e*=ofZ%KVX07wWN`7l{pJHa=1n;H&bo&t~}ZFqQC{VvN>LYnsf8@s@L zR#X7k50xQs$I$$d-8F7dqQbC^HcglIiMIuM*oL;C8h1Gz?|$X z_r7JEygfy5hirA(?U&|_I!|nA4HZNc`O1y)iV2Y`jp&>)g*d*!wmBE?|9`NqRM$O^jg3;Y?+`M*EB$I%|Q#-VxVFj=iSz6gh zXZ@q8fFS1{2mj7??!1sf9cEqTueRx8EcOd$ckAEzzVa2VO8vU!rnb~@i-_uRBBy`l zRc%mbyNFU=j=pL~|Ek!x1=nW2%Ar-Oi92VPt-*w@vM5@S#)~z*2WvxXuozm2f*+gX zRlyUrkf>nEy1X#ZajeLYQ#KRxfEh8dw_t%GVSprX9-58)?|lRJ=(QE9|HU`ZrikCu ziSthfkNUg&jiJ@R9l&{Qkgk@zy3PiG*X1B%h~HcOF~AG9Yyp%Ru$;;G4^lm&K&+uU z#3QjqLEgY2IAU{i6aUMXwgQ+RC_N9#@gPw33Mu~d<*#4zG)v?tXJe16@F-^YMdC~C z`3yxKzJ8(avud7md}*&@vK~G>5C{=&kLLUlrk&~QI{IWAko%ZEwkHtUYhIZ&m)gZ`j(dLxt9}#^thFPot!JS^NRJbB_QI zKJ|Ll6VPhSaDcHm2OgkvWoJ5ZMBw$j@-IV`8soZ1DEpBI{CW<}t;UH!F*=#(H_r49 z!<(-U4}ooVIi*51o9_HD*}%=fkj9B6wa-AD!|RJ6erEBaouC6m<~4Q@Lj*C-R4hY8 z2~SqxMuF4UzqkN*B&CelbdQ$+hfvEI8PMsBaod1m;K^Tz12Xrd`may@WncIbC^`*r2xYf z2jm6V9|!;4bMSx-p~zE7EWLNSM0mKm&%AZMR(NpLvQj_zf$t5d;iBDw?W^8)Y5p)h>9JTWl5;MdlD2P4pF<=h!J(HvG9g{hv95)qH8QqlN z{`f$^3(Z4K{Veet;1?tlSvj1$oj;f>d;7lXb7Gqy@tbHj+rYclW5kUP7sI4vnJ3;~ zpt_%q$+#nVZB98{6y2i}x+zp*&7lgIiKTg;-?&(geB~?jR5X=Od#K?VQ68s~sM>Wt 
z9|EPqx_6v^KsM|Bv^!Jch;iq-p4xMv6?>qIQl1+JWrhy;x*ImD0~d`6s_-+~0i&OP zkZ&c7{PKhJ;{Mx-P%1G#U6R0zHP=_gEo&7X!|l56?P+^JC1=0y#oF&BdK&VeTX(mr z->*pJSOU~`KBWnZ-pdy+SB^y$F&4hnEWbZXLN~&<{>!A#B2Ky=2N;=xgj)L}ksTp| zZvL1193U3Y4U=6a*V{O>tsfqA?=R#X&u^FMQ>_~UcLS%@I&Z4{QTt)%#Tivy z2K&7G=%yU1hT#!8@3fAcn3%I|Q1CQ@L=M()M)Pyjo(%JkyP%Pch_}=tX2nlnc=M0r zW84+6LO3+nE0m6G-5=Q~3dS&6K|B9BH)JtiawBgnn%4LSn&VZ2dLWCmK;kG_OmOru z{HyFf74GL>GYoiO{ly{Y1xS1)kHJz))o0_QIRVQXtTgai@)oUD!(_6dH4Zim=}CT1 z=+=1?_tnyhr8|7{)dE9!*Z=AZc^k-{VXp|eFY)jG*z{~b{y#G!yg?*Vj}*{?FirHI z2CL&=0tM>Do|o9MQEjVEi%OA_hr7TK62_w$L3CHFMe$dgZXw$Aw^bvUAVoZ9T-7$= zV2CQRAjYm5xj9&nDG_Y+7Nllec)$rzwP<|(gI^A?p}yftHXxVcV*Ae(jFfHh#^GPi zfq8yF5PzAhS0fGN4f#Mp`H!>fGsc`C@Ao2qc?SZny@vI{zb)~<%VH#eVIE+&|1a+T zyKmvY-TgANMUeKXkil=FA?cIrwub7k1$&NlRy6-J@J2QN_gStwSr=QK*Yb3sxdvemzv;hQrCGcWVdv`MW?$f3z7Xxp z7`5zkVBbh<>JvFnyPV8a$&Bj#&{frJoql=ITb0@Lls&yuP{HH$l#u*h-a&h|uku=q z=5-n5#vrs_tpq3!Fi9kg62R2itpv)0Z@=E52G~=8J#H{N!T^z(&KctCmBX1Oh@n$P z|B-8fm|4H6RT?{B`$^-J_Dpg~<)|sgq|s?U8^5BIGdkyQwyHU?VF+JTV5=xHqNQox9MsgfOWpXao>qrFmk^=40Zb4pubzx*0!Ll2$y5z@m7R}@D)OYxsKgAo zw|9=^Y1*%`Ze1U_6dStXN`I$6*PDhfVY0s*$m&LR-fCejXMN=^X(KO{4(zc6jq9C@ zW`Hu900RRfn}(K_1K{>6GP1He9i5$eN>UfI%H=oE2N2{n<*C#Kk;hK@nW^&B3`WLf zW)=}n%Us%a1>zc>8&sI`b=2X zY_@UYCZ5-gCA^|qJZGSzQwn+e_QPm_N+tr$XydjT4gs}PM4@2j5_m zv$Gr4A2^DOe~AI{{YvPnPx%duieC-+aZ#}|zT>&d>q9PWolm_xu5Ub$!*#x478UX} z%4Y1%wxvVU;C3mOk(&BqA{MFC*n>E*j0Erk7erj8!N5G%Qhu*9FM;ZFw&L+k@z|d@ z_qgtCy9am1L8Q!mQ{;H`g410wPoyY1S5@xX`3A{9tsAm72Bv5EpgMgmucTDUV>y@` z2$=ky)k=gq&EEV#&cS~S9{qM@W#tu~GM*=z$C@dX7OsR5h6apiPytqwvrB8&5O4K> zfj`MPi6>Ej`-94Yz|8gu7M_Q#0z%6r$~960MgrDVs|rMYt}WJCDLuR0>2o8&$!v2U zBMp5Jvk_mM^2<9}U#r?zQ|L9Cw58C4TCQD%74Y(yHQGMiR7dz9z7AO}1tv^`2)V%~ z@IRbs^x%Hcigq<5DRodYi=A9Z1H1}ik^j^n2S>Zz4RX70{yZz4hUUt53~aKK@`zXz zh}GB_7K4&RGcO6^6>OFB@U8z+lvVKuuSPA*s(BB(goO|jho4?t+vYm*oN06{>LdIQ zXMS&#}v&o&=xJJXF-&Aq#=g$1T*@T9M`ZU-T6N~by!(~1YHl7c4oBZ7?HP>xZCwK=SSf?vlIf2AC7Zyeu2Fe zJOO)0;*lq13Wj7j&OZ)k63;EJA%5zyo<*8X>NIWb;xIv#iPH-O=aXZKiRzWdBW3Cd 
z+qTBCo<=tq7d^C7cA%%r^E-Lvis5f_i3Swxi$6Jjt04y~eBxN!9(iJItg z2_pqUBW!f=bng(B=zqLN-PUUdkyO3m)o1x053AWGaeTy==3@yT_0fo|beq;MBzWf6 zj4OUGovaVWSCEwpKNUki1*q{BfZKY21)|qNC;vY9eb&^}w0igH=m$TLh`E{l`JTM- z;CeZr&UDuE%L4KB^gA%U#{JeA^e%^PJ9|l9)^uVVp^}YUpJmP#yjj}G?%s+SzV0=X zaUtiuY`i|bQ{Q0AW~G{wHumV|6WB3iFt_6Z@Hpw+d4LIGzEbM)Hh3*Mg?38l)a4Gy zkG1|ZeTc<_drBjm7gK#eu%L6Rfli@mv;O6|H}gI1jA8~pG~VOrjcfZ+y9uX*ha>VN zV%S#sfoYdtc-@b`#5yyx1mAMAT^)%NLpuev&P*U<`q7`rM&CrXo{t1k;s*yhWCA0U z^8MO~XQQvdt4EkVA}l`msLRa}){D?_oZHmUGL=4FH&(-DIZ@`U@pr&nr@Pvu$nYB? z2{$Qq?7dh|-%(54Fp^Skq`Y`+eswt_Xfcsf+Bka0KN^zq?%g}!Boe9)a$xRfEl7HR zL;Xk)7^zAChuAE@$2Ap{zj6cRhj>l|91O^&_ys)xeC;#6WYWm*M^IZWssL&DV%~Z~ zwL2aJpY>8c%s-=|jVsnoJZ&yUCbJ{&@kh!Cb50ao`LH!!o6lTwd2RCR^pQeJ`{0?JnkEi0I;_1Z4UU}+ukFsZ@N)rnLp5i|Bxw#X zW4T;LRM&lOK#BvpC{ena(-{ls370Z70z)s2RJKHg>I*zkLkNy0)Z|hf$FQikaXk~d_&n$xS zq9PINJ1$(WSwISbFx*sxsielM@G^_ngd}bqg>yW{JOi_?2a|}^qQ{blQ^E+%iD<_g zU5%2Mc%|lQxJaW2pw3}GR>Fs@fx_xtOnkgc3BtP1745WkhQ{;cdA8S1Q1tObY)hlj zOrV($+TJ|nvg`~bjzg;!c-8zfeT+$@`GIZkMHsK`_>*nEK~+_u zZmijGo(7*@wOQVRENb{_qH$^#uDi>b(E31LB2CTso54K!p?Mr)CO-|?rV$_P&*txQ z*tScKeWv5!>wP2cpelYy;&-;x{z3)oWUXz$M0PcSe2Bt!cA$*5bW-(TSm|INIvXWO zbGg2^e{!@ntB_fC)%r-pxY@;unsSw~aiYu#>Z4q6b^chH?AWyOig}|;ZlSV=oO)4o z!^u1czvjo=R;!V0^5X_RyX*RD=Zl7&WtTIHI!ER#EePE!~zeq{n^xn;yR<0Sh5;x9QOxBq7IX3i|;WLtfRdvv$zqu||u5!?t9Wie#_XekG zsjMt5Y4ywWoJ@p8uROU3xFMZhZVaFrIhdEOpC_gVue4NZu+Rx(g*)Ys9dYU5D#lCi zX>;!DB|fG{6%@j}b|ev=&R8SGYLj~q{70IKqELF%3gHyl8=D?CFmpRBHDEz90CPTEp^M!SjIHWeb-O@w%OaS zrrl;;q!y$vIZZna*cs89hQ5a7joNG8mzdf3sy&*S{tot?<>Sd2xqY$xj3LhH(c+|G zWTU`38%@X7hQoie|ewdVsc5&+3h=942QFUs`rmihPW5vi3?5lQ^Trv zSUc0(piZp@EhhTIYZBXzdjNS5lsz2bzu=U}uyy3SlBR6TNm^yIZJ(xb-w`$LFEXQx zCn%Dga%CBLq71J$I$-sW{oXXRnC9*W^wl$-YN2A7r;Pw5YkxT+f_!l?<$4x4B7Us!oqi)vcoj7z8kSf57}Fz0fOg%XUO6#x zdMf|8Fy~|)t+s@cq0H!oOQIfa;N$DvU0GOo-2Iika=iw1j~7(FSf`!~q)itl#Trn~ zrXMAYri>)SV2CIhB4*h>DI+y>c{`)8fEO~bG!c1^TyWCyu?g3`jvr|=dpUQ*AQkI3 zsUaiD$0k#_a9=SEt(d#;E1>-L_*OTY-J9iryJ1?b8x$ 
zcJCZICPa86LVkH8GVry>lr3C*lN2^{t2@}XaXegTA9Eqz3=sk?5HZmMKfx!&*kkyTAg*m#9Yb}tW#YL+uP?%OW zS=4Hvm-53U3it1D&$nqEvhxBhhPKG@fu#SVAn(Y=BvyKXaDGWSZCxnrtUdIy); zQsXX%+Ss{-&|J3bo|HKBLRK_MR?wO}$H*!$BMU+rExwIxbFUj(Pi`jD?L%@;7i42)vP4@W4r*wP zcCG%TV$$&DNu+I+Myi*{`OmRx>yOshfsLE^(){%P!(_G_RQ?i1o|GQ8w9)1Fc+X*i z*rew$wf*8}yaQO0?CRH` zaBiP@=ZV8ZK4@s;5<>FVM(uvNqSPf_$Q>Z)v^@4~IH zXwiN1`_)Z3rIH`3S~B@e*tf=;fX~mG87Vb~l=pGNPL5sEj@Gj0QsuG5IbX6}TcZxH zl^UhZUfGq%O*I%G4i2gSW{V9hSmNf{{HGLP5^$Xc4*m4b{3z!3LE}8bP5`JK?~Fg8 z-f3_t%!>C1`rnDW=Up53(kc19Xd~)tSNjDFnhBy0Wh>db);~h;w7c4no=RtmnA%pT zk;~IF@HP7WynIv%e6n^>X=rS4r1@;GYIb-o%khpRG6otSZD1E0lyX)>OJ36~l5-cU z$l~0~n_G}kwXwn4!|{(I=FngqwbdEpl$eRPfZ9`M8YMkW@SO6Yq@$-B7)Fm`F*y1D zaJ@tBp!XI3!O2Mn_tKi0JR$VZ*6p##$`j#mPomQ(>@^iH->WYsfwR}98_oHf5ks@& z*~F>me3pYRuRvT%fbxjfB~>Yvw`Up@@*dK7iuU{5%mzt}Pm#nQ#}arDz-=H@%H9C7 z04}GOF*Y1mC#5`1_0ywfU%5v5GK5Z_u7$?m=MKkE`%7G=V3M$N`)l5XK`)Z?m zQSUI*%+?!{l^Ru~yb2asHGi0lFn^5IaIwO^nXs%@@rd(e@GQ%ilJq!vfheC8sXl<5Y5|jok5a_@ zv!VmEvpbb9YH(->+SS=yfQKz}(p0?4a|*!XgEeMXo_ zT1x7mWHv|kWzZS+%LZ|NQS?O$5h+n{SZV3yJdWK#6*{yyC_UN@NWI}R(vjY^4Gn{Q z$+M4t86sMXLG;c$$bju&WMkD6HQ4`h{-&cyC zGc1nq!)GDq8Ah3xw|c-4S@`$_6dVne(JSF?-tF7z7FP=U7GJ{&R$teOeoTu#CW;}( zbC2O%V55P0eA6qU@XFLKet8pgIR~FqFYCKaBSLa;xFRP5FzeO?!WN$o>&=gM$bn?- z7l4uz0_&#`n!dHCjc`T82gBwUQlSOIJg=5n0l-Nqs;=LI$?y-e>r;hzO7%@!b#I!s z*+~uS<81aB3u0Poul3y5H`g?T1vm_^j3bmzHHAL}3B~@rIEiIY9ynoIxcd;;>`F9$ zujW}DAzooD;#uiS*LJPOt2=o5r1Uw_wvo+}b5iQi1QS2J8N)+r@!l--Z@@=B3I?3T z8(cQN)A-Ur#CL{jrvigO-kwefhCJd#3BMqrC`kopkW^2~qa{RZqh4zpOWzZAbvE%< zS1%d7%VI^2X4Qqu7!n$)P{Ct!60G=KxGS&;F*y1pj5eR`&-sbux@og#Z|ECA_^Eg7 z#!^&7JQIyFpoKq(Vk_}HW)jrXBfp7aoREnulM0WOPS(7Hq*caXuhd@qP_$BsY+C?< zXfB<3hSR+4Taqt^qJ--)?q6`g0nqV_dEjuWUx_~vHOF@vf^-hnPa0CKvp7`$bZfb< zwbD+;Yo@P9GE2aF-^%T?WDW56eC^+3*1)lRwj6$0SX?Ln1Ji@Pm7}WcVpStWz#60J z(YX>GP2X2TZ+3Oa2cj5r-rjTAK07z-e0osB{JUsF9^Ye_NqM4E4v{=!r6I$CJALr(QjEaMpkdOjsVSGh2Ym*LI z|H~}oDJm{wJ*&v60`s#z|yR9~3XmyR-F*)smd@s~vxX_x0 zhBn5W7V}~qjnGiQDS$?DV48Wg38~WC089kBLMO2J+KQP% 
z(ynH3t91*8mkn(Nz%kTr&CYA;O@IK$KuAa^B`qUk6d4uO7ZDjb6B{4j=G6~>{SvG8 zFD`%|bpuNTpE53d2JQ6dn6KT(ez``3FFq`^U6-RdhRbg)e1$`OxpRx#LLOomp(VR= zKHdK|!>Bn7SO={o=uaNtRLD$wjh+6<_ofx)^s;Z)HJvL#H?_03Zr&tbh>hjt+2XVJ)qFnZpdU`8 zTj@j$0}*0C?uP8-AAvhMfPZcMpR0oton}J~9?yN_Ap2A}-RyXe;v}16{AaDJ&Fs#B zc`xa@P$8qC+Kz(MiU|$v+6!^yHfiaBPczenx+BbQWEM-k!Lz1W-Gxu}t+*XiP(H6} zQms--4Joeu1fMuGmhd$xh=j%IJR2HFIpueW54B>_+sd z@a&R^veIZpi16bURcgo*sM;_*1ty?ArF9MG-1<_X3|4Ndsx;N7*!LS|8_u`{fC<{o zdx>Qv%+?H@qzI{x&Ww>4ut*PsnE;jtINeRq8U11-lk%#-ATMyc2LeHTL|H$i(+y82 zg{qD_(iJIDUg9LCo{!M$!MBbHNuTGulIV?OP=B{*E0y=+g~jle?dm><@<5@N{_V8< z5f!q;n;#ORSFT2nwmb|Sd+H_0kLtW0;??zZ2&PpeN4~UC-Z>cv>!@_iPOAW=)rh67 zYCdorUI@rAsU@}^(zm>5mU4c1ETB}9$YfkcL2WK_K%eR~uR?kL`p&iETXpLKRH;_3 zc-!Ep{?W@|#`7tDfs9y%g1)pI(fvLR8%@oa{91yMDjsf4nEoN6Y*t(lP3&WLkIT^_ zu(VadWv8*k+2BRCg2s!~>fJ+Csd4J8v^(HIT>J~o3Bc5{prQFyvlBs9u42Nw$5X%S zIH#mtMmVq2j+;98sQY#;|^IfC(l zj}3eUo5v2;D!!Ys3}h*<^y0;*?D}H)8Hd2SdX;y9E8*>^KdjxA{yI<B2phnkbSQ zhLMR&S~X`zR(F{j4>~kmXZ&!pPyCJb`N2tIkDXVcUF~*F+(wyGQedK?sX2o?NY7ul z-Us!HaQwkm7!jYeWa|MwKE$~shWx+u4=YOOQ+qeIKn3|fm^kA5RyjYEVw9PN% z!|8p_+qd-_$Qnjr%BsMn62W@K(+O~u zqYqm{8wDZwc!%#cMmQYS3n(kBBQ_u0%u3sq@Lis$7%qEm92UV$8aNl|nn=u^nKL3d zo5McV^kmOrAWJxjO2<#+8ovqZd0WYA_?((pW(N1QKZ@ZivCoLg`?EJ-Jr}=YO5I0m zJXFvQ$B$7<2i3s|7o++M=!C_4*Y{|v?2CyunJRe;of?OWw*o5pZ6<}&s#A|BgcYmt zZ>hdcVR|muD~4_J8(3bAi5Q>YoY!u|A%p(oU;^Lb9}e=4N!oGIF8Cj9oq0S|ZP>@r z$VkfANz9P3hqA9@8ap9nEh;G@LPGYkFCi7#Q(2Ow2xSdLC{amd&7O5aqXzGF^gQqT z{PFtu!{_77nRD)Q-}kxh`}$qq-y?zGym-__hFc+U16zgJPOByYALg0=F!U^xx%zK( zEt)rzeat4;^k#!El$Y^2U>R07r&OmGDxVi<#e3S-gu+lnL|~nW&LDL^49Uc1n4N_} z`gK^q7OcFxD9DNwoY9rXDX+pU8&Njx@aMjC!N~wtZ)df{p+BAi6B#dze7+r%mV9%* z1>1XR@JRHd2h&4q-Va~iNKE-ZZer)|e`Oi>EBQi0(0;hdy^iybY{ZNU&VIevcF#vv zAcHMNhd8a%VTF{8@8%63-#-cKeiLYDE~Z2@iMiz!D_OCmbCyGvde8Pwzh6upJYC!-lgr3#|d6{nNLK=cU{r7rk zaMG=ekDB6W!U9r_MI#*KkuKV~?ZOFX0|Q3H{Z#p=D93qt7R|0iS-zrenbaGkS0O0N zz0W6LTy=O53uQn=CvcuEX4EPkzLNGZVi>y3RVIygod8(}?31n@3Nz zz>`2_=aZFw6e;Uty@>l|@%kGl-1yU-1T5e7FpsUJC+qIpI|567t3NRLxPo=rvxiSk 
z>>&=>{EY7=wmC3|5R?#yUMEgC-(pUnI-HLI$-@7C=TnMRa{i|3Dh0AS_qqZUA_;H- zQ;n+L0EY_T!|cGaAoC@2mNsl>@KoQ0H&8w+B7Jxc^MI{Oy{rw|?+GmnngLpOR3?`D zh6x-U*yMPxiG_Uk-o@R=jcF7~PZ(Ix&Ts%uJy2!{sj}WQW!h5dA!;Zn+}))9Gy%Re zW3V{)87sDG-(|r&P?vx3Wc7%5M!@&+Poz?MGbq<-)T28_$C937R9{%LJ_NXMJ}&Qn?WfH#|99Q?5x3HRy&Li%d&g>;}YEFH#Bx= z(F94b9B+PL?2H?(ykvWA?dydZI9Tyr8*qaRfvEE6RLxb6@ss4+XbBO89x`>ev#U-d8QJq zpU$qX(kXb)9)E0Ij%VhUA(Z01T`mBnt!3@fx9V<}bSPazz=)_DA-54Q;;x;ND?GnJ zoK6hhvqPTF8P_#d2Bvy)Zr`d2U&p*^B+u*q-GN*dwF-1oLWxH`6~W_sAi=!0W|8xQy@=k>4O zkO)pW1$9B#B4;#GrNZ#L8s4dk>&0jE;Ov zPUK3O3EjaUu2?uX=gKR4HjGuftG)f0V7FuY?fWTq*P=gR0?ZOx+f%CFM%PmgbNWbT z+51g!=#uOLAMmMMDL8WAnUKD*@rf`gd-FUak8c@$_?uQ>by=CpJS)PykaaLhgbpEG z?&1(vLLDBaG09ZXhPof~H|=1sl3vZ7{5!9+lF@d3a>iK=fC>f`!(VC9SJmw5J9(29 zKNYeq^R3Q({rJNM(j~~Wi{(Z=L?DnBGN%jwj1!pb&vU$;YhI{XtUp_SsIRm0gC_MZ z&-qsw3VKX_%enMZ&QCAPHBU$hb`kvmI=Mpq*(IS#0ODmy)HU2O?G~#*2lK7;4rT$3 zlbGTF58f`YWW25LsZgHUOi<$)@j-`SgT|soZU_H--8~GDG^Ku^vF~NZgmNpmrV6&N z<2yFW)LRn-8pJoWSB6XNHErsyJ^voL18|5wizU=X1c&&$kp5S4QqpKyWbg+Wfyo1| zW?sO;;hY`H2(dPIq3G^O&|iBEOzy)0AJ6PSpVTH8w54~u)B7LH46pS{j0tQ3`9_TP z(kc4j^Y1hCFTBq{s&B+*iUmVnJ7+Bb2hm(P)tfx74;)0tgH6Lw2W2Go{aqI-{gK&5 zvpbKrirk+hPO@4p0I#f~m(qmm-u%sDxbYdM$(iL&2PBVnQ6g~U>2T4>Ylvn)%E|k< zrn2&jRICM)-)0iF<*8FPJ()?mmH^mzx#wG=4^RlOYTE5H5!l3QNs=Y!H^YEAzl=AzQF1+^(0?v&2sZU(G3$_(bW=6iMOnyP^ z1@%h85w8V@}x1S0}|pnAR+SnB_SsMLqfa-Qu85% zgoq(#rB4>T=?d-XxvAT?c9u*>A++WbI%ZJ2~V1Oc~* z$}BRsLn;y#(}xe;z7_i(sDzn;N%Aqy)0L6{n**UFCZ^C1qMk}V8%{Ox7wEbm&?&)=Qk%XlI)R@%*P$o6otEhn4&_(BW*W~c z|8Ond$Bl-_9}8ige&Yq4z-*9Vsr3?m{PQbGlUD*hup2dt-{W$e;la5;=`b}601qWJ zr~}KkZHIY}(vD6`H$?HUv9TShIOmSX9maCc-lEZ!>$|ta#`g6Tzzm6rS**Nl1fyw}Ot94^|Rj%<6m zEHLfuQeKW~`ytQd#DJnR0B~XC@UH1^IUbrwi6<@d33j@dXg{|oANtdn2Ir6X3DZXm z0s$Z7L>kYQG-uNQweY>_B8&SrwQypEV1G5=Ll=!%U$R%_KBLF_L2`Vwis(7i-0nmv zE=zl#!>pr-lh$dBbibU&-0-9a!i^InjFa`T!u-xAAQdEj6kr^Qm==;@XP|XJ;@f7Px-cv)rI;l{ju{Uje@Nux zQ4{1#vK``N4!~+4a=hY|lMtMON9(IvAfYY&!okzgjBpaCCjC%-ECGc}I6?F8ck~Gg z3hshn(o|4fyxm5q)k&+73ET*gWYydqfD-C8($TWW?1e(BpPlrQvRv?o*SkneZy#*o 
zz2}3?|Km`O8aKT+Foe*?GGhJrzm46e?dEpSd%pj(NBPKh zUUel$Gze0~WP=bu^Y+*ixXfZ9N;&4DRqS*{4&k%p~;1DH(6<84QW)nrNrGq>Wr@ zsF^QC?(8I^Uwu^que<^6o0>ai4Si71{S;jv)n6n;2D_byh~*BbNZBn~89)U|JK%8Fs&h7x<=^~OU*zM=jrOai6F*bA7h2Fgh`hl4i*9@W}Vm@vhhl#B)is3|S#@vD}? zjYHFzxYsW4Wad*c#f)CWNxM0LhDeBWz}5ER#m9oxfGgB}8nC0(vNY>#iiH0+b-UFm zF+z2Km@gl-$#kOqj`S%I592f5gg*@-RI`-j4ts*jmtM1m7V45!UwHznhLn)d5k^Yc z7N$^Ma@;!gXPt(GgdyX~<$YPm{c;Vwsw|xMgUGOOn^Snp-i9KjZ+gbU2tx+2nM5|2 zNS7q2k@ZrUyEZO^jP7aendY65PE!?W5VodjP?wHG%2-wjwkR<9;>LlKq7MbI-lWp? z$+lb@kohI~-@IM0HCb}0{%MS-OYakp@gmhJrI>n(7+nh2n7z7&$4?)}gqm+bMG(>A zRyz-O1*E~4d3VTu*KT$cF)D;(Mlyl*|dBGaODD3bI(e1TIwFf_KFk@(LQBqd2 z0IopQCi`X)#kJWz`~$O*t~m$Jgmh{j5H3rJ^Zsvb5SqbBS=>N)0xnP1xKOP1k}xkWinIyc4VRri;T^*79q zw+Qzjm_pwe$sf{m6AhFZnt>-lFHTT2Imt`*p7y*HDdnT176xcP13f(oEVdo*3_N$? za&x$%%)c5RDUy3dPXagKc z3+|+$FPXcZNRF@To`QKRVPbO9;%Q3??G02Yi|}dr24SJ`%xtuOOj`{b8Txt-HKi&_ zjhgaHS`@IPI-@pC5X?9^fu(OAvU+Bfu6rC_+a^DiLV9hewGgH@d>J_N!mzv~5q(3K zVjoktK+68w^Y^D0!BnSXpjx`5(h#~R2C-4`R9h@Wn9tRH<@{DHYk)=b0x=4Xi5lAd zMOxzxJ>3~%<1whX#|a1sl-BCLP5}@hvcAwA65V>VNh|2N21=~v;1`>dP43}u_rFUx zQxLAUjN1o^|0mtR_0&w;RV)re#ZHt%y1%TNgP3K{%5gSLx9x1U_MxfV^3#@@u*8x0 zRMQ((!F!YPhqZfTGc0o~++x${>X$T7m6^g`L}~wXol6Nd*5wyTLD1}SzDuY1=s0Zb zFIc&pp|c>{f_SpdpJ(`opRU5thtO=kq8<7f@5c<Z0<(xjO zgJ%Mzxwf`eD~*@v2-DmVgdq{LFY_L3CESh13+d&Z6dwP26Ew+zfHSJK;5&plFz`VN z&8-vi>tq((eJRL=y?6g$-w&n2(>hRNt9aWAZ!MV7}9V!e?YEdZ4d0(&({K;4{A1x{rm6ziJpPolPeVD|A7Xb(XnekAc znhDVk^V7q_Bikfhp_skLb$1re6|T#4R4Z~}e%6>CHIrqTwl#hap-~iOWt}7)%?KoH z;(IZpx`H3~qghS9bxUIg^xm`A>6ykL{W&wmzVjLm!cYkA$Ekp%zow?DTBF+ITy~Ct zokQ+=@V1RV0qB5P!6pwn(ZYXkFW0sG$Fi4MDPIjJ9zo-eMf}(Az-f# zdpF+%f+#ph4o=C=v6Tv)l%uDc#KX-mtcQ9d!P^<7G{4^@?Ou@Mq71KsalP)yS>fl>@{Ub=?=u}MmIawr;hQGFq%e{#Sk#_p&W1qTx&+N%1<5Rg){{xW1BM`#tZ`UOhgW}#D?Oyp3hZT3f z9v#Z~_ZsaOp;A=>97sY#U`Kr~x6O+PWnT9O1+KYK5(>|<%ab?4;$1oB6=Fnk^=dYb zv3gESSmm;B^P57JQBYC0y{V6eOVzSta{hQBURYIK7qfDFvRbxumsijE&TDaVa`91F zZ}~JQC$Qk< zWaC|(|KUTiAx7z6AC6!iy~U#@|I-vm3L@S0EOy*j0Vl(-O^#}m3bpuXS9meDtg4+) 
z@ps^9WEJr@9ihI~2kz?mm1^=4_MWf*J*h2j$xHtyUbt=Ygqzjj}-cLLbu$VF%eD~^9|1stLtC#XXuNek5Z5R{;E7|?l+7`V zE4{WPCQcTByH}DYJfxWi?zJ}h#=ey@Q4czv=%gOFCK7)2&qv!V-k*o3p$SkaG#MtB z*9}**6dZaT&LvD6Ts3%ZHkuok|E9K)!`3)^H6-43ajD@FX)8rGXypclkFSiM?q^+} z+QKqJOyHT|_3-Ivk*LkZEp}YOqR@io86SblulyBU{RaK6;g*M56JA)ipSjNhV2%4o z8s&BTdl;3(JB=rl$$Uw}RS8c0au0jMW*;`Wm7?6Tc3W#VFe-Fl=gvj1F@9ZaMwXg25|XAUKC??i_XPBTJ+OPp2uT1!Vs zsAEF!-;r?=(xt8K+aNSXx|qZZn658J_Hq=_;-qO=wEs>NS72>ks(MTg4Y`t=sJR-S zfkLkfxk9>|{6q1IXTPLf&Tx$bST9_A81&jGBlb?{SLlPZd}xl7o74NZN5y}-Di#4g z(6Pq+9Y)+pKQjF`4R})QabyWU+xg0E``Yd4w)Qew3B4L7HIQiFM_>CWsf2j?`u_ly C#<+w4 diff --git a/applications/Chat/coati/ray/assets/2m2t_quantize.png b/applications/Chat/coati/ray/assets/2m2t_quantize.png deleted file mode 100644 index 32f758bfd249f205f7b8080242f2e3f8d022e3e3..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 134715 zcmeEP2RxPE8%K*&Xs2aFsbsH`ajhs@DT>6!wP*H7REWqb86gcNm7-*{Q!F*3p&X67c^NB1);DDLM2mq8074MX%m8#xUfG&39a zYX3tFttHGfOmwAfeNZP+`tY@jQwZ2fn!v+9_^??YTr#MnYFU}GJ) zQO6Rag*Jz79=K#-Y;0hGF&TbQ)7Z!et!Xi=p@x~6vDNTq+QtUZD51BSfPoCV341|N zQ^NrFsxn5)LU*7oFBjB3Z0lWUjE*k+W;Ryn7efuWF?7LPS3}F#YT*9RM(nH_kXY#q zwgXZQu?l=JwC?96e!jBtaSrt7sD^ei+*x(p%}PC!4*7Q zgJl!@R5WZo2HHWI4LuQD>lbreK0$XevuC`oPxAYRJN| zb3jdWk=6}v2vg+G#GiX$WPd^cKA3oaE+>dVvhhvbAbEa2NWf$xtwAeirHipZE1GEZ zn=LCaodQikSYNCni)$ES46r=gg*G^f#;PA^gZ%+v8^y)@jRJlxgDF3fdZTL-JP$*X z!VG^$P>~&;@`i~DH$>Wri3;x|Q_;})C}c>m6ARK*^?x_E(6B)K4!jvO@;h1!gZw(& zGGQNpM`iiOPIClJ*?93u=78xNGDv@>evmVVP(NW!3oy0zn;XPTFh23b7#JX=P+J?# zt*Hqpf-`q(@$el10y9xsLDn8tVA#+@R6|e_A0c7$Hxbz!lzIdmMa(cjDJPYXS{i6x zZM=l!*5pMW(Vikg!iIiY2nkKk2tq1~D@`gDwY9a`M-fNvBivlvlSM_?I?jYR0%rm1 zFCeI>AU&x>)Z#+(YH{MzXQOw!|ZH7S`foYkYRa!$IIKP9c zk1&?NYsGryhzWbRr}9xK!5B6^$fdVHf-x{cGyPx;JL?z%wW$xsA&U!( z8?FInhvO-vys^O-C#Hdu9b}Ewia|CUUnnrdmNAE_Ff9-|I;h4|Ks4Ma3dd#*&CS@@ z@HcjA+9T6HATiHyF@@&@1jX_C32g9$G9fXA--htb2qbR(14^>NUM-~Y@N9x0<&*`k z@Cpoqkc1MrP68!i8J!kN;>KJ`W*Qiy|5%Lh2ULft47-f1kkf*oI$l!%%lX7C!Ed58 
zlqG1Apv09=bw=gv|nyjkrVj|?^j7kP&T z8!$0vAlP90TVlKMkQ;(`XMo)B!tW^3{r6@(a6mKUFuCWPK}O&@N-)T00zUkM84rU= z2@_w;C-jU5c6MBU4aYf~dWdD- z{-n3xrp6!!Idv%y?CiKmE)JW==Yvl??I%~f+@^-;F#%=Ju%P2x9VRMRHF`Se|WB4J6 z$zy2Y_dgUQjGyW-l?P*n>CAb67jF<`9~Pz&OwPkN=n4@MPqaH|Jttp$CpY#Q5`8G= z;UA2?#=&ud$a&y|mZSdr(FRQUVbKO`DTYTIkRvez(FW&V6Woo1+>pC51LWp{4BRNv zJ+(Ox*gR)YAi&J#0NA7q)rR;*l?QVmqZY#0DjnF$0tOo9=3w_EHpdQJ76&_}z*X!M zLFEh+Y@Gyc1GF{Nde}xPY$*g@O*H-uen)t@IKd3Vff`m)V!(+Xwo3#GWrh`wfV5_~ zDnZl!-4O_ygenslTK}d!2NXLF zRWcapLI2zOix`Zrt^qq2?kc~is2ms(wps!^s|s2{7AIQ7(s0^~O|Zi%GV#<4)@`iZ z(5g4Y21xq|qSORC*RM_Qm~FvOm6v|K#wt0e1v6B-1^Wd%eyp1HtN(uNmx0nSgVq54 zZRoaP6<>zg0%(&H(hv+hTMO4(ppCSUwqn#;F_WbKY%`jHdo< z@M0O>=0HU!s{Ob~1-uUNt2K?$O4tB38rveq3|SC3pJyaeH*OphByX|u5^%X1cCKG% z2>+oC>30;yZ9@j#i&1RIFcrti)E}%GHA$vsY(4}hG$Rc6A)t!{WX#7HAD zqWvUT5nPxKqSOd?c8Z+_tjR)DGx?#Jk*=58u6lA6MXz&iygJefmRlwRe#lOA2Abwil$-u|k z5ZO4hVjcwLJ<=@IDSE@t?;R2vSU4&OhG16W}%gy#0h(Hj>5= z-Zlt-v9m&u+u%+BNa_Z^BWOLCMul9tnv^;nc*ii=X)M#8k%4zqW9JNl@>7?Ffwzyi zxiAEhnSr;rW2Y!2>+t?4I1U*=msk<>o~jH3y#2>Tmk1?&18;XHN^U&!*|d@yHw-6; z?Fo}Z>zUiJfVcl}cMvZ3^zUNO!7jc*2>xK9di)gy@eX3#0uPURbO&x)1s+dLoMGuT za9Kgf3?k4dLfr*1K{gHAeO$wjccPt)jS+ae%mx8~2=#y?(qAnsK()niYU9I<{AWv) z;3e|7)fY~MfW-u6Kf*tlvXYT_N5IC4J73#O;Q%xUqAHFzP3(y&*oO>Xj)BYJ zj=IAeQ72amt*v3%e>w&@s74Ev;vIzMp{ES+8JWz1f8>KA^81Jn!5w&}LUISbHU->) zp`~8{3I9BIAXUO=bfD>KnwW)eZu-9}f%Bzs!IPP17L2L=U$cP0>^C!`B3&@E2u9K1 z+4%pw`z`O_4H`2yg^ddU{zn}Q2xfzsMKFps20(c{>lm=!j`X-D>P5p@NeCgIW-l5S z|HTP6{!Tv)u3?Ci@Pa#=Ah-j2%+utU;o`;r=T83_lL*I*U~JA9BKLn@m7w_ocFbl> zv)~T=d+5P9?A{1w5sIS0sYd_j-Qc01xO3JB$86>l?VrUK#;<6E=0#7S;3;0@a?-qL zwvolgXBvzitlIa_1@?n7GBPy6HAypPgZ|k_ejMB(nAv>{q5I$IMvsG8BRsR2)2x3M zKKOHH5sae!v*5z`c?WxDGpA_(EV%IJ%pw#;gL5E$6|c?+7(LRBo^do9PZ8gL1n(S_ z5dwCj@z(qOC(;DQ!5xAzI{$2%z&MyiC~h=dIQUmH#NQe7{5i7-M$!IRaAEwsn{hXK zeD#w5rksvHXBNRI+JGyFu#uBoZ5}UX4Ns%L_sa$+6DW5RqT4hUpvOA^_;ggnUJqnB z11R@c*TN7kQ-`gIwI5_V!?kS=3>Co|L!}$IJQm`vUsldDGuB5VN|X#1w;5peZ(+;QvWBR>OKCB!t5K#1l+|D$a=ul&=5e=G;hUb8yBQI2I3ONUhF|l8Z#?!>SPa? 
zBm`&h?jRei`8@fXDsTZ?Lf)%Aeu~dvWqthWiwn9E^mgruQyi9ggpg0`{#86_Swp4C z|Kpoh$Il&@8;~c-#tO}Q2=3rDL9nks=@tl2=GCz6#e`D5AHOz_eKLj!4yR%x=?zOL zG65L&h$f%j_>!jv3X1;8rr{w~`v11!VFlmDE*X%UHge@;u%aYVRPg1BPn~MQkq}g- z4mc}+QJ4qY&fms)u!Z`4pa(fq1QO`M31oj~ls*OQgIqI0VIO2cMq%HWs|!yD<}$|W z!ngnoged%02jTP8eyf8pG)5>)#0M)3%(f=~FB&;m9u+PGfwOUH+{n^@vT7urL)ZsY z1)=)`klZN6B1ToLi4zodL?Qo`9j<XCtf+H4^CPr<- z05}IV?f1Mv&>k;-Fm(_no&+yKhA_uLQz$TmE$$#>Gay$?;PghBOF8yx|>XL)4!Bnv(+>{@a!62@`w9MH@!RhKM~3{Dn&r z!eQbN5cQv`EjbQ84Qc_jGJ&x2c;{6ZJDzl$7;jqBpX0AU?<7>CAjL6JowfV zMr3f0l1?%{l8im-g8U{JRDe|qvNZ^eg3TXt;RSZ(Ctaau&XfnyuqFH|IZta;l&CeHG(G`gc`wYnI2Z*x(xqk)d*I>1`v-AF|AsK5Bwf6 zH4I@;|E9$o!74&gvT-Hs{nMO-@$nh6hcJFR6hi&q9^$u$Fslee$#}=WxtZ~taZ*uc z*mdT`O`7~SWPDIW2-kIn(dn@*ef&jJ#W;9FAO?pI-oNp$O%>x{)d+uVre<}%F`z1c zrR)6XtRfU8`&VI}@pBIL$7W2){#DfH&sjw%N;U=%h%j8|nXZ4s_pf3we6WZRun%pwpM zdax?QKj(-|fLR2iX#Xmz@CUq`c`y23#T6#NECNw9cuP9MMovfZ$WeEtj=EoRa4#f8 zICzUZV%Y^wdPZh&!q+A}*_H#6dBo=Czo@JT%%9(vG=RX4-G+4`>VY_w7158JVJ>FenQ@8t?6UGUKI77(0ug6buLMtH2 zi3>>)^!DqCQyi9ggpkAL9_g>Z26)o_6;523X{hQQWS9`X%ztWU>iD?>a|3cF2cZH4 zckr5^DRU=bcNQ`64%Lm7*jsrtSef;v!4h=I4VKXyhE6`liK*QV|qX|M2S+hFYVv!Ow`(KD2o?(V7GIjyMP82rm~Wm_0dA!>U!{M$NEG zXKZPviG3Mpfwxitd>H%h)fz&2CQmN;T6`NcA3#BWUm<6ynes4Fimh5mubgMiyu@ z@Ef)mB1TigKp6VL5Tm7K062^?Lz`o4HI86Ei(&-?q9kBF7m9&P6kNfT61*b831ed; zv6XQnT&&wzxuF$Ph+R;iZH6|`u)rLJ1QU`~SOj!n2H*+-+{9R0+Z+v~0z7Vrk(0wZ zSaF7`^dM*ZG20u~ZCrrj*dv4bZ3pyO6f4D8=3svi#nPnz3)ZIW$9@^OY0%ftx{Z2kz(A8TcLi!b*@viVM=y ziPwe&#dcYFd7;NaJcPaxu$h9qv=O{fgsj#dnyG$&XAEbV4L4JRUeb_t>o;1kc^hU$ z%ttjg{gw;1Rex^Ha3?v~qW{hn;)F1P-?~C@RWSmyQ=G$H8$;_6uG2_EHKP3#St?xI z5wcw)M0bjPAFNqJIN0MY@XG}Q))V%Lu;zw`8=}g8#oWN=5#i9{IIvT_TnfYG5Y4#Z zJbMH%1pD1c2yx1AAzplQ@QC^HuiQ;DC=4TY0g8|Y11NS@1aI(RcJMXWtw(zP!yNnZ z24K9fx(+Sq;mGU1&KpRNVAy@eScN-5{GV0$OaII`tNzvO=W$p(2&RHrMJP&!Z@KIL za}@$H8Uz^O75EvmhyQBk^Z1nvX4RA{83>KcjIoFb76JxefZUjAnCMCyYhmG9=%N-z zL&r?RaNs-qWe6zd6E;UnTWDgKc}=)q)4k#s)n~#|*}$5f>COwlL5-fMl`iSYou$<^XU6&w(0R7#kZ{U`&Qz 
z)HF6SLTiF?BN}R8*EfbY(>6AM&~WHsCK@{M8wYxdy`Ts{Yuu~K7%dCkffw+??mM>i zE;L3*7k=~Lo?1fvqH7GJ-3WO~v z!g-m(bItJ4WDL>VC^W$l{HX*Hf)1PINtbinot6ZXH$N5R#xtKyE4krJm?0RMFb*x76k5aR zJAsHc+#Q6=O%P|6O$DuITCQQd072_v+YR7@t{`iNh-3+NW5~qVG^hi(_8sqh3gf$z zZs6GF9#ksN}>;8u$f>y-Wffx44-7P!GY1x z%rx2i_VK+Ie(RV+g~2cYQZRTI0m5H}7wD1x6MSv*)eCX}5f%-Lroh-7ly-y&y0D3w z`$4=l9bnw(xlv<;V<7en+slEEjrbCs@U?@bj6g6swlPH1qksz#mNapoYUHMU*waS{ z^c3#d$IGP2j(x{!b)h*2Z(cdxj>7C4v-N=C0wi(av>&9om^xg64L7_U&_#p+8A>Gi z&j1U9nlxrs;Ov+kAOnIkc>ScQ!yVXgLt#P&9>!1cncTCFSAB7VVS>gUCQfl!<`F_Z zt>^@z8Ai+=00<7^*?@9)de~P0*D2lJJTpf<426XoA_Zk5`+=J{dy< z$Aei6T6g4R40}YA?z_Wj4+wh0;q0G;#zU&~|KNC7_QKdD1I|ccg>HdW6IMB-YQpdy zQdHn}lP@Z8B!qt#-+w&JgKg(;<2=|x{WkdmcBTj<&}09rqx30YALN=53i}{)fQXTh zAj_CHr%eauGUW+SaN)z>>L3h_5egIW!3zJ+fEt1tCC$qir;koUn3FWDo!k7-VVV#mcMa8RRBsDd{OYLLn!;hO(K8WcM&Zh%9KaY1Nf z#V3EY#}i*jG|07)RQ@j|LcuQeuM~1z)FzBfsB!QHq4s$3fWe@3(xVOzc$HyjQI3!y z$#Kw>Fq5G;aM#KS8j+kZ!SO&a(+Ws(!0W$50#3O3P~)dI3{k)^25v6M^ims6Rve-> z!6rmanAkHe+A!)hMC@T;E}TdMnRrBKOBks#11{CDy6F@2aR#*jR}JvatMF>Y|mu{~29Z_(~Q%8Tm1Y4Bu$p$gtZ^`wRs?H*Q* zn*h%;!6!g?&oaT+5T1%BPcHgH^?^eF|KYPt@X-o(ZxG%Efu3c8ulN7VStj@f51)}R zQF}8~eC`0Y6dkt0+_(=37(C}>EcWKltN)L_g?3n?C+=z)ysq~+EEBvGm^2uk;LkF_JA4SR7?+uX4^e|klp14?#u|?1&(J3R zVRpsKdonUd-coJ2B@SfDC07``rFWh!4XgJSt>)` z+XT!WK?$37AjgR&C_3%`yy8--(()$Fmh=YU+w(b|t(9~;qr;%BemRjfaW zH~vVmro%({OBD+;e!o>LIE|V>9E#Bd9m=8m&Cje5cFO)6wHbM2(TMirRh#kG@$N77 z4G!l7g{#F{X6^PTBBCLZ6ctvoR{I`GX1#Q8{t4N6sO2OZEIuqy6*|f=|I~b9_N}wF zopK>vRPIcAb*rXU0E3XwoY@*5mQuE?Sb2=<;5<=cVUnnHx>E+P^;Ra_CDV{Z(R79@ z?!W!c;$31=%=PQn(`?u2pxO_Wnm^1cxRRZMrawqSM1uTtlz8;TivESOFwtxw#2+bY zohRZe_^HQ-$TfLA^+J)k#FP!p$9llONb$q*A5GL-V{LMFkUA5Q&R#&w8{*8y#Wh}L-X)hXyIHRhIf&8wPcdD-6MwtUGQ!zgu|Q1ZJW zZTY2!T6W=m2~N5eaxAjh}KxaFY%nfl{Rl*ku!}AD$=3FY}tXz ze0^UsqoSogI(}rgw+@y(>7{W@NQm-2vqTtulf;?D(xD!C!Or`Mk8!;URMb%}73iwU zZMvZj#q0ZQ@^p61b==et3_7EenCceVvYyDu@G{o)4OZc5gT zfGd65|x zG+VtSgF^uKUoFTpD}0mo!GJ;K@@ab;k5?y7dNhlV&G^t)by{Cr~JDSrQi5~0nHy^Qc z_2D@$%(`Ykrm_EJWx_sb6{BOnUx}R|1@dFo~1*uMjbL*pC 
zUwtOo%c*^O+3XuAB2>xCz|-Mv`K)xoC~oRd_ND-**H7KUd-DZ1owqYcwTlD2K zC*6NQTKb5A_>$`p4QJFi`^ zW-3T^$h(>$u#Pcl9#$E_AF0g^(D_y1|4UZ7HkUUyH>=;K3OIYhPuAZ!s+Rjg9dl*I zg-1dwN%}`3^*h?;>@-&rNZ)TXGWPFLMz$u}+0Lq@ZT_st>;2l`(@R+dLyaXw0d(g*t-UFJ_7uTLfi1%>CX zc+kkmzTgB&|KsE~fQM{dMA@*Gc&D5F4m#T$qr}A585!68LPHPHZrr$WD+^1UYyIVu zVY7}2$;stp`uOk0$9TMN|HcUpXvv;KGo%>!QkT(rf; zS@TH?c(W0ebLkVQ20r2r=I{%T*k?5;rBj?feR_>@if!wu73%hEck~NxD&0OJmLc?5 z^oDTR6F2g^N9$ByKE#BRZo$E?9r}mJf?+!gssq-9bONt$uuBFHW-Lu)p|~a&`;NE$vKdYId#k zs_V*eH($MZ^ZmNS1>IS*g&u`~j=aIFI4!jnjLb5bB=+tpALGM`MS^vo9+gPlG?W~|EcJ+jm2doK+c><8dQ6Y zMc!N_1t0cPXU+Cl(iH1*{5X$e!{d3;wr$_Z%xDfvl5A1Fc}XYSq0QFF$jI)L&FIc`C{)nADAk)Sf;#*NKSFMXT)I)qJ@ z^44NNoh=8Jm{ZXbr7@shb)k)xG$m)G0iMgIw3pa@R=FJR=OgSFkKj^Cq8tmy(o>EMoq$&2n{x_i9S)>Z9{xRdb(G2=3`g3cPMynp%*5^Uy=; zD<5?uP9@PC<_9msYL+G7OHtyrP2YN-qMu+O_`~3UIHdp z`&%HhD&2b-)3=dg&pKN)U!3-R#=niSfq{5=V_aJAcGKQ6XX05(6b!Vqw3=9oyX!`L zdwmsHy~7&-;pj0@EO9_to2XSXc)(;xPfzcp8m}$ASH?}=oPm*1%BCSiCilhx4z9iO z%V#HxA1=6gNFjJ(b#`_z*Y3T7Au+kJs)>0(we?~)N3xPSJ4XO6EO_pfmAr?@F(bp7 zxiEQaeNKSa~Z&zvRs{cY5|k99{??r6Ek zTzmHXIr=*o{(;KZ*PontGSp8IKThl#Q$As!<2?*4b1fTPusWz?kXZ-lY}@y{z%{g#X#I zE+N)bPc~`JCke4zxqiLuJc@No1v0;!X$juwuItCdcuW{fHtB$Rx0Y{}1+;5o%tyOy zt$ev~HTPD`i_@zf(=iJ)zu`~<);UCmo`f8|N6cAv4zF#?CT8{2gSrQwo}|bQb8lh1j%?zrFDG|4u4+#E*><7m z&cU+R&r|#xi^P)x9nTr5rO_#E^qC*9X_@e6`BtVoDs~=MP3jF5)4GbbzvfLocQdp| zoX334F;{Wjn+&M$OVu7M6U(ocjlRBftI`!9!_Rb{d-8MYRlN@2v~DPiTeC<)BI1QS zDX^fFBqc4XC-xA9Iy=t?4(2r7;UExwSuMqu)2wn=B078R z{#^xY=3TYlqv&zUBl>JK-KTuAyCgg})g!jlZ2a2J@sR10;0-@e(_~5p49J_i>HIu}gj=_6Wo(oUJ8hEqHh@RXe~Yp= zn5633++(+vH=AYUuC-iGrgSr=`e#~HU&KWpsk7&FsKTun0)Cvja3d*HD7GQZp@L~s zWQ;0X_J#dmN|6cLcF63Z{DqAC8wZ+!ao=}dsLI3UxbrG9>!-%&O~Y0$A!P+DyK%gn z^n;6wi;_v@Kdiy309PisWT`vMhSbC+Q%H zHy1zbTOV`7t9)a^PU&UqL+ZFQFDqV=c}K=A%ikz^JIBo2oQadOMO#fxO)_T=@Tw?? 
zQ+@{8XrJ(PcGkz5_*G(5WtlfS!9OFi#s`giW0XuU?DI>imiE}BdW>#Mab1G+(oK74 zDtcrqd2jnh((5Zeh~Z5=Sl4ymziZW@S07~A)(UJ(*xa|rrtSNI2=gjK?i$y8myI;n z*DiT|oQTxg&H1Kd#uj?fD4L7(V3=z)RBkY7(1!c?2vxj1a|O5-%RI}zUUpQf4b$;m zQvVqBIiyqF$@;>N8wD%)8p9egWTKWZrCgP%zk5XAxbUt!qion7)dU06B68PcRZ+mg zR2rHMm0~{wSs0xKgoPG@S8&}Wxqjo8PxYbPqD14MCQr|Yi(>1A7nTgz0b(kORW2WEh!+a1VIC5pQ*t|PS$aQ5+8(WGPPx^ylSQ$2 zIcBABigS{aBrT>ql-;13bcDx2ld_4Cg9^OrxKO%&#mPdNvW*~M{rOC8tI1cre91;YYH-`Ce_Xi64_}T^7cVIT%YuTls!5`ROL`uy~G<4_nB& zG>r4dx`d6g)K;Z>pBGa$>ABAlw*k?^M{M_mL44dOVo2G54SwgYnlDSOOrg>uS)r$T z!bf@cR@T=d=@;H)0g5aDA)42Ez2nZ|l+HdpkK{sR%s;2GR7h5=yS3cm-TmOppc6Us zL+)TZ!G+Ta*1Yp^;TKP(zp#FHf8GtTaL${TF{CztpI2_6U3+}hW#rEY#Jx)m_m;|E z-EmMkxb+1q@T6#~+qIe*&o~Hoan#(iGK#l;d6eRv2ZLg|T53qd^~Wnvj!ieLYTi5` z=hS}(+Am}}n(E!Mo74IYXE3il&HL+zp2~64g$k#8%Arrx^rX>>?XB2a5X!e&qGCka^nHxrn}(bAIZ<26AZQWKH+Wc_a&Ln z<=cBxW`VV7>XB+cU%c&*&t7r&vnTrST^~WS?@KdP?xKSA%AM7Wp7XgTv+SGGHJg~y z1RIY&I?LO0)9ahyoebx&8z0?t4@+9KQ8qWyR+vjFiqdBFrV+-TUu~yTdYdQ z+`VV6?QvhCEfv=o&{VTn&qjSs;|kY^iV}adiVN8uor>w}s(z~Th#H67r=6ScwJ84H z8W5t)+B2UvZsDD)f{W|sIPUgWIvf!1>Jt=IQ@xE>_kFkVr{$K)9=4*DeFw`9eB|TQ zc!B!XZn45++j>dOD_K_x#3|0s=1F@=lG5&XX4jLDIfCRrgSsqO{8gg%WPEJS)u3+q z#wl6W{9d!{I%l7kyn;j2wzsP{8l4h9F#D&ovoa84yE*!w#c3<=2`_Jzh)e-y`t1B0 zW`UGVcV6wgp(dNnFR0d~^QqX9Tr}nS8XNSYok0}O@~FLoSvngOgE{Rl`mfH8Og8Ls zm_ya-&awPma)HD$k4sw=^(%Fgx3-}hOTYEnT|014go#d2=QU|!&30SI%6$yLypi9RbRj)2Ps>dCHl4YpUFG!Zujti$-svaw-I870mc_&- zMVl>eFO%}Q$t*9lHZO*Xp3FvBeRlHC#>FY7y#O245*YwX-~%WIbe4?-Q0k^x*?~_i-6o-TiI-D#7Ogmb{nSHtuFJXV zDoZ^Q%v&6b+U+ns8QHlFl^^H4lwBS~PeIGlMk~q$qOtd2{CnvY)rnDRJ2}t`!Gn9` zR8KoIxbn-s=c7=wS;Wu%uq1Wme&^DzhXbbZYeh2O~bi>F@?PvX;G z>ENBr$7C(A`7J4_*}|PVr-Nw@lVFu|CFsg(EqSL6h2%Yfo31_DJdf6x-A*K3pX1b5 zuO&R9h9UB+XWQ=qxCPOy4pjQQa=#a1(qw52Qk!qR76x{z1dp8(C+`#j_US4UZKasx zoPB&)!?zm8@U48UOS}H6EcFU52G`_aZzcttbG*8c#1RXZ5Q(f}s+`p+SrK{J)n^66 zvi((9>uii;oeyuKceAGv#2n1?AJFnO0LA*gkTx!p26V9_BB5`Iwazn-le_)r%e=|R zv@83PnM~RjE`KuN9k z!v(8Eu+Lx0inqIUrL#*MEL-O>NCkXXXb%fUZ&6~FrTt9XyEB4ljYHPGHA&~Xc7*c# 
zOR-t)i{vi8VCVKE%kPTr%iBvT@0$f=X=&xSz1iY_K0quY+9kSoe_l2dhbuLI({bf4 zWp3)PfJ}21(6Zp6dN&IQSE{ay9>o;?_;IIq2l?rPl4UOqPhLIP`A&_h**xu|)ODv1 zyBUtVd=*zKi8X3=PZw88QX>5s<*?f0XV$q>_7}mREZ&QxdtxmCb}r$8=Mo*Iy7I*# zhTmgNuF*ZJ;iUbnu|C1!oA4>?y_Ij&d8=~QZ|ZfwtR@-H7j#~dJmFG~x8_TxZrg3e zU0*+5m3gkKQg~&>ssQ@+*xny-~vo}d2en*jVtg2Y)iMPSoWS7?M z+#U3eC0E2j)egP&y5k3t=G4wWserAL1ptk?RdzVIu#QbnAthJ?RkCFXhj0|obc?Hy zegA>#g}k6^T1?+>D?Ic!I+%t^>uumJJ`l$O+}nMsJIgXk(_wdrGjXfEwMg>GQ|mmu zy&b=Hwbr)A9eTLj26)zGXBl>XIdSr&j{oMp-%M0g3T^4i`y4HHPzf>vb&LDF-20$S zCrk1AQ$jMysRuK2sH@*xu}|>qVzkP&Un*F>YOAfgcjJ&J zqhRl;ydZfhn?oD5BVPc4)>=SLA!bDzKIAGIY8aH9S9#GHo^E=CUQ8nYvf1AC%`zL8 z8?`EY)3IRC6nt*c@u6<315IUkLQ$RO?!c`FqU_p#JO*&i!Ux?y9x^d9o_O-)$)+t^ zJhMXhL$Um;qB`|fuVyp>} z?2KH4*c#f(Lt>vptIr1o$qEZQ{op$3Q;@KD!0BTnQKM?Qb1v-*uTYg{u!5-4RO;~+!fD|hK2Lt^ zqFmni$)6Y{a9bf->J>n$4v02?Da<}r0??YBZ67q1v%&?#MMdME36y+co&&_x5ID9~ zn1uYhq@CrF4L`n+(bjv*HP!<&4?N%_vy&MsA9RO1cKWyVCLGT~t4D++uRcOQ|Dpn| zFvB%7-Py&ev2%(KUCA)yD2+FtRmc1FVs20rQ?2+lB075R_qWa#T(dpTsrNQVMmpGP z>Gc)~CKmF}Q~(eK`<*?z*t9y-Du!Cw)XnK&PtK0 z-WD3ES=}ygV6oHou4%o?$3FcE`L5Q)-Ak1PPw4G)k8EHiwW*FaWv<;G%6~zodr^4A zj_UzYY4_r`B-PAPTj+gU^9ZWTr;@ufbk&(ZO`)z!|rYuuDgPj>IUO5>VL z$(Y7`XXGSpGV3&7AY#7Kdix4&rG|1?b;Om@@MYS_bQ(~yue=lMmB`k)JjD~g~JM+*(Knb zoEiw?ji4(ZRTSwf?}>k=t!#}4R~@&$Rd_qwVntJ$8m~|-#f?ixy;^o`-Xgc)eoZyS zHF-CQO=x9F+pf=y$Jh;vzjS_1+S#IoGSpkCT4H!XZE=(A%jeITI~rfEr!QV4qW0TpeW@=_2_3D%1AFUhRhRH9-WQql+Es2qIkLNe{kbi=d+z+sRjW_P z7m4rm0l{lbMM!6>V6W%8l-h-r&+HzZG5;cXFzw{!)DQ0r4OvJ#?Kba?xr+slsTQ3) zZrl1zY&EyRCQk4B_jv(|vJ6-Z$(Qm?rLEuI`@LSnXXa|#@>Mv5TQSYiA*j{Q&kxhn z(KyF&-_3*Pc~TRaULFL|S>kS`y~MJvJsZ_;L_3{M`nf+(Lqz;t&Wgo*TjLTMZl(z4 z6lV8IOGcdq7=V(YWoFfxa9(kig{ya+^zJ#Bv+qJ6Sxa^e`KHb6F(29jEUQyHxtXo4 z!t%0$tHR~u?do&+xs?|#WvEP{a#L>W%ebTK;iY~|F_-^VOz=i8Pwx6TX;lfehAb_z zDZxh@gJjk_xNOXN5M6bFBeb{MD^as1Z*ff^uBsyn%Vi16uT97`Lxn~LTsA9NYh2~e3JQ~HD`YR8t!st zIu-1yW5}7gKbvXv{k)2lpPDB2@-J)JVdYfK!B)2=@Dqz?BU#*;Xh}`AD3#myq9fke zROjowEe=}QyZBB;_tI-2O~%GqF9Uqq_bpZ9JHw$aC`{$+)p*I2;`H?|z`@JB-l`f| 
zm$W%C!meOeW_NoA@14)r16N&STp(qw1)>+u70)b%^11rsk%->lD|*xd2P#Pk@MvG3Cl>+b!MST9xWu67I(tW%Dxz zD5u_hRx6dhrP{hIN7l`etxTOo=1N%P^7fZ2?*1%DSX*;FGttq~ue-DKBgY(H$EqG1 zM+v8$sPMSHyN4b11T3X!>+3cpn;lekQk7WI+sz)vdGp17C;z?8mrUP&@uF-BKU$l| zdr7n-k(MfYcWK`{pR?V92Ze#}w7f|gIP0Y*EO(T#{&i!pPyfQCpst=kRr7I*=H^%2 zyP3Xb1Tx!cwErkx*73=jiZ(mmKNr(sj1Km?-gQ&0!o2DNx1lW?2S_WyygCo-5}}9++2WOOae~YrOv|PRf#($b)$n4O)RtSP=>MhF5JBS zfZF2cEd1?fH+&DML!))%-|1^^YK;D@VArG88pCzfH0r*~uI{yak8HA0S-khAQdPnx zJK%j89q%@i`RVJHyv8jk*Tvr@{N;vtlQ6+%zXR51r8bb;&__08Wl#cq#>q*k8xU{Z zY09P@CsOYUT-7qUVi)$na!p(B`z=gAKg88;@9p-J5Pj)x_F^%>Y626BN@yyJ={6@g zC%W=m73vkW>xO?7YqCBRQ5-JC>$U|4-a*`>&n=;PQ=qlzynVQvAA(AriZ?&!-bsekC*bQ6G10ZIO8D)-zt| zZazHS9S+YrDXF;RwyD_s-06^dgh$pj`QaLy>ZPlHsCqkQJSo{*=+6IQZY#B`k0^7^^$)Q&0RCjsM50RkR!0_|vNyeOh%Z+AYfc2GPSzUClcrL8 z53j2})u#mLnhNth1>dLM>)kc~`L|`hEHddjIy$+k&83%u-)k~un%<@f{=UXHs4ug# zLM&9YV$CY61u)OWdb3wn`V@o%Gb}!w0v(C6{gQ!~CzTU&H6rwkC)-@k|mD7|cc)aT6 zvJ4-37CoBv0mpsXsX8K)+KUSlH@S7RE^MzQdj--}{Le&a0{+rXr<^DA>@eEWAB-S)F$>YR1$_Hy!? 
z&OR&ddDjE{0wh#qgkdG}nLf&~Q1yUl!eXs>vT&7(5c+J6qPv(Sv1%zQ!I!Svcb+@9 z(zYf#$kjEOK_*s%lCsHYg#&+_;Dy-PzO}8@*Lc7$(^I=#<4I6dIH^4Fp| z)#&EjDn)JU#jih^^Tod4OAA%h2%&Fz4d60G6A#by48!}|L&a_Mq~&#<0?kwQ4yvX* z<6fk@wr@q!(K@}`sIZqxc|D=IzMrT+U2~3;iRH5{x9ZZ(V8cStk2`#eHivO;D@;~H zIf@iyi4>j=-S1-1ImhNaFy;GL)%$Um*;w2Kc)M=dz6IQ~8VdvYBuX%^c_n(=xXQfD zE1xT?d_JopcJ0c~xn?QlS-$Ps7LJY8f{M?-0DO*tgxHoU$qGY;$s0T-f*RU3GOWEh5qlz|Gh*FdY45P3l(lwj0t!0k`h>ypqyj8!zB{4ggFL2A8fK9$d+AZe_I_jNmi^T;GWoPb4(k#?(Zraqh z^`pxd!It_nimt-8*DhAM_S9;sUtx>$Vxp{BRkMn-DO$eeNI7}lMN<*UboEq!Nirwt zO_blaHdM=oOPr)w7qP8)J^4yn7SE1tvH~J!EW0n(}b(GRXP+1Qi8j2NmqtE~u5ioVqJfl_K_ObeJ;$oDC0n`lKaT>3A+#&0~1A_@;=f zuG1!~9miG|#N}3TTq{sa>;9qgIZZ*~DrdZ2%tVEsjq}nh-1Vnbk_BIu` z6!0lzM}~>@3eM%1KIv4VZs1zF+EcXU1yfSkgHP+jm-v1bSi53HV%f{}PN$bL-H3j( zv;C9bZIIp&#Jfy!_m0#Uau8tvkG?;&v;taG>$NepKeX(}gJpa2PZ%9qta@hE1&#PQ z%**QLRpn9M`gz7h_wd)9(#D|-b;mQBP$hdsQ}-5GtMt9HwZ%N6VKU3caM?$PNd?Sy zqPdix?vnEHXXBi=*43>W$(!7b-$XHHs_r5wKBTuWk*(%^1gC}Q;^+A{C7MfvQSbHg zHs7I0DMC@bj@-rF@7g^!DR*ePja@dVTIXT=x{CX-+9(d+;aCUKE~ zh>$-sKlqeS(90Xux$3}9#m>eewVo{#h@(Zq1 zR#ul96k;-~1?}VxDK5(Z5IJ9yS;@l?`o0f-p*7w9_2iK)K#=Y!^AtAJ1QBnnQ$CWSU;W&KXkyY1ceX;(mI z;`OVH`z)%nLM=cVhWTeZ_pcJSRlaP&q7Bjz+K@+Tv+kkVTfhjzEy8*0-PQ1$vIw%Pa}CtEoFs zpVKlYcN6>OJBQ=9klUp0e`EFio%`}+zVH{RY=Qey6WPnaTz@Kg&9NXw?N4HrX~oIH zn_iYwEUUd7`PEA?bD_MLo)>@8Fi7GX_g(=W0Re&D*PYGPd{%Wki>cd$ML-~m0ugC{ z5Wc0ds!`u73xFi5Wc_j@pZ5ONQNu3N%x~dwI%=4u2j?<1M%v#CEb7fa-BSCYV28k6 zy*dh&A_c|Lcc?I76UUxT^A);EO`F`wtJC9s&crU+-Dj`q?XO~9_l>JYX*FMv0HuQd zIWhUws(fp|Jnl$P@J?2tJ#)(b@k^FLWXE-#uZ{OJL4fLPE%)mnwdGoG#t{f+--6Ac zuT7k)`eE~J!cGQ$<#$a^2q@b^%R<&`oJ!VfQ*9*}_#jFuO3JIJY0qrK1_z}bm0ezD z3G>#abKbGbAjgUY@PC#V>~p#m9O#sA`iG&T@guU%NSWBqmZ&OD^T&5k5_>$vD?9f~ zp|>w`Vk*McgC0JQd57X9{_ViI+5}KWnapfWn=6IR~7DgGM=RHSqzNSd6)-5_w7sY9%gV*H6 ziM2ByP)p|^DS6ZNQ6fCIGVyFp=mGNRQ~cysotAv9j5%~oZ(ek+u@P`PA(k9(V%5u2 zP}N-Bc~&;wS2p%zy!pIV`y2OK)J&`2JyZ|-w)<2G@B0$U_h&9$e6jQG+qd;Eo;}M8 zW7R-i3XWpxJoQS_j}aZhXZ~eBz;&B@LzdLPp+KoU^)QoP26df+Hrssjw(X@F`TXB< 
zZV7mO{{ZljD|2YRoe_Q5{LHxV8kdc*FvpLY_ggl_bcJVUQPnM}dPa6I&CE^4J|3;` z$<=+ybA1d8hKV+A?a6vo)~;i6so}R%=_yl8ZOugTc;8*AJKFsLK+O4HLF{c(lwc_G zBemPe!`(gN)-Bf8uU|)U=w#){3jP$mz+;@%*`C%HY6n1RMSys$Px>4rv1j>9a+F$y zo>>f7Q^R5^mxFwQs;f3zrM9+w^p_RAKDQ-$2VI7w7?m%8AVq)r>av7&#V*|W@KdR6 zN5^4R%2>w8&5=JNLZnDKJ6no-9!M++bd-}tOG`het>pTgo5l{70xkoN%Nn056yNy% zi=yEeA@ZI!C2Fe>5Cc&uwD&&o-NMor-93v6tcQHt9C^2ftny$+gn#x?`O-?GB=5be zLhEhRmryr1uJ8{El3>;=hDB0ffgj;UpYr=NwTW03RCOy1R?(LM7n$y-7 zsBESzlidzLzf0FdR6&<$z=TzyqhtuCG08NJBhQ~g%P)`bbS0e5x0j}Y)?buTde2UK^+i9v z_o;Out&^u*R!Q-arsn42FR4Wa3GTG*+@Y1;*TVolTq~(~G66u>!mj<95I=(+J&e2A zd(l21eVcSq@j|xxH0`aNP3sfaMw(UaR=n_|OYopk36Cn~J%!yhEgvtVO4FnKzWy7G z={-ovA|mPvbCTj%a&pq{HEquna(Z`v)8Q1?OXe! zn48VxtSO4q+OKpeBf^y%(^h@{QsnV_`ybSasPX6AH5SAe3z-I0e261IlGMJY@@7a_ zJw09AM(MSMAoFA{!2VYY=CwJIglH>9Dw!NnGb)+IWBldwZEr{IcR39TN_+_=u^=V7WGKA?M(F>Bako%jYOkE)#HM z*LivpQ`zWutOtapPd#qx$gb16pW0nf&;6q?^n2^KRz>f5*SHdO!DDVJ_|pR_dlL1J zV=l2EEm-w-jyS!vssCn0TgCJY&Gf>Q=JO#fr_n&;cC!elm(m?q)E8A|0zs7i!C3%E zCXU=r+A$BTz0dltb;+l?W;T|{(im5t>f83MN<0!KqShbhsZ%R7y`)es>yQjlp-&Q( z_a>vEi!)q{%*0v9>i-s1;>@(F&<(h(YyIVc`xcJ{1cez~zMTwUQ7?b(Mo#SHZ7;M| zkWcd|t6tI7dDvxvLl-u+&pY=0)AhT(FP+{-S)~Yn)`)&2#&AskKtavZRy*BqoQ%J+Nj=UU3>65zjpZo63`j!{G z_u+E6^S9n0bUcJGBbn@LxSVpp(pu-e@H(5HagPXGO)tC=^4ko>TOz-0^1WnK;!yd=JCU4;K3 zhMJm_7gpv(-An2)DPCvG9>L9ZI^~|^mHXtVlC`wS6eO3yGQY924OS;l0G-Uzl zsHZMIE1tNbXb$hgrUUGsi%kc>>f9Tp9T#O{eILi46ZAdms;BG`^}0%rMyKStM`cKd zd63N8+s)mZxRb&*lvjB^?}yxo7r-F6@Gf z4^J-hVohRvDG=CKaX;Mr*gQD%M{emmV{c9W0K4aVxY?fXf8s)aH8_QvE4C!~#|`TO zL(T0V0xonc+q!ml=EdtDA6#A;e}1m~cFuEp%3hZOt@ct$U6J=*iz?X@_-wzB5Eo$a z`}90zXCLM41z=zt@%fAP(;DwPei-bHqCLY!MB*roWl%E^;^U`Jfvf}Vn6+|O^3Rd+ zzaSwIqjL6nSx7{j)=%+HB*l&5ub(f|+)7jCf!#+khqMFxrde;W%7~pY!DANE8Z+u2 z-b&+~d2|0PnzDLq;>#_tpv0YyT|qj3?Wj$V)9OCo^w$afo@+Ty!TgIu+ay!A{x{3^tq)FW=wOnXSaO z9psUF0y#?AfmLJmxBfQG=IZ;%yMt#wh{IA(dnO&S2N$%6?JvsTBrVLAy 
z)C8g~o)Ds*z|ZOA+n;GGm)@4hrYSoMsIY$}O<4h;?9W2`mB|}H0x*BXW!bg6NwGR>10AspXy^x^l)cG!Rpen)eko26=ki;^cAmZa?H6+PD{IdpialFkmq7F#=79HUPuj;2-xRG|eKvj^_ZdS13SbYR}sX=6~ z?G~ri7&D;b#f6)in5e#dN#%1gVl(9ss!Y~%&Gy(`my3t0*(G)D=mX=;w^d@gl z0=P;sQ!u5@dSb!V8~{`}*$9Bo4S1J4xpT1F%!lhYpohGzBmm`(Fa%Ijvty}c&JzoA z+z2D#0u~Sl<1=Y1ay)-^)|H^MpC0iu9FE_^`9E~p5@zFprLv2 zvClNQ*{#!d@PvXCLtgdp<3~;gT!T7m9nXVa9$DhgNt^rnXYcGBtL<0DwWq7x0WaBA zOw!OJH{MQEKkR!29_qFF=WoUNMJ{z)LlV1MweDRfyQH^PlV2uQK<75banlTz zBMH+BJH8BJBsGD|X1U6^bI7Wqh1LhLSf{4o(S}KEqxYGkECQ3={VqtuGyn-64R#R~ z?)PMg7+Fd%=$=IcTy`VuQpR*#?9I35L(Ue~c0rrbCgZxl8*yqMJ{0jyHw0+q>mu%x z_C@erdu(26??BA@Ge7!XoxfgVmsxCbUZ?W;{G75s38saf|7m4^kBDTYJ$`T+#2lsr%iT%qr4_C3hV&9}4s%|sZsGAJyIfy-I4YrQ-kDlLh@ZF&sj$8a z7i9PyTcJ|+53i3`WZ}h^u}9+V?ac~^63}ljJY`dzElMsdCA-M^wvryfcGBZuNQMwDi^aQ3QrG!yBExi~4lGK^LN; z11=fri;y|?cXgWp-aEe5ge=uE|OrVZ9nz+I3CDtSt(-J^68T%7l_f%bZlL zI*uZamRMM7QR#b%2?w-%k!#~dek`Xs?L4NjXS`EC$f;o7;T@@5R-x+eBw0~BaOCK3 zAqC@M6T(an^n&AXK|OAp%ZcDJHb{`CH-IfaYk54rRdKzPcgRS?zEcyMN#&%CuL9?k z#mLVxl(}j`^BWYY!gi)HC@{MMI|9Xn&n&&$5e+r#)uMNM%#D!Sj=qJVqiutuVR_DN zM@fJtr4ufu+p6hj3fCKGNE$9ZB3?~fEX?wi1G>4Pq&rce^e4CI?b17D;;lwYXr$DZ zILX}(9O{}ToR-drl5&A^*6#^k3kBd1W|N`ZmlM^79u)N~)nnu@<#1@>c$gV!=|n_B}yu)T~8ArE;`3^D)o~`^&tEJ#31q9{EqRHNl5~f zY=tWJ=cqDSJa)6pVK#glEUQ-ppzct(V{a%}I7V;h3d(8SIQD&3N$OVmQcDd&XMsdY zi?PFM?%wHN9u=PKZF(;{_i?!GWciJK&~K>Lh++ek7D2(wx^$njf*vpOr85Sn>xi`I zn(bb1%U+eFSH6IkOK7dQ2 zW{3febzb}h_TzAWM#}y->=AS9Xez2#pCX`4LEa9ZvvR0leP9wlkFV)hjlwA$!>_v! 
zRzDF2rQ;t>UgEyK*JXH;;SJW@Q(t4e(v*HBzGs66C3c~L2*Q!eU&HVYztd%{@jaB7 zgdOXA3RXbMK*4jr-0x)I=6<#|?Y;^K%t23bDNydqQvAG3G_;=qSmEQ+Pd}Aa#SMi= z6@9gH+3zp+oh*=@iIibL^2fu+3mn^^1qb_>-rjzuEgt+)&wuvayjxFsS3`B$b zqW;t*TMDN{c!TPeAX0mb(TIS!jVy=@n=XT#vur~LUWb+Il?!xz9 z1cOR>c^g+3u0EO^^1rwMW_?0S#yO`w&dX0iUVE!xAUo5vHJmjMx&!K7iPHi=Z$|OW zF_4?3MB`#s#Xj44baR@I;{ho|rV2-S)N+tu?vgs9_E)=^MorHmHJ}1jI6&53REW&< zmh#W?KDoZw#UrPj1Uk9(^#DtyLI%s}Q0PTh&KePNIL&(A~jbviU*--G`ZR|0?}i>;AsfC?++#;TmXSQVV;Ykvgkg!`>UQRe%w0OQo@th9~ zgG3Gn+B`fU*U1p&=Q7?h5aB=IqXSFGF>SUOEcYH2Rgwuu+;$|AxRwsli}%~K_F`WX9!$9wVi6VVb7a)ty^a;3sg;P zoSEQXBF|m}Ge^DYd-Zgl-;cPhe~8|?doNF?##B{XJK5YtK0&X+kp}aj;3-o``{1A& zUi&N2wtiIP)Vax4AAyd}&ZdvLa&qB&LYY%Kb=E0bJDZ;8Y!f2&fc>ORzJ-B&9f9K8 zEx^N5lcmdH`=j!OEQe_i7&IPd(y_6zVi^K%te!8;c^L*cpc?xM-cuGEf`tN-3R!e%xtS4Cv@L38fTF>QR>T*VE@sPcOn|G^; z@$r(5!EL$G!i;CnPucdBQ7#jCnYd_rS(g}id3ou$xN0>hY}^y1b|tWc9$n$nL}<05 z!a8vF0KHEYXle8D$XQbk0wbO1YU4}hAKO4Z+t^3U43%$her#dUxp;2+3PS^mo&*jJ z=`s__xn>WiM#p72F|=@}M|un~d}?YM@p=>7bK1*}Aa^nmvy60#%AF><32$3A*{?{? 
z@;*qS-jrHCZX&54qC73k7b4~Iiv>e&di7q&;*)+|Q5K+3vBO1?glNXcu$@fA_X%Iz)^r4SdHdP>|b`82Se3u-QO@9k#(G2%-yhF9iB{DRS@L$^V%uA+pH92v#g1AvP4IB zYK`5eSp^&>CPzl>8^*SEeu-cVhR{ZV{)(b~dg|>^=#2;7>Eh~lv#>R%-5HVrc96T@ zK|)-u566Y|g|Jrc{=_FZdpB&F9@KovRQ>CX7mV(@_jL7^Ro@s>V%^XjGX%`xE-uM3 z_+i8FiHXmqKxkY7vbnK1zH@^I%aIX&a%2Lo9}YS4$aIqIsQN@U8^WO=it3Q+O|TCa z;A+!>b{Df;(!5tlVSo4Zgp-QRQ4P^?$xbI6GUBp1#==ESotrwxVvTVh87Wls{TT}m zuv+dhu7U#Atb)tLewlYJ-(KTpOcQcZ6+0Fs!ilYtvxkCwmY8A z6V{dX8Z^th-2)`J=pcf76i5k^*1UEDc~5q_u*nYj4JQLh>?-$i?&Abf>t%guz;+dfc~M6dPUQzZhJ=O*%&f*H#b+ZL)rIku!m0zn;FD*NCk?MM2qL6jt=N zfq{a(z32m`irz0pHmFDq4GkK((vjUD3tletr{tN%eb(^^$sghJ8Nz*Hb!SDe6DB9T zI9Iodzu%R}a$yKom@LoTmXj}$pWhgYwvi#9A8&iv05qo*5ce)pjV15j1|tv!?71MX zGL{0H@Hg#IHUML){@6aw1*QcqOCbvP-=Ma@ty}g|y1#$_4!0R;Y$d(z312qjLAX`8 zq|$E$wozg;J_sI6a)g4J*?M&-U!Ho2?gMcMx=?_B6*8~iAuyF_dY6zenhuCHQv#m1 z5U~fsz;7#248A>s|F#viA7Aaa6uHu$KYvD&d+&)cgnAegVR&*xAy_I=HT-=u|{@O~b+hF}moo^6blTG{hQHrNCW2KYY%zp0Cw(@G4&v9npU>rWH4 z^p+XGIpTWalfz|l8W#Bm(|J;-Vgf#{}dGcPxb-8JVPGXZn%D1A11P(Je+E2B;vUwF$*%U+hZZ23^jH2^|is=kAOcn4NNG`H-M?h7r?k}`aEyXERAf4 z{$k`dqC5x@tK<2uoY;Y%i3yM2+!mu;(*qnK^ZF^h<>lqk?|pscF>!HtiAhOX;vW&? zZy_q9Am6?{gMo_r9Ssw+f;5?u_p;l zN7AYUHVfQyC;tz#u*-0Nz<=9kTDZ^rkMR+3Usw}u3r{m}X?slzZuQUKwrPOQ_S%rY zz4zNi5KZG&!iu>b^Zmyyv=b4yn{>xMCg1<0x-q;?^ZppDK$}y2m@lyzul9xQvk-FP!`ef9~To5l*+39rB$X{OnO94|IsVV zAg%b9u1eQ6{+CwumM3zt(EdlSn1BT3U%HC7_S?U-D$StG+MoyX_g<+6BY@?t6|F+{UXvDkQ+BNaft^&%%ltBoyl9XOMmO_i4|D2LSJv z`h*SD2vK2S!0abtWnp&prd;mLpRnSMPnCEHP{*j!-&F@ca#2xUeu~L0wTr~r>>nYe`Lc?|!=Y3n5 z1~JWRTnf7qms-T6gSoQ^B<`k-M#+Ky*cpgX*BmY%^~)3fC|_CI_#w->>Mfa9UEW-! 
z>0HB9xO2ER_(uBeeO2rut+9^%I8%Ke;0^Qlvg+^OiM%IDA3c~&_1i^IFN*@p?e33U zv){)J_Fgi$+PS8J^V?@eK>i@}vzv~*U->>q-}cwKX5k=ypTn?hnI_W76g?zhrt*7B zY2l6(RL0Ngf4@jUA@_4{OQ+PT&USRh;OxOox=;4@4NJv81e1@qf4j?b1|&W&H)Uvl zf7Y6<8nDTG32jlNYhWr|y}*Wf4oT|$e(yy10~wrNvj2WDK>Ybd3=tOR_Ebe|(;0u% zRU<`#SHIX!*ZLVy9Q^k!K=C?}osRa_Z!dvO57zTr9Ye`AR0KV~VgA*!u>ef9Sh@B>m1*w9e%<&86cf89i?{}*3C^nJSS za8&5Sk5k)aFc@qDI0tD#zkf&V?(Sy7p%4Bh=p#WvPrqjR^y$-Q=H_c1@87>SBD>eg zO?A$OV*~3A9bf`MATVl~gW*Z&*d)UPGgT%#*KOZYG1=dCO zAekx#kODpdfz;>EpRY|=-j3~}EUm1ld3y5a7XZ<3u8al9KpKpIXm%9fDnURAtg2X*xA-ea;UPK(xys`4KxF{t4cJ_)j!-211JoF;Vi;SszL5Q&m;H zv7Y|qpaItAR{+}Rl_kqXLZm|iWW)kcP=_Nyh2YQ4EgVl~9Bh>PAf84vwzN{j2l4dr z$FHX2ph(@S1#&)HkR7l9zcZeLfsUuZ*fxO6YHT2eQi9c}#nVO6)YR07KvNYT zK=IQDJbl*e?L*+2;oI$g>ohmU4x5m$&S`xp9|wE?8{7Rub7sU=2H}%wC$bc~C{9x2 ziCiA+fmraYsKjfv1a4;~<1H?V`9lblv_+$~qMAIz0ZTrl9` z$u`cIKq9)xysDLE;F09_ckWtC(e?d%HUK9E>*svsfjvMz*i#2PB8b9_I&UNCq67v8 zssy1C6anWbXOQcv0hhb=`4DniUxV(^db>F*kcAjT4qJazku<=5oC#iVzh^L)AGB}7 zJ*DiU2%W=>X0l|BPXRpz-un9b22<${CIR^NA3=u>$zdP)NP&;1wKRIN z$(CmD|Eyb}0m(5sCHY?flq2KYRHi3SRJ-1+I;VnCaB*>XHN{UZq$bzxcSF{^e!gJ2 zC^?_k$N(v0zUix18tR%jL100}0C}bfy{+>K+=|ce&WCIdKi=8`C%_IC`B86|RbuW9 zA_3^U{XmIkY0O3Wi-7kfZYcIjFQXp{L2c`Y!49wCf=fh{1ULIMi)E@nBSm`y$S^nZ9;`JNK9v45Oa`kRp9#WJHaC=@~v%=l_je-~lr- z0PKg$;kaX1!RX#PCn)mf#mD0>eN8iqeDTN@#eV07x3}N}L3fo?&NPuBtIS6^iU=5B zg#YlV&=%#s4?-Pna^x(Jv?gCQipXCsn1&Hw|RqUkMh9A=q+v z&^Ipc)4gfnOF@e_h@Xu{j|^^z59VL)(_Hc__=IG*e%+_YvIMx@!60(Hss)&nt;K$O z=IPrrICscKht+<0!LaMphT1@0M1iNm2&V+^X^>oe`1`%0b9|jLAt{YdCw0X}S2hn! 
z$P5+6`)qR4_@YF`sX51SDP7Sjro*W=PYQ=d9}g)QT zu1;aa{9HYi?*nV1?TfChJk`T%mXr6+ufB%hyUHMGMj0*D^&PGcuR~+0#S4FIP1J-; z*4ssgNHpY!gN|cHv^!%lHVc>)6@3d9nEP~giR?$F6WDE)WD?bLg!IsUBGz*q9Y5yUL=Vb6TRGVlt zt~kgWI0Q#-ZEfLx{@hUr^#i5nAz3ass$M0@m$CBYbG}BYEcsmANevF!+<|aHnLUqz z@Z&cx^?cUMa!;=8m94!}8HIVLY*G~ql```1tN~Fd?B(5zv^4qb>};L`7)b!yf24m+?wfD;`kq6T=9yVnjaffh29sUQ#3z<@KYM^3 zBnQK-137EHh>HGlzil-j`Bifg=lL2Kh)g2lD_9E_d+k&io}*md6TygdcdRk8)t;O4 z_9FaLpyxGQpvVa-UX#@(5mSb6sq-IHID`N64m@OoQ+t{U zz+S?-Ohn}NKzHs9$=9Yij8YL(tB>yVCj4BV?p_MkuFT(X=AQQx-Js*`9+*#5=MSvQ z0|}ol(vfIeDKa3E70tmmo(}Q?oCy?Z?}n13J_aR|9Gt)114*8t;Q$inA|2Jza^%#+ z#DuNDn;5qUl-r_hM1TH%2l4 zyJ)k}>C0bSfO`^>hAcWK%YZ|u>5K^IbVk@6z&#Nd5&rfpyZ70@sFp&2K6&vBJqdgC z1*kuNZ)C%w+|9(1yA%Ut?kTljzx9`W;VYo%)E6=z1W&X9wRx5&S6zC4y<=U_FRz@x zdE{_Xtb_x9Z_rKaTe?WEwqN#ysf431LZPMEjL!(u3_5tT@uRXvn(t67aZyVGhAS4x z3ot8({@ru%kOi*DQ%)*-aJGzpw06L_eX(A2c-^+zIP`%h7}Ri4_#kvVF@ndp*ZcFv zXB*0TqqZOB5y%(Bw0QH~lD;t`Y7K=dXCAR;@VNz;Dw0UQ$foS*8;|u3yAz!vjTeHI z=UHoYze_p~m`47hIr%EXn}By+&7AsjQR^q%GI1r{BNnvXf<)X9lf|bSG~LHzb}W^7 zWVN46iiJEhZZ=DB+koHc`1PX|<-*L|Z0gxOKD(&*o-X3%#=Gn=deD4B@9L^b^y7VW-@j)jKpa*l1 z7>VPbL_%x_7!|2M+5hzXU&Q8_x*~;oS*KSNwcl<(iK-UiI2|J*Ka+d7|2p2V<|@SJ zU8S2+m@1k_)PmCnW>Qk_j(*{@$osO;rgN&FWA-HIe>??+WK^uJ9zH960>hhs93SJa zffd4$nQoC()YpU2&EgO=!&Q`vpYy}!3#GU6$786CexNvBH>m|ON%1F*k;I0?oWQ=w z98h3?`ZdFV2i9L4a&CacS99qvx7BW5p!+i4-j6NMCdB_UBf?vR!i@+4ZE({>{~544 z{v}YLToN6bH+Hu3*G0OSg{xat_S* z1A_Rg6x}*0Aa5uD3d(<+U7yqE26?|1{>wWMaP8Ht4*zY5|6LX%4h-`EyZwLh^xu68 z|Ly5lS#1K;&jj^<6Aek7-n>P!x|2PhU*u?g+w$;d23+11tRyUwwKknE@639@p3xF7 zTia?U313;D_mSYvi2iV+#XtG;|Iriwx2A-beqO6cX6mEY`>SsontGF8m`{J_N{V5X zek9%?|CI44XBth)E?kW0kzA4c+{TR@O<10O_~UlHcWYEj5A9evdO2Aa*;g(_`ZLEY z`W;v|(_8w5FVe53vXrx;`#yBnv|43cUG~*vwLD|Z=n|0kI6K29eUN|Hnd7Up9;B%7gY_-%uUwDZm~#gcWXp$VlUC=j)Zro-Kf;T~1rc zu}H+ISN=-$>!p>EO%4|1DwB$us_1=XWs#l$&P1r3bmxHa_LNFwMPwLM?<&t>LKB#D z%KxdEz?_2b?TWt!ll16u{7-9RyP$45q;w@bJn|FYCNJCI(37zAGjuaw7)$<<^}`s> zG&~OvPCsK>8zK7`R{cf=DI+0L13k)=Sx3t~Qy<9gBM!ni*5SQni?!1%(qP{gTEWmT;H&euemxt;e7Mm7a& 
z(5Hy`&DTfnceSQUAl^rmvc%Lw@?S+HI?zKnxxaNGW;( zPA5?emR4t`S&H6v50gt^P)22lxsY_b=q&tv~InF z6+ii+VLb6Q#nJ^G4UJ;x+qWOa3YD|qXhsL8O=8n+LrlvM%c-h`gfU@phSrfGEOmuTzp z;2jHxmZ6<84ntM7r6deu$?-?=U>OPE1}=!$ibH|T z+i?%+ino(A^G%iI(hbRQML(4!>snJLu=Nd+d)6>)Wduym3P5%GMov+&jLTvuFAy;K zJ!=*5wOhUUfSiNx1ibp)>gwui93>o26pwWi3QcTrLo{_L;gCGEG#xdoYV#d&&Wag1olc*c@lNMj`{}7@ zgBT5Y;+0oMT5;f0ykrP$hE z|3ISL#o{_~`O}l&BZ`Nzs=M7{V9bd$^k5Fm^L-9=8~JVB+&edm62ya4rw~4;6rrb~F;N20W-&lqFJ2`TegXW0LLglsJjfLnVh16?RJ95T(2v5s z1`f2K-IDV{kwAC;Pfs0Hmm)wmLZqDOt=LB8EW`=ApXL52dS{x7r~bon9>&MNzly_e z??^QIv|QeR1k3ry(QML%`Hh{QTAXLGMvEF%N2eH6KzZ`)Qr`LWglw{Qm3OpUEpf-j zNXFCf7X6ZkR_ZR~Y=v})8h2NaBC2Q3eKGR#2_&1ax@l4rD$k}_{2CjkmMmTuQz33B zk8g;H3f}G=+!Fnd@2Kmb>YqUKr6F^jt7PNBxdu*e#~~#>G1KW+&-SdyJOMUn59dHr}Qv*3L=kqoMUBID! zA^?n3#eqX?HsIr$2*_Q#f$~EFI~)!MWK-P2UI4!Kn_Mxd7Yx9ut>!g=G<>;WwW-pR z0D{j(Ngu|a(J@9<8^)g2m!nfTQ4jc{q=ncg3$J}xny)VG))y1xEf!80@JUHGfvSBO z>mk2r`?q`fM#tA%=B0|Ib%z^o&*GC~)~bA5X2uOYGgfY_%7^-@-L%vqHthzi(@O75 zqn?RM&W~Q*G}5P7=COP$xA5&LVWW%e<|YShn%Yga-A%8Zu8xQcepUljXUAkIHZWs3 zTt-zkd~Vs51ay-lb+=|nT-he?Dr=5V=J4&B3|Qq(d78{`Vm`4s|4`-H*!;Zvbl+^6 z^SU30T3W$#yLS9#U<~jkEJ@fTCjiIi6ufZwsV^8XodLk0YR`UE;B!(yjD2pBK+Lb)O|U+>QL8XzEtkvtM_8NdTK_ zz2G;0$*;}aU_`hWXP7J+m*tyjIaZXP%)T0#vzt>-{^XMS)h17h9N!V-7T-f4A5l?t zKjMw@8;p~q;ru4M=&Mlvs|R*V=ib?jYc{Z*A9)-U%(rh`H@}oho2HiGc0D%_%8!nM zukY9}-Bx}{IKpsS0jivqpv=uERu`JIeH_8Yi+&DfT`xKzvw5!t7rVG2iWA|kH>xT* z5z%Vh^+>ULF+iQef2>9fTLFdDyV!&Tmr}TOpDW5){VbK|>5Ck%-JqD0$Cws|V_85m zAGEV|#$nMFNEDA!%m2FdXT~^#aO)$RzRPfKn~A3zD6RauWl{D{S6ZtOg~FPeBAqzX zk$iO?-CEQ9MH%FXwIrjoY;1RzbHR%uilMx$~tm#_4*;ps~zaBIz)h&D>x)b=j24;fUhl0c0*(fa+>v z|KRj^drm&9{QBz?VWUPpZg-xNYx8dOBl_mZ(nc+bKPnccfFgE=RE@1Pk$1p;`#_Ps_pPmF|97L1Jw2 z(*`r6JV(@JeAP>+L_7YA3R|`tabZR8YYP&gE!2FWYrWB~(ISU-+Nh@&TnTCxU58;B zPx6N}RbAcwGDHN129onqx-q(YHHxo;4o*|Dp3f^Y#$v;yCFe7eC_yh?U9W9|J`_UM zg;wkgbCgNG3hVFEkYgD>Y|YuOjP|`U;{WvG>QsrmjQ%{|X0ThWX$&s(Pkum zTqGBzuGmew^jYaqT86)b=a1QIJQSbZ{Gv6MmGKVxjj8hVoYcNVZq@+n?09L)AgY;v 
zgN5q*_S9@PihgUly2{Al246+L<5Eg#?)lw&+jK{BfhrGAPKP-c6NrjT4${JFcbU5~ zIv`G8_1lc~M%Kl59QOh8ARu!z%6G{wo@wLAb1g;QoSVGHV$(T8<-RLoG*E0xlYmz& zGwsSW`cw(lXn4r%ANQ?gcq!f859q6BJypX*(a$7yFE7eMLvi1zl{2-5h+EeXCMvsq z{55T>jbz6G9#FdWFo0fD-1U0Te6oordNW-(~DoYsffg9w~p_I9k5Ai7`2#p zoNxM7i`)4HHrM}hL5`;N9mspLw5f~y02Jl&SKuk{wS=q_#FXXlxT%L@2Kq@fh$ZO2;;QNpC+5KFCaJ#CF+mv>r zDJRC$*f3vFbv35LK>>PI?+6ol}Ef6-=1^8{e4-MyI7G?mzx$*HmpMaAeTr9bhN zq?i0!CQyJB_}@Z*nrOkB0!%%|MZt=9sB`wq7g2WimoK7(9>iD3=X5@#i{49$hmbCb* z;SN>~K@^vrhNqC5b^8FjWJ7`jx=h1yJ=^sSP7y+pEay=^F_pT&YdGtPZvZZC6Xi?LpDlComRcx zltS{z)@hVYje44w@Ws#ZTB}Mc%)sU?Tq!0Zifx zs8P!uq^juVOF1S(wNm<{dLyRsgOAbP6uAfJB3CJlkLW;GiV$Y~+MX*yVvJ^*I=u$> z_YHI}jqTTyhw@uoBMq#=vB8k2keA9r3b{vCP$ltgXg2GF(8_-46V4$tS!V4^Pz0yX zg7f6j46CQd=WdPMXD%`gn%&O^Pv?~f4?Tv9g&7{nmHd> zrR=LqS?{;!;P_F&uI~Y)+Q{*G9DVFNI90GU=-kaVIPTk3GXEbvR~ln9UN|nyh@)B~ z3-OYVFVoem)8lXEGhUzXQS|vFzAl&GYr8)nk_dxf*-TCk$Yp`UFd;k^_7eT3HTj+T zbJGJvZ#?8Fl?(F)6cP+XKTp>N2VJ7$8NM&f@|&a_a&mICn6BJETw9YV@?E?W9wTyK z_OP}kw@jk4rY(!dn00%i1^E1|n-Wv9NqV0&?dICH>}sxfE?1wJU+|>Zb~I~aTdI;< z@0VW--&TbJ;^2@nV76F8L&R^NFMLb|CIL6;;LuO+tV&V059$}0w){ZtcyHn<vXjyK9kB4HnFKvC6%M4 z<7xK&dG(|k_+;%OQ&CxCN%7cR*X?p$Rp1pJMSpjwSHGgIU{AD0u_O0=S%B z#agpnpO$g8G|r5fe&HDH&lEg+wjQ4Fs!5zr1Vz86I~SU~CLlkXUN;%N;j4w}MY+pJ zHP>iBQf63{8Z5-8A2#izi)qvuMW&7nAyBgld~WApoJviy+Z-N9q3q|t%4OvdK{e0< zHfhqh8@_O9A--74krMd>Qz046l2PZWkU6GtMX3qWLJ=NGVm$yk)dMCG54o`SCj|#c zS5F#G^w97yq`Rxja$f5eZ4um2?$MtIEPO}tcu|vC2>Ipc2q3RIBBQIZjaeZ=DM`u0 z(z#rjS3&2PubRa8L{OK=ge675VWnkP3s|;?HK>r1po|zdAoYgLN=13sH#H6Mq|EXD zGDNfzwbQ-$P6BKPqnc};y17)@uZCXM^h0t&9f>LZ_QqR2E&x}df|b)+T^h#kv%Een zlyc52;hZ!%zp`jcP_@JwT?)4a7=&%JYJ-fvvZTPWWl9A;$nl)ONnXD!?;~Q&rjEk@ z@D%*x%`eANXN6kxp}4)IuIq%VtuG`dSXGp61!N_%o6ak!hH9b*C2`k%fzkN5@vq(T#leo-sE3V@R`WF5apQxPBLHl~Yk6dPN%8-iPQScwf9 zU{jNj(s9bvE`^Q!5=J zV6r4dvFygC4-E^Gui~;k4N-U@)E(FY9~}MShFi}M=KX~8+_YG8HuVhc_$YU6$5WMs zJ(CPGAw@q3rs`hVrB}yduGZi9kbR{P-Z2LP(L5Tn zOs55zxA#9Ah~RI;x_`z32S6t*<%7eiekJ~dlx*Lq@G{t#KdMW9ox`H|r&}w0trT}N z-!ObRmR<(l`<8B>B 
zQ1yQ?@McxB`#>0L#@%-TJz(WzUdRY)I=FE%nDxtKt4fRfQES>+S2j{HRn|2pxGi7d zanxuodi-sD6I1m~nIf2sHwme!x>kS!jM{3g_+3vy=|7OjF24P~OHlazKu@Fz`W$|; z$RRdNkXCu{>|V{PTF9LvbNx=pu-8AW5%IOM+1<%Sr=~J}W--B@bG-ckzS77dL3|Dr zKYzwFGF<=U?D63h!AwL@LdGX5$hPI=lv8Z(d>4|HTFH zqikY|cUd#-< z%`|Kc2i8IBiMm`CmeEbGsg}w>#JBa;@<`8Ps-A9Fr`p}E_E}kXcji{3(>_&~kYk`_ zt<{a;Xqw$@XPvFy>9>|urHu0So4T&mpxatG+qZ8MEyl%hb8Yk3`)a(HchHL<)TwqN zf`SM!Aa7G<>W{!36~MnX{x8(PiB5B2`n>b)Y$P9xW?CH|ke%kRP5i8PwVvBOH0vYY z5G`-@f7R`TjQmASi%N}R zT4+iAN7&?%k+`pMVf3@MwY*2=3+h&Uf*9 zhugQWoh@@anQ7f?Q|!9v+yOnxt@$4_dy6ewtekn4B@G|;-Y~}1JrsOO#A-;J2Fodp zEH8^uuoL2KQ=zn522~rnXTSv1r>vprf>Td2jLy<+O@*rV4D(^rT+=xRKQKYN{UE9Q zKBE;~7cpGwqdjZr1uW9Tp~isa0Zw-la7Mk{%p$)o)Xxvx>9w;%enQ?jtlb04Acm++ zIMNg=l3!sZrCp5D>cX~9@QGjKz83F`qEmafWFwjX@}>F6w$0iBo6=yBm)_mC9X zq}v~oVpgxmj<-Dw9D5rjNRJ!59^*9heiuluN{M=9t+abO82-K5H7C6alvX2`w`+O8 zad@F1!=x17eoWi;vQ^Ug)d|01X%d4`0~w{6@F8uQ(}FVj#hZIKj&Ieh3XvthawOP< zL=TKzh0tG2^YLfK$rtvg=ZYNkt6OVm#1_=!jn;5+YC!dl;AOLt!WbeS+Xozu=7D8j zGLqATf$%cost8S3{1@E+(CN&rt_h= zFv1q)(k-ByeC>Y*KMwRUoiv&U6DUmzBn05YtlKw)g@z}EkNP*XSoQVsiHQo6Z&R`f z2z+L`OGz$xj15D44$c)sC^E-I?vVil#h>qq8uY;a1AJ;~+|h-i3vgpJ^H8_Gl#8JC zx#--{Ya(eHgDR=Sz}te_++-W8n`4z+5it zc+1m$hrw*2WD0FR;Tzl*i055JuaOH%BI#M|H~vTluSGw>EAP*P!Fn!s&xEp{+Gx12 z6NVeBngObVlP-phmyk*G_pa|z)>xO4th1E!7rWGtmhJ>p^I1;`rPrn%lL;x*;@(kt zlgjWypidOj`Zut=79Bo5!8xxz@IwauC%^>0!#5J-9hW7@&$Ozl4@jSY?ks~Ify)t2;2?=K+a;=lhSxN^<-BAcy9-y7G( zLNI3b!jXJAy+OfkT8)kQo9S1IF`F5Qu>9P$*<+Wy#y2rZ|M({MVEjeP$gU#`@JI2W zT){Hg&evs7EP6R%2~BZ(LY|K2x6n`R{Bw!5BvxMwXMf@46Ua@yNU>~-Mu{N<4&MRm zse{tx{G5~dc;=!;lbntPPQRiUw0nj^1r1q24&FIo@Xi6;EnqjANR9oNvVR6xCc4S+ z4BJiIrN4pc@I^3dFtV25Ys!{L^_I@c)iPcBAiNQ+yci(=xLi25-d3g0?>1i@1chSO zzOu$2ElC$GD=&SNppscuo)1* zsZrLR0(zND@=hw;{`9Z+L7-&oM9u&579r3(S&=^2m-;7xDVE@kmg=R z!0iIjD6ofq{rA?OtZM~=cX$p%pU6M>@LA~Bd7QH7sExr-zbrfZW=^?Obw`wf?AJ7p zXub`TA;tO$Qhx0Vu=rSmqsyxEK4{((pVd8x92AEdWBQBo!cXWmCj|G~eeWX#?spen zh5xtv@ka1kObb}E09u`0OA#`h5?}=m)uuTbC;F*%G4G&o%*I_Y@AX+rkg{-W0};vL3>eJnN6`tg?Emv|DQ>G9 
z+I#e92vRJknje_Cp+{d`u@Bz->M{ohtGsUqZjdPuRUVzIzQ$2r1loj(z}536CMNqb zioXSb6y|-7hD!c+)u9OP-)1VqIH+oU4E|n^TnPm2Bz~>Tx}9I{;C#>cx!As6X3vh( zE3ke#zp+85>_30%>6vm2Gq;>>DaPO31t@JTn^(S7^}1()bd3r|MBIqDO$8(F+bg}! z^9zd8vEc{yDABp0dnU@jR8P_KTb1tD3BPLTONM`UAXmk0!#q_HlEfEc7)&1|m=Ax4 z7|tvcr>3Un@ET##UT&1D>LXMO){BqXNx+ZE2Xnb|TeRO70S0S;3}=YUXG!^k2W-r5 z{rL+L!RhBf7bIye?p&g~wST5gjU`}ZOiwhB_>4$H;k3T4LDz>RY>}qcr?b5J09A4P z*vGUKuGG26Jq(g6MGFfayz=LxSoM24I!+4pI(OW@pY9MG_X!nhncUWqUiB`nj;z2L zAf1a3n&L3TIfOmnQ@{G;n8-iE#%5-xqh#<_kB|F&%O1eov<0inR~gLnV$4gqM~Gr{ zRKn#x4oOvn!YGY-j=DbReyrYhfWb;u6?fXN$6c=&?YE}q+%y5GU{W#ql@@tT)1j`5 zH+A__5!))?#=_T+KkOh~LY;O6xe*Vk5J+n|$D-epx=i>>9PbvIm#UZR&et6s=<52Q zL%GXysWDsGh$(2bfPTj9g|}k!l#EahHVB}T>y)(~DIHxPUX~Dp(I@p=Y{M>C?Phf{ z3uvE36^Hup_JAehZRJlzN|a_@O`fp;WRxIbB2Mgf_*!f4C_vIw2Z6?ZkQo)pt?ZE@ z)Uk!>+%D5*$PZqdyr@Kbxqo{dcEjFj^sqw_@%^$wLtks zy#C5L`tVEL*~Tv2*;Mt7#9XmZM5|lw5^xZ$)H3`@lg7Y7bUxBF3OcCc65rqVAhI4= zZZ~`J=&LLKPUR$N)&TIzHf|+TwD#@aJccKqS*D_8!E9*Scn>)SO`44spAM$h>=Rx6 zPwA+sz0MG{W(wL#6>NFonnzD!lC9ANY`o%2&tW^%%zPnT*T>U0Yc+=Dez#r=PCAR< z-;d-z@wdS|CpwCb*`&@cMPpipMiqx2{d^LBocN~1m*>nyjryvmOX1*`@u}pThI%Nu?3PWtWmu{~i;8J# z`H^2Nscdycz9S-qsKwFx(bZ#ynuF`gxfblCnuJymqX7x=B9IVy{*n+={v{#af>QGl zDhUyV&CQyA@_Mj`*;96bWmW-cx(YrfUnfI@BjrkOufSeW%syIVj2oBzgx_F+nvyikw%+0DhF!kQ?l5C{Tp zVbxgVZbww4sAP>Ey?ra88>oaiVX1{<$twUrl(D|Ov+Vq0KNh&_e#CwwwF*yW7%x%p{clmuQ9!)P__}gkFSn>Y0<`40JK9E5_|v z@4R?S-a!^r@`ETP%BYPU6%>FSIVWdwt^W{mt03;i=)TQLQN>?B0W(v^DQ%no@Pfzi z8>J^=wQo98XYjio0?U#QZk90lypydZUC^%6CXd}XFFU8YY$xnM++a`TvLv9QSv1}w z#}{CB*1h%^+9 zP87BsHP1#*#*I~8-!@~++1X|#X=;unO@ZRD0*Wd=@GRo*b{NoOJssr+L?ijNf-RDG zl^$*;1$fe6`4b?_Gj4D}3(ONtOV6xeCOPvsz;4t$=79Sth6fkJWTP}u06dh^ zrVOpxw=3|TpdFu;ttaxZv9TSkxafsJD+qGW-=Z;89Jsf_#`g6bzzknx=n9G;$)c0T zzIsKsJZ~_14{tj~;fk|+GNWpU-X0@0XN;X0uXWcFZNy=QmnGP z1~~qN{nX3y0>-g($5xbO_*4tv0wu^Mx&Sae0A6gBr4w2oTNxkM@m?nh=5x^!I`i;~ zxnSDcqqa(H|DnX>%7CCV0dQgL=)T!+`93;Si6<@dX?D6d$RJN*0KFrs-t8l1%Hk1& zKNZmn_E z*`@eRP`1D?HunIZsxDLyh|8hfN=2eeogZ6p 
zA?OTrZJ-}H%7U=X;lOZdI*DYDjDXq~p2TZ<5OG0c70V*~2s z>Lj8;5xGU9tDiQ0j7Ftp<(_Gx$a4rzV?SR#FBeQzET%o)8Un=nE?;AlAZr7&JaMI0 z30Wlukn_*9be_=TXWZQYq(Y4!1sKO-W`*V08E7w1kKR|!Fx~bnmm`jGjBpayCgVtBL0tqo`83V{zGFa0 zNN67flNLgfk{x!!ZLWF^OyEXHO;#=30VttS105}k+(8gpt##2$%X7gWe(z&ZUj|?c zubU4x|4)G&HF5S}ScGmn%b3+qc=;2Qb#&^lwsSKd(`%rEp(`!-j=KO4e^8n@`|d!2 zUK~R`h1AJQrIO7>HUzSK$ZEKGSmO!Am9f6QzL)2Rl?MS&vGPaMyI`SE1M<=6pOiPe zIWiIwZd3!=>ab@|e*zDAu&^{Nqm}~0 zTUXd1(KbCNXSjFaRg*SL87u9xtzrncEbQzuJ0-fix^CCC`Dp+~so-n5ixq(WN=yyR zcSR|FP>`hc%Q0Y2*+0A2NBjKG#NsYc{`sK*5p#~J-}G@)(RT@l^gTU~_%9BA_9-9x z6PSX6v;e%Cbi}b##bztvEZNVhPiV6B;614HBCmf(i&)njG|*mcVM_;P+mr-uJ^SEi z3s7MTt{|l6D!G3^Z4k$sc}MdxpO9reGM^SDofGZlzY9jSVZ|}_jGUxOfX9KgY4tVH zsG%{IXp3{Q_A>9DF8B0=u1uRLdQxB4yWkv?;no(HGoK z=N^I%G`@(0;tT!N#udCn{lS~CsvU$FEr*R6 z3Eiqti9kL|lf_iW9ochGJdDYH8~q|ew~D2#VAK~}zKoi+^$=HV2g;LKwPl2bk1>+V zc2Px2(vxR0)@rq-q)Zvty$|J5+sif7_^N0z2qMFxKb*o_cssI;v4xZMaTE!_W>R@z zB3+WKNixb{?%DQ+GP)NvPR)B`U1usXA#BairmP%`m9wc7YEfniL{9=I#TW#z{GlFUDDz7Vxq17^?sUnOx)<@j?q8n!Og__?QH`%7OVFirP2jb(ebxe;ldHcGo33C)CO=KWFpzWwY7EHQ#(`>qDMlyj-u>d7XRQP}AlVfY1)=m>vNZ^_WyqN=89 z4P1fB9rm4PWRGUA=nu@t4XrrP=D2f*fUqeb^{8KUw92+?)UQYDi10$UEU!-obG=|E zm!Uj=;27;x@90{O&azRS4?*7elT+l8{kM? z^1_Yu=j?kfJ-KCg4(6@osp)C!7cJ?uHxQ95qK-=SqQa9odB~9X_G&f~jP-m
bl zlKxXx9I&JY<91CD%(%LOrSD_v>X}ux_GxrYyV6WLuGLg;DN1wH8#wc#u)M@!10z=w zo>F$9l)csW*QeLv6xWkbwRA1B>Q0)hIZgII_QA zR{JeIofEdz(N)ujqG&-zPgg ziPl_2AA-dH^IqV3>SXRKmV`&eL7YQ&u&jy$n~P`VIG<_Qem+nC=uAPmqm2$Mail&~ ze<4=z-X#BK?H${R%9)6^-Z_4)KNG6TEMPC9bnvCYmE`I(<(F{;=yt`x6-Pcg4m-Sy zt-BMQHOU@|Cu>7^Mt=kuDvx}K%;S5u$2hxt!lYee+4s`zUDgZVZwrphCGCD^R?QA5 zj@*6otMfp;^Y|gbW$P(LO359`({`ayN7`p;d3VryWbhfy0~;tz3%v&l{cF=dN5f~V z8d44=`_t0hr|y_2PBU8X{lh2BkFiaIQ4SjXwQB&(T?+cWSo!^!hqN3QSaonKx_;OK z&jfOFO-+qnCNI_*rnzHOhD7|KoO`g9a5oMkZ1ni7=;YU%P?H=6IHMYCzN09Sp${@h zZi9%QXLF(TrBE*HzyAmOVUP;X8i2-D<+iIh4qp1v?Kt{@Z8%_~|jiZwvIV>&!pnIvp zAIQS#yIVcs1?cHL>I|-$L8Q@A>QWOcRj)oSR|a8;pi96oUIn3Lxo&H8nJG=zzK5<`2?QB$Kcaj{J-7Zq?#L7GL&R54iA~Vl)|72-UCx*Ytxtuk%0- zMstQ6=qPaOnB38P)RQzz@Acf@hux87dxODX>IMN#2=5Uw48Z-4c;CJ6Ol5uu;I&}y z=9>V4jE3akjQj#ysqk4vdb(*0G=6C-(w`c#ijh-g{?kLe`U8W{_N z;r73H!fbW_cF#0j0+45#6V;XS-%3S0}k@0a44 z(iE942+X$( z8SSJKzu2cC4VdW!1%Xy=%6hx-{OO^9hY#>2pYMzQP^7JQ&Ut-c-Dm^cs$MBA40$Pq z1ql)VchW>691Bjz-T5)lXEW;kqjH#D+63VUW@YzgbjKYa%UwS;T)8` zV|Z#rByEoZ%=7U3jnybiJLKdvxi>8m3A?C)5YxX^2sqp;9{)D^1xkNt@#;HHr>y8F zHO3f*OaH-9F+!xB<#_T_Z=lnG0um;JJL5k9DGU`tc>Ud$lnNp7{fXZ7{zO4Zuj_G< zjDN4uff1CdQs6)m838-$2f6KD$EfjoJ$T|#K$KGcC$BthJ1WV8V^KL?tiY&x`y{LH z)Rb)j`yYN}+5b$>SXk#MPc4oq$zU6MpKv3jqOZWNpUFr%>Tu@9R=Dy?8 znVu5Nid*`Z=hPqyJNnZ5S&;m&89zpK<@tNfEF?sV9UIAtNE;;wN2HnY&vag>V>Z~m16L|t^ea?J;r>v{A z|2rrWlcKzR=N@j;4i5Uc|0>=UV3%W;p&x8H{%s04M4^nbf3qw01DSZkz540~2qKXWht>Mgs$B|Q(bDm0L6 zM$_Hqbv3dm@JFIeX)QL|D=kNR@F-zjDsqm#7{(}>n&q)MFD~!}(f3hULaVkxTq}x( zhR)0wr*}&5c^k1YQWr_hG>nV9%#?R8fNUkH#E(X2u6x27>Oca6WPi(&dA% zWOzefUdBDMVf$QaPHeaLRI?3kaQk>E8GT4A{obw%4>8D8mVW;T0`doDa*QR%tDN6v z+u64zsW*hXYc1Zn8U3m$epAY3Q{QliFidV?JA+X=bSYAIfVC`7`wH;H1vAyGP|NFI z>~Tz-EC6?ZMVj)FWgfcM))1KRPR3k|a4N-BD{NCN`r7Z0_PM-k3bW7!h;*84bMGzF zja=mmUoKoso;A5<^3rm=AhGamO#_F$S>8rOlE?B&{T1A9x;$b12APkqjGu0;c0hA! 
zl_5UNDSRt>HcpJVv%JfWPF@yX(s2q9c=eUPf@{!Z&?DOBXj}5@GaXL%SpckYpPEK# zP3nfH5__lNv>J&ob+j_sbx`r)m#F!NO`fF)&s-&`rTPnRCqrb2RK%Gc7~cu7FU(Sx zK%bE8Y@}={@cd=sVr^|Z)*d@}vB?1@QWhi|3=pTIsoZ;!5xS0ykl>PJl?`s`EQxeZ z{_^+9I1B00wvIm_G)B7k6culFIr1Oz&)uM1b|ewMW6IB_3z*f^mgBLd%EbIJ+}@_|H@EpP?#91wPQR z#{YecxT)jF;@d3XNd-?)m+*$$S)8eL`qyD3TBzA%Z|4B+5!~9S8&)0Y29e z;)B0Ovfh`1f3UoC6lEbr&yP++Ao|(f@`m0n{fL_gIXoa1McP&ag~ zES!J5Yv5q*ZI3>M5PEy4(MmQBcJ`Pj^YNm;ak0P*M&IzVx3G5e`!;{))(+u3-Q2)b z`5m6f**K$!hoQ^2c?!Ss?f3nA+PI=0{;wzSqY3Zj9e_EUjWswc`qItQ+uqI2&DFwL z{-?XL_TDbeV9R{W0zlZ~94vzsTl!^|%rASWs&3g{yHO&;7Wej)*yDqe0r zo>piM@}uv0TX@>pVAv%3Z7PZYJ8OO$gb`&MHy0bQIlv1~8)pk|2jAU;_<1eRGG+ID z9MBCkpTGwf{~bo|%~Xs&_*yvopjTz)7j_2wl640k>`)&tH*DPiXZd-pzMov!!v`fT zV1R(FEid>3Mt;BbZ7w+Mx7EN#zRmynn&9@X=b&`@7xJKHVF^@NU^nM2oE_|30hz3T zq}c!-$okrNdOH9ekwJgq;$Ur!k||kF8!v~m7M45mg<`q8n}e(O4z+}2nT6os&W|j^ z$R%n~8;srfHVm!Q=mo*O9SQ%DOXyi#yxjakqG)!bL!)iBM zTQ49yC?mL=(TW%e*qQe4crOfQqbU9}-v5FHcAjoNfI+Ccd&u>lr;*?e3Hf=^_u`;{N z|L+q_2WRJ9@@H#nBWz^_$nk@>wiXey1hVoc357QC7{$d{cQna%k#R37VvOFOkxF#* zw99tMc{%`H{7b25ZDAv7yC)Tetwe1sZT}uBVr=_AKt;hHbX!+J?=K~xt*tFcVD=P5 zVM}2l;lGH47>oGN3nDt9+9ilO>VGK(t%Ynvtp)d@peVnkfbic$!GBN)fArDv{_ZxO z4mPg93G?%+TR4Fz_m{>I#HOgwdzWkexutV;b45kKAjAG{7SXqUd-%LpEbx<2{?j(} zyR80bHTQHre}(VCnBHCDipfy^ReFO8X7);N(7fT*0EGt3IzMDOsNi{*Tl??e+8Hb5Uy>@Y4-caiYy#h54P|HE7&R){LOWnTB+?^Et zYEtlHoP`b_z6Y&nf(W9G=`MO;j{cXE1099!63pG@7qqR|b>4rn-tu!(_ODAU7^?5C zv7kft-6a+b`~Ue8i|~&@#t%~W_ovxt8;cRNA5AVlCRf@mX?&QQe@62#u5_2?{Xl@9 z+zKj(`8TTfTWuHGYVQ^5e`m^XPC_x1*rl7_)%Pza86R3Z|1y&O<4Nc5C$PxRMHJ%) zcF}v!sDKaSME;D3^8J`j{g>pVzn_kL7=1(QI>v+SBIMtdl=A&(>i%1@(mzi}jC}qR zbQJk!Aa|+1o~MO_>;IuV;P=xTLtjjA$cy&kyXd{A0l>)jpVEJH17;V&f0l#E`hbGc zk4f0?^IE%7F%Z(&@PWiu1US1Nf*mno5dn+6)7sxbRWunf#$Y!UF&1;T+rlJ9e};-e z=Tc#^#{C)Q^P$Ta|MwFL zv=iH%Q1GGc!|sd%6Epnt83jgv|941@X32km)MzL4E2RFLQ3~JS3@9jq5ooueG9%FP z{zp&>m`HB7z@ua2zm5qzbrBg)PYV>Z0b_g6J$g{@M3+r4y>GOU6aHnKfHq#}KDdkj zzZ;JZet&H|-!Iqy`FK#>`?lkq-R-p4e_z*K;KxvdpL_3O%-@WC_^oO5zv$)iW16GC 
zOrudSksSooZv{*6a`W+HuoC|j=m>taFa2-Od{n9ZAJcran(x+pw0QoPX+FC7`fG~( zOGAP7n!h&umwNy6;eTH3e@y9qOKkjWxGH|`Uq@B}ZtdAyE)crpXYyE}s-xCkGJ7wCNID0sk zJqLbk*I+aO18e*n0I+*M{fF`!##Xz75C0>e*xzire=Z7p5_Ctke&i-Pr2lUR%I4vG0d}uw5PwQV;am?i|Q}U0}8uR>Jy~F<$j`(k< z^?zEfF$IHNwEkJQ`@bJ?{1>ta(<{ZiKeS5$_JkW?ns0x(wb>K%_3h=!Khiz_pMxg< zg))Km{+O~OMno{I-z_2-yY*MM)Aw{pn3nTy#r^Xb=08HXUrD#ng!>MRL{sh`#5M0t zH%v=^?|04ryTQ)?vhcR&anYrTe_M@1r%k&%J$tVAP3(S52!C@q{%0>K|BJWL_kQ|s z$fW=NO3Tl|sr(qI(=Jo+_W`K)Cd*%)^#0a%@Gnr+f5dw5?XdJmfkK;)e-vuHH*tT= zYyVvJ=D)V0@NZc0OE3gl4R(qBo)0e?#&AAbfjoRroOr{=Zgq z_?;BT$owuL|3_0{OqjV##J@vfe^J%^?|7H+n`!(T907(Cm;n5rgoN*@44AjbcCqGX zne%@VC;nH`A5HK*3Kici`|gD2Z>zvzzz)0Ujp@Yxk(~Iu8vXAe%Ku8KK!<01|4Eqn z-fD@-c>Xu?yKnvsU`%%Xtk~XkK{tf|eTKKwS@+W8Ytd zl>O<~`-}kEeFyUje&g?_pX5Vbpd8b8G^`=&;^!Fo@8A72?PtH__ruFp-+#RCcc;HU z`%%DeA<(}p)u46p-zqKNZ!OFGN)r8>b#_NyelPr92$P(nLj-<-Z|C|Bs`~}}o)7bG z(ca_#G_H1UySGvY*a$uS*f8=)$e?NeC2jz?UZCdaOitVG>5(tDI zq70YO^EaElLEuk*>g5wnS@DBV;e%I>Tq%lR;b1z*j;%tbg@u)^q#X;tqJ@J?F1tSp zCSOdiL$0@kr~OD8DmkryWI1Sj+eVr23NEx{#Ea#*xY2{**Vk{{cqR5(;?w%*WTIEd z^7EV9H-{ubX<^d1F%U9(Y+AeHef=1lM4KY6 zoH)>#C?p2USDinTF>R(h2RuAqcq3l0f;@okf_=R0i}wPS;=`71i2!HCPUj zi%MoqNZrrtOw}+y25}j}mWMjflVQysq(|o6c$kw2MLkgzJ1Ut9r?=4LII-qU+^KH7 z(b7&)GQ;tw&v3Jn)l^_erMJ<9_7}jDF8YWHC83_AWm)}-Nyq+nc>0rOEaD@28}%#x zP=ug&OlS+?6{TLoXk9M^>Tt|(m;#hljqMUsRD=Xk^ktT-O1H!0a0L~?0Vb&V{@i@H|IED7T1878J71UflGwrMaV(jmrmYMC}BODn7aZIwdDZz(aAXk`jA6T+XH zh_;VvbM%)^jfDoqvpUmD0FlUx4#lE}n4O!Y4P3QR&hl9H%u;}Ss>PWfi{38OWJ2cM ze9o+j2qnRGndAw6pMjP62y#f5wXUZ5FfSDGfUs+Xj^Q9#dhuhNQEyK*Z^1DJY`Am~ z?i8=fmoL;tYPh#yFV4EkrYW-LCG+#0yp);IVcq{%pfZVL{WU- z6(v2pdSoA5+7+7#I>Dmxg$yM!rfPtJaVJ9aS+O}7q+#03$Fj)gx)(J2seYMc}esReBqSQb(}w9cqk{0VX+U?Ybjv zh5PQ!>tR~R&Aw@Gq`bVm&*JCLoWu1YBlUTCc?m6*H9lVHDM$B5PqwhrDCmpQ?RkWc%_@F^Gy%)z;Msbx#c1+rHaiz9Qj~mU+ZPOABIT zWaK{4Mj`4oNqkf?uyXW$ddNe+#dl9b8yXr;pREfffdt-T%X&(Mk~aC!jl{dHx^)WJ zD)T1K$q|g_&!5jZba6t0Yu;7R%#vskWWG-bmQwG3DJeR|WMmX`LYtvI%T z9vj|3joa!b7S3^vT?ETujq5OSKsvW4=w+cv1;<_eLZbPF1!O8|pCT?3bEuO{6e-=! 
z_08oGyQwd0v=DclZlXM16p7RurYiLBnH`gMdVBiyF}-?2?C0|)Iv-sxEiEsP@VULI z<+plriDF2h?Grw(nOW80hqPk7B*xfRl9LJJS=6gjlaiLPA)}Y<&!V_21|>2;a9-%N_f^-t($};kl;Y+-KssLJ z#L2aJ1Bm|w8GU_rtt3NSnyU1H`Y)|56vHI_cMa|tNUgq$hwydOgltQG9Jr73b#{p0 z$)#iAUhkhj>PQvAlFE%HjA0m^nb}Xlp%*$>?j+Zfqn@FjDm>q~p9d1mJxML$?>_gy zW$3|+B0D~Eb{?`T>5x^f)$6(!zmjR)9pR@yyyh?{svR3|jU{H9i{_l_=stel+u3D5QFNSzu+tc?th%{8Bu^-uLHf30@4U(t4MQ;)vfm=bh{x5Ndvw!4;n zz9zL^+>=;;PCe4X#aL$KaEGa@)J>i3>D=^2Ag>wVu%bT1<&J$NL);RV9S4dnrh&A7 zwR=z_+y<;ns3G>diYDmojUw2nW4+C%mTP2S#*L(e1afPh;=a2EI7Q}Z{5C|a z*j1#o4RHw0M?F1@Wu(~0vnEb2n4RU z*PLMF;NUp-=}ley6p>U-mo`3(UIa*!=b_xSG}#7<;C5~#6*;+gz)XMPso6S59&zy$ z6TcYUXI#c*?nDwCz2&(2v+7~$(U)W_w8ph1!|E3BA;OnO>@vPSjwbce z?W{E8V_qQ!oEeS5J zjv}`47a4Lg_g+4p>_~Me%!ntr%o+E@-m=2x?x@U|To$#YS@!697b7{PJh^A15Pi_v z64nh30;iWn<@+uC{YBq=-ViTTFWXN8?7SzGzMh%YC13S0w#sLd;TDP;8Wlm$)5KmY zTsr!c7>hAxdU|?(wqk_;eMZCw=6Q3$1qT0X9NANBnd}ouW@q;`Ha32YWh*kRg662F zeyuaR_o?WO8Dl#lbR8@_@i0i03r8zgaa#0rN*XvK*(Z#w1-l*c3* zjvHT&i6IpA7{~ow^gbjv%^W}Uwc|8nNj#x_1X%Z?cUfZUv~zuPwTI2%%EDVKN^aAs zj&5okeBw~xL%O+b4;#8B;%6be{I+o8Yb5SXfklho&fOQVd)`249{8wN;1G)%n_-!~ z_EE?)i#BNlFkn~Wz{*uhx+l4rN7d?^nV>%6uZwKj6Hj(}DhkjhT(n~|dl-0q-{9-i zC-m!!JXcbEh-f7%+Y=O35h|+*VWFdI2lWbZ67IZMLgtLr)Z&zA7KRNwJ)NI_Xr^kP zJZRizK!9}(#*nwg%XWGeFGm76Ivk1p$cYA*S{kpVsV=GMFKZfVk-9KonXMVX7MN9W z@}RIY1Xy;fTsf~h{0~m^)Nerh3$D-NAU#C9>dt;SGv0&{PW78z;b`rni(WjHB6oe1 zcYcxGO5k!olw7iMzh7zhn0^Zx;fs@4*KSf-z}>lQMBi=3<4!zYlbODvKRQ8~DI_60 zWRje|Z<&AjapBU%(QQL=y;wsRflgo@#+Igg)M{>Araeh?Ox*icc)5#CHZEM+4Ey<_ z$>M|aj*QsOxTez;5*wSFpG+&=GcFXWb5_Qbm{xhZPj@q?irl#+eY`s4813D47!8&R z-geq5o&Q>trz{zsW!G!YdBYWF*kh&9^s?zuqnv#8j2If7e1m%jD%?gMR8150^cNU? 
zb~Q6IGg1&>qXsb{|3UYtk*klK^Uo8C!W*syw?FRbDdm-ts!u4aNvU6*q;XaJd|7JC zz8opq9Qd|LJg?9BiX3lQ+Vn=3CPQ&!y!-nVS;DyCJR9-@$*&)!F&ULR%Y|B!{of0yvQ-EA3c6LC$zb3r5d?Ww*lB|GEP|9Lue% zcx$vO<{GEL1F^oqi79NhLxx!AWz)+}F>St%kpB8bN+x}4e*|t{l;^2wf5~$Mt+Sj% zW3FH6w6fod=$BefZGGswtM;DdtiO9M++$mj6r3-Xf&R{jj@sSUgRnDUp#mF+ds--J z?JmkQDagw|0-VSQTK}qlV&U0bhSuGuGiCT)VHua4$UAA*2x;$$B=^&cH}Xgw>egM8 zyAgI>-wT*S8r#mBB+AOl!cGiG`$J^R^w>r8;1QnkIC&bdGo+}~D9gjvWC;GjN z&S}hj#36w>yE)${xRls^Q|&q_NyF~_TE)eP;l{$=Ub!%+v?5r@N zM!-{J*s#dl@xDzp?SkaT6As8VoC@Cm(wMK#dFr@PPBzFbdS)-@FeBM6(Zwk>9Wi_& zfPi~!%2Xjj;|Z0(1Fab;R6%qzN>nco)9&UbH>U#M6Du>tzEuQcR8YaUvLf<&H`x_q zCY9=%>wTiJOtCZvnV`)JjHpE?SiWT;lb14S8VYdy-r@6}tGRM2`u_7! z7X>1ruS`mM!8Tua6trkFb-i)TRmv2#WcG?zO1R2$O&*sVm7Eiw4%dAqvgLO4HDnUv z)KAm!kv#?MM*(W|A?G6J@dRr%IV9Ypc=r(#GP3xC=l0p0Khi^T=0LG+9oDm(nSgY1 z?M$=N^*plaGVu(vodUz=3dGZR`@Ein*VpqTZ(Zy>BI3ie+&QppAgSY(`?a$%s@zN3 z;9(l!-H)9gh>)Tu#;jT^eQ}_+7YRb7%v%MxGha&XVe2pi%0Ga%!T`t z(z-#COTP~binw~F%#08S;}xS#GAAybb*_#WxFnJPY!t0YwERZNwN31c))Cet z>bZzW7+Kb3(PI~l*m z=1p7~pnT_JG`9_EsEn+v-IpT#lf?pXm#GVtkBbqneuB*2n>7zOH50n+!0q$s4cxji(R8x`tEa$S=)^%#8|{YB4Qz zF`VJnVN2tl+c3Qg)t0>=&O#@O-}9ty-=XnDs>==cAFv>Swd0{`QwQ=Xk5YfCqriB` z=$9y6NRg`7kwGqhx)YztSyiXhgr8D$g8ZhJ5{&`vF>JQEd9pd%mTd*T%B`7W!-8Iu zh0d;@l?M?5!*jM9#~Vg254~6%LcSID6Ny(MU&fQGLY2voQ*C@G%NoQS*VKxqicb|w z?3U96S9FjCW=Lh?UE+Ege4@pOG{<5w*coEq1(Vn7A1kX?B>EAhjm?nD?Cas-P3VEx>O{N6P|h4 z4PrG*X|TctMHJLA>WTxFOM~4#x$Joe>)MV zAg-c;%$uNTFs#ujP`VJKnfw9k#Rv{@+}GyXn#61~C}PK40`fTj;(=oFnTb0OJST72 z+Zf3ZiFcDrhoyd#akRs>h)*N;qSVZTZbL);ss+#YOxn!3!eSzk!QBeXGG)13o)$; zqA8jf4!c|7b*n#2L2JADamm2wC6Q3Bm9reqoiooCXu8*^sfO36=SI?RZ{g4Agm?|U z7ZUf4`f%(7j=z5zBz(+YMa`Q|@dOfvxOCM^M-m*W;pz^x@1{09iLVGWYkP86@imJO z4yPU$=|ilCA6&2C7uKhrqjNQ@!lTqM3*)#PEjwgjEFRhTXg{UJc9r*wkvanESFhX| z-*dlvHxZV8S;T;jsHLgI>ei#U=ro=BVy(gvNDI-x8zq=iRev{9DdBu(gbm8D#G^Rj zIq^W)U@B}RVrf7ff@itfCUY{SB830N+_MB3ByO;#+r=KF68Xz2l)H`R6hXipl;4tE0Q~ z=Y0W^K>z{`eI3`dOMq|BQE^1NX_YdQit5>pUM?vS=LP@hd>>9#6W(i+p4bM@O(Tbl 
z{BE_63TxcA`J9ne{PAVb=-UZAQlkBjMuuo6zsw!z$Tx^rXA;l0AYw}ZrZwfmVHhSB z8=a7w$u>H1Q`2B&lsoO@5}AaV8?XOr5Y}JuK*&Gp~;bVx7R}DXQ67%-N*KLMgi+>zLlw) z1bWje7uD~Y(nHs$y{qqlNLTWvfukFE2;e)=IRtwuVY=Zyfsi z@s-#1-Fpu?*p45s&3idBLJ)dUnC5l~E)z6+wwU5=y4&Vbb76*V>Wd+?vMjdy%WGk#L;{J(uh_1o(1;~)fOb75oecBVb!nLy$_+Hs?cP&n| zTYwC3C6~d!De*BLI~yaA{}bVW@eSQ+$u*$hMGH7p|;>-qL=hN3Cn`O~M-fr(9bu5IpN9u9KF zAri0kvu9

-k@l8FTuVXk1n__Fo$19te>oO`404Hp{q713mbXDdZl3R-H3TnPXnK zE!T-xr;_94$49|_ni3Kc_(48bdD_=VHSp_4qnMZ&=?qc*Su%MLW0CAH0%1n?qMotC zrC504R7Uv&U=T&1N=(LM%a7@17*2i`>^(pdtuo;ln(pV|QTCeGFZj{PO5b%@TFp&- z3x+SZoZg1D=m)R1Q;VKk6^?j$4_kJ7q41+a&ZQcQcp1vOuI%%R5nt zc;7xT39rd#la~GTgRn3sa=Yhg0Zak{rICeYDw`(&C(cEoCFCSb+j;2W)zm-}?+pv-25MWNQkGUIC7o1!7dT&-;% z6}w(^KKy|gnb$LY>7G$(4)Wxa;Q_&3S!@YfJ5Vh%hfhJIOtUII-fJf>>pP`Z-Jpl2Mnpxi znAi9gUNE~Cy|uaixaf3^$^aQH?SWJgx2R{&)aI0qZuo&pN~K_~W_pxl&>BR8`Q<(H z5$CPSO3`G=pz8b2RIYK1u_P)96{#O*fG*GB8P**+({Us;6fzvJH9~z<_}o<}J(ePT zyh6%3+|QEsWQxN^GJ@CFx=i;Gt)wsE1D9`5OUX}G=vJPF1@iMX-QP}%uEJ+YQ2|8u|$U!qO`$J zov5Jr%KDM`&Fwx;TC#<>Cp5Rt_&BOPJGMVkllJ7{gZezQ6x0< zVq(-#Xsl)_;?kw4)>b8rqbco|MM}=3gy9ezX+J7p`)m^w$2zb>tAuF^ig8bfE@^>*F5a@)Hej z66jQVH-?-jJvj-_ECna^wcZ#$)P^gRL(_0{%JiHo#ER>US$(r{QMotpBDdHqs6mYI z$U8+TtwNMk;ug8TKQ}+sT*w)Y!*a5+UklCZ#G$zmh3?Nq2j>FL+Q_!ldEl=gvN2s|cNeVCRwR zrMYA4H%6$3>^e3eZ? zrsw7x2dE_8Q#XWCLaB&Hk@VzopaUgx>8O|vR;Ro*8Dc;sIF@>wEA6as(#Wu#zpLfv z$qu*4cklGb71g(fnR9q}QEIN~Jyjab-HB z*s_a*@BlaIC$&JDmWN-sdk>r&UU5tFZT5ayWIoYE6I&Z%PEwxpGvl7F? zQ!=TRzJDUT&Fj3=yDJr9U2MmBc%(=zN7Avo+%!kq9@n3`XN+@1gh)hzknXN2-uSpx zhK~J&VrFkWtyo#=N3p3k8O`%|FXoAU@FJ`drANF0o9#?j6F;N|IA8;O+e5kfF3To}zs;vC{$cXN`JZn4fU41DFQ>Qnwl6x0@f-+vUL?y^s7$Uf zvqESXo%ddBA>#6MSpjcb;!2n0+iFP*!9~+Gqk~%)DJ>2?Q4d>Hl?+YnNN3l6cxP0A zA~6Rk`EblW&YkFW_n@Me6I4A5*(el$97g6+(!2E~RdPbDOz4`+9h~}&4ZF&&0zp6B zgy|dYzCP9Ah-pxq8g(bh%mQ^7?mAJBFMkh(Z72}v+#>ESG363Ax&K*zXU&_UMIz5{ zjXyr3+~k9h+L*^l$^|V51+3PT^zH7L7?C&KA_B}TM_#hY!UG$LA<3(5I26kPi%lN2 zE)ZK7LZxCvK1~x_8fWa*#1r%S8H#f!U{IGrWx*T1lkzY+kH_1>A{6aI1Gmf=xzGA@ zuInSFO>~j1nyjB!-HB?Pa_0``)<$+IT3A0%n-{8JXzunF;91n_6^B7r9b9|9ZWlY9 zpqzde`Jm2RqHDxCyqYAxaoDW6XR6453ZHkTw|N-SGMLT;HBzqfK&7JMNvD{^7fU>l zMuRTuj6o`)YJxnDZ7wbwVn>wASfHOjf3{dy z#M3H0OJ)7R?MT*{93*_J-lOhj_ObRHvXi>VfxGLRmp>9ec);Cl%tZF`z+mja^+O1w zS4J)D&br#1&ZXJqaf#0StOc{j{E99yb(5TVa8B(R_*<1nV=QLGl50pue6SLhc&lj@ z6!LIB97nbm?!&oUg^!Z}tzkh@hE?ibABlhs&!(lOwg-*<<7!Mzr|JadlZsVPvpaIVw2TF5{xUj~>?Nh~phUP6Wn@i! 
z09kaGVozsBA)YmYY0-&o)hCH7rCe~%-qt9d%~?uABo4upQl#9({$5h^sn;0xYl!W( zNMs#wG5yjojn$-t%qN!B0Ge1@^wd!6ZF1Kjo+szE|UlD7X>RM?*l(M z|8>X>#6f+ZTpzsX^VS&8Is*-waJe}>XJwH z0p;7UM|PbgX9}9*K@r1-;q+S7e%{~}f6>jC(@^#nwWB1ABggiwt}Zi33e$F9y8S4# zFOI&zSKX?b?!gv>FN;NHV=>@Gxt{?tQS|0%S7h>y?C1qI(ujBFA>$Y3*4NyVp%sK1 z8?k<)MCnUjxeOjMN1r995*k(TcWj+tO9lrid13@ZX2`VA7*KVqTy1i3HRa2Yfpu+- zfxm)n9oxq8X$)9`5{Ndd1+#$1q&of;=z_5XO5ye4;{>`=q3oYD`G^`m7C0xb_!8Gr zoP-_`bZtd6ov84l|FFuIMFn4|4BX-u^%)mj^3SfZ1)lTZxmpQG^>XmVtm>edB=;5R zSow_BEJ`8;gZ*-2?(P{%xS*iEiI-E8`pW43?F3z>k^wn~+sqAT{YrbaoB8%B^L{NK z$Jw5~pANYzWyy6N<$O-SVJxSs{F(I%wJF4F2atylBot>1t*i|l% z^^1DXUi{#oz?5<*Vs$lOUgo2}|MUdb!GfMgAMd;sir8w@lP5DmMs|yd`}ua^gp0?SG(QvSOE`#lX!npw-Jwtl2Yr~bpbnJIw42$ zyH9;{O&$h{Jxde&tOWj>$o(|}oJ=^Vgz)^gc=$-1(mR*LMg7A&c_1KgVY5+iyYZ$g zsF@Zjzk5l&=mf2)A0|0)`zWshrAL01!S*h6ifR)x3 z4)H`9WKuL;mrUaA?|DeJR%WGR%K~>Pm#Epu;hjakP17>yx)PhlGSHGZC#+q-aXARNRyl{*{YG)TarCzn*yAfip zbjnTr<1GoZ<{_k|lusVDMVvBaB9mv>7Lz(wSgr&s;XBDn=Es>OEl3fWvwbmOAy7VW z<#gx9++-HPvfRj=OJ%*$2|whQ=3-V#5-)`Oi{>I8QJ#zBRSAolJE! 
z3%4J%ha#Rm4rOORz**Gt)tMrdnp#Xu?hf!-PE-j~r>yC?wNafTBXo<4 zi}Z|)I9nT^H3v%VIydhzTs)s?b}z)d@X4^@4Co+Ty5QO#0Jw$Hc4cPhs_*q7 ztkjf3O7e!0`5k4a|U z2&LW+zyTebL3KKj<$l&GAc{X~PZ0z6An}eLMpf2Yg>)-6hKrlu9?S19Jnr&HFQt8- zshAgqtKI7`?(n#zd8N&HZFzGZo=S5=!mT%Nc<=f7+3VM(6d>VTpvXIzyE}5)sk58h zz}0_PF@`q+*}gby79^~7gNyzy7n#!P+(Gz~^MSq@DHgHYPGVxk<1axGc1z;j2oHqR zwI`OK_YM{I4E`4oe>tuKAAE^G(q=WKFr7HQ*-ct4dtS)sb|r;9#3nGv!g>8c@!R(_CBv@^P&M9Y z&Rv1+POo@EUj&OhHaoEPY_E%sK(TD!qO=ov-^=t${DsMDChlYJDDoEl^K@}xI%57~ zu#Zxy2iDFl(=&3?xW1_(sQB#psBLq$fTZ@r0Q**{We6W9GiRl;RIZ*7?_ z0aCid*R(_8%KEVDA#{1<`dCL!u2<^jBo}tUE&NZi5){4R-Wqvso8Ye)0hO!)%3{3tSqRaZxvM4@|!^?@RK&qgnYO#gF#6|1z2&#e244ewOfqiw2 z>spzSufPO3!uTQ9g>ow+A1WTGdi%v6+AXmFmVA|u1epI{cBkKwBLqTvpgDp(b|4@7{M>7+|OaSNp@FTCkEbM zz2bc}t_fZLY3Qset9xKC83f9D1FP#vFTNnE4vv1x3ZVUp-P&;uLgPmp8Q@m_;Cdeg zPpWzrKpfCx7XX}Y$yF#4^aKd~A=02&R^JhAAZ@fwq$O}WL$vzHvml1gxM|hiSdIi= zqNB=Xg_NXC<2BRj4~X!Wo-wRY5Z(_%Ln%~PfWeA-0|JEuC0FGwn>LtxAPpim z#_R!13m-4OE&ja1Vn33Xj#{^F{ochFd5v*bJO}o#)xA&)2j#wix0kZ!J@s7dsr292 z%77v&EEAw>o`OBLy*L55ywf@~Ew&ELXq~qO4WNVq7#ZjK*yS(ooCc&^jpDJ41sTK9 zJt0wxSmINI{`7&Mmf2dZ3+b%LzsG<*p0d`nrF+n{pT=xPE9nlXmZ~MQk&$U}fci22 z5;3ZSdhIwgYqIm6p4Pj~b02O`sg%UQ&De76Zbl7ejA`95B{9yj&!KcD8808A92@>3 zNVxyblYQ_lnGFB0i!Oxs%<&xc59A2N+OCWpY1(g4N8)bN%f-T!tZYqy)shlRWDb1K5a?L6*s;ADD(oivUHB4iDG9$Su`Qo6^qAehobmYU$ z9a7#yk&+Yhaxzr1Fc!ecr{T#5=N_T#XB$VsS1dnevH+>lMvMQ z2H*AB7#Gp8db4=}hA22GJ^c*2!V_zV3&EF=#;=t)dFq&Y!kSH11`1w*(v!B(b3bK4 zm{mA84y%ile!Zm7V?n;q+;mN=t>;i+UEM9^a*p}+3F1~BwusD3*viNNGT$_HDmF)v@yys5?y!$p#q^R*&2COdZU$J%nWkTFg_#`L=Dh0ZfL#HTu9Ky~FCg^o- zZCMmb4E1ix=K-CkM9lQ%O1EEk5Y5^PrG8m#E1&<;n&IerUbpdUK7lL=>Rk^YoToSC z`&Fc+9g(e;(9bOtQc_ZtLF>XRYilJ5stz%smiqwKp04d0C&B9Ks<2B_($y26qfzhv zB)~(JnXZoL=p-SzED0;y-1y>}9Q*QR9346I?wOrdI|1Ov?qXr8N!&63rZooIO2RHb zLBAT9pikYZGM(}?>~@fJ-hN(sdgN;9lE(%s+&*3Ss;WOZ-IJT4nJ#%HF;PGBnZ^zA zN1zZ4aOKMSNe(iG%g?S0SiCB+(I|YPgR}BL>hm*!D_5>SwzjrRyFm{y2Gog1g4aoe z-QEy?`OsI|3JD=BFt2|I54N!M8+cuNZrL^YW_4}*EKGnxnM6!|f;TW@o;r+K>&Y=2prb|2m 
z0`YOT@bK{)&*SZvrV4oX*S77Y9bXQqA|%Gz_Jyz&zqZ`%GV1U z7%(iV1?>cGl{nJD-ujS`fpRBhW!WOHbQ8dG2eO1Lz3; zgsaOtTt7h++9>W}i%0i{v}%wxe_I;`?!P+cI!UQMUMos;FkA8g48hizqj7`iQ~kCi ztG`o!!P9*TW`aQr&6H`C(1(JJAy(C(Md@*#kS=WyfM3N7qTH*gv?*P%4^6>O`M_L>R`EsVO2ftvuk7$d)_XRm{K-F>Rrx<~do`Nmfa~Bu8kI7bgy+5$A)T23kl}9o; zR5krLp7@Jg%Ukb2o_MBOudUBCL8qQ`^Dt1k2qEt&EpDRhckFhcdKTFw9;_7=9qrX6 zb+loH`XSp!wY-_T#Y6Do1r=Tj?-9@l(UEoDCl4diGV@=U$XZz&4JSR*mcV|Hn)>NJ zDGR|v)Tt&t_D_X-$}b0Q4zQ3%)zX{R_;T63KliE5?gA4LK|Sx;n9OUJ^I&uqf>*Oo zss!9Q{wj>&+`9-hLs}GA1-t=qdFI@QI5n)iLnTgXKt=GQPbi2J`hI&+&06e-{jqGW>UVSFAS^gWoEuCsFPbJ5~ z)3zuisXUy!GUfMa+i^e%h^e$-q#B2^L?2OhcPCV6Zc)-bEUwhR%k2kUbSLtzmG08AC#N=_8}rQh4p;kd zjwVR7%fXP5&};_ml0#<>BOK>VN_wmgBKrHkrtUvJ4tg+nl3Y+}|&6`3hA1`+bjo7T!1Wgz{6(r-$KbX-74#jPr<%t*MTwI?P7i zJ9YQ)^8PF7YqVR>E1`ZOsfUJDj*!Zl@NxuBKBJZn>s-463VZU?D4&-s4>Qj8u4m>H z6S#Igjwxjg<`jnaM#_Q-xlEk{>++5#rkfgF#v#;v=PpFb-1iAUw1KsvJwlWY%%mp` zIq%h+vL}g3?{bfLs2t9+1(4#gh@*6cZ(kZ28ppS4+PxKrr@0<}sFc{xw1_-ms@SS2 zk{ZWk8PHfBy&uz&DE z&E-gK8S)dNc^)|XqaH52Ds#|9;8lRK-)aXJszU31b^A-xC#6aG>te_l%kJ)Rv$6M+ zIVoMP87BizZbU)H1{H(Di5Ev-+E}KpzNnwKnPiLsv$@Kj^qKoket6P61A21fYu+pK zW4_lxKZ)BkA(^mMzJVK=2iQOMF3f$rrj$NIhZJTL_(%jm_ zjyV&?i#Xxk)6+9HW5OQfEe!pj7vO~rQqup!8(R(qsY;Hsr-916A9t!v1yZ#t-}ogMQret z0+Z*VTcu)}H5DuuW85SQdFeU2}N=?)ZtkMJxf>0{bXZ z9=wG!p)p@!M_J0eMy{;)q>_nKNKv{>32ynWLQuq&yzh*`I+u|=Op}(ByZMgYa2+`X zMf0(#;o1PNJl)&_ptqhMJe!%Rki7qb_IafyAsYY4o{^Ak62(hL$1d@ysHluzB5h>1 z?GG<8nnx;GXTU*h^AVSc4DqhQ`jbe9ejdDjqW5*jgR@7%P_J6K*Np>TKRR6H<^Fk6 zy`ghjnG6^B-yo#gr!-xm z#MIpY!`3HMS}h^72N4z%BM8s<6ZEdskeyjDDSXd$XKs2Q&Q;b+>kY>1%1qTmQ&%!h z=H5FUefFBHP2STlr;2Nxiz==jr^ik~&Gb60L1N3?FkWHYY2N+hv6i4P^W^J(RJf=k zo{HPg^oc-|l$7eoLP1B_+?CJ?8?R_z9hzQc=?glGD}L^GODE`ara4^rudCw@5Eic# z#;inKKNIT69-kQvFqCgq?obhKc#o3@`n+Q+pk6s5=lF%yBH&f;h>ByF@xB;s6((rk z;Ec5hS+PcFvzz=g^{F%Sg4Ve$wda!Iu#0TU`_zWlj+Nyf!{-rlIPOrTa{R)Cjc(>^ z>v&;8D%UrH(l3i31P91P{AZ0r8RS%iB}FLD4Dg2X)F0oHV8K80MgqM5eUVSLu$LB# zeKRpSH`mT0I5?Qy&CP8VdHe7Q_-KEfDVw=?Lq@G*n;v*s6MRV;7aLpKYc29B|JWYy 
zi$ew7YI89dRyU7USw3VoW4f7%5}<@hMEfp%Ov=&s-@-V^ZQm_ zhUyCdaaQj&))>i8!KrWEAqNaMvSCtAF{4wrWB4lJ%_#Fy+m4!mfB>7y;`o)n~6|sFJutv77(^)~vB+6w9k^Eh)c=gZ$+A@bJ#< z#daQxj+Cn0EI%D7t^3>1#X`Q`t>uWw1{;A{YI^eC?(dp?sXB8AL(?%XRCVtz_ z#->jv$cpU$^>L>0P_=y=9}`K|q|n&zF_nF}DN9)zBZG*FY*|AHGbLM$#FQluBO$pH z!mYA|tT9B$B*xsx*fL~^!DQz!&*gdXd_K>c=e#@b&biL@xz6wZ|Np+fGe!z9*x~p4 zxd4B21em^x#kOG?u{@tiZ7D)4%+FW`A`qtJbV!OH@#btQmOxQ{tZ~K+4o8#wy{LJ< z`(XK3Q3RlPZ;)ElPTuQlhC}Nf)-ajU4e)3}>zHGlROy%F5W=ZoWM|g6QmUgZt}B zpqI`I2rG{K7(e`=3@jGt)B|bKP(v`2p`VbH26Ub2zId{9>{cKd5!~)zaknC1YfIG+BWquI?VIE;(Lk z^VSxh<5G)@t~?qwlv^^rV13`f%rh!9G<01$3W|2?9li3*^rsMqJdc=Vwpw#v!N{sQ zr``8UkF486#yB>YTZL)83ZmeUe;0>=0;B=lodFN2&m(K!`;|sD`7tBrk1=@AbU-$K zHsrX9)1i>Xk!bKkieI^kc5(mWqD_m+{cG09!^0ywhq*xipg0rrCF(i*6p0>)&y@-c zT-v!_>cpaMPaku(x;NLyb=IHWW`USY(`a(tbe$@-Epg}tXt&BjH#={_rL(OKqatZA z`qBOKVUCx3Z%&?`#jaQ(+n<9UW=+%y(p+)75t>_ZcZk*&v{L#UJwb5xFNRNw)(MLq&k^R^!g?xKXBzuq)e_% z4p`!i@W!SF`Y?LEL6w7Fy3m4PuJ#U$>~iHaBM-#tw_ZcJJMc-BZ}WO@sjgalGUQ#V zR9n7M>1C0)ca5?~5hovD$3 zdZxO^6DpdvZeL+i>(&uD`gSdS7f}%<2}`(shi1enf;FTdkH9(uchBvQ8EO1!P?fU5 zlda;Lz=6_3c>kZL&fdQGg6Jm{#(s<>L4)}&SWqmt|HTCe^^BT;QHKwpEIJG+q;;Ss z%%dmPimw*k$qmy-b_!b2tZM48$i;x_)TEHM3jSYd^$v| zgfdF7$gQ%}@dp66{KPDhrC0l(Q}DIO1K`)+snq=or;o3##Qo>gWG~E^3=NG$YT4hBzpc&oZj!GgoLg! 
zutiWXJ7aV#K;w;%_I?}(2G)a|oVfV`mU_)8YBpB$##6Zrws3+YA?bD9nfNJk_Dqdg zw)I2qv{L)ZQ}00inXqSzOUI)D=O-{B+MS1E7$XBVe-Ka5AEJf1#7cLK)aCeAc%wXd zLbO({c8f_?&18rTbPRt9v~20I3(*1=Dl`C1rG`e8sygf=`mGj0J?I8Jp+OI-Pv|Dz zM!{jP?vXzug^5K@KSK0gjzquEz_*)=y>6BVo;m8OCSM&SI(~L!^vUUmKiIgKlDS2% zk{wm=ZDm@_LcL9?Q&{WgL{UoLjc=>FZ_mKNK)acX8UngS2}K1o2TTb+%(u*eCuRJK zUk_M@9O&FG_Z$;uRXo2{~kA+K6}=h17k?yJSZrY-2n8nvW|{U%rPGs zabwUmwFjL0E!SJm9#rn84=Jh#>5e}C{{6cy2wD04+X3Ee zx>g;lbQ+#@%8+1A8!anor~IsWccT1RlBw#M5#J4=~bfzIMt;e~F$0yx#K8Tj;}p2##A z!l|R-8T5QN@KTy1TZ>~sb-U@xq_~fW*`WIUqz+q8a0YVd>+3I$asQ*Qb=nF@KETQM z?C$L>X{?4v3TC+nuLu-P`;Y)vt-M~3s2Q>$BpRdce8+Ft&Gi-BSG`eR_0G1(I%twr z^t^B)?21SGle&FwwWKQ1USrgfzh4ee^(UC2T7K1_Yg37ZGQ$2Rb>sC9EKMxYtpAr`!m3BTdf_M`ek!d)3SMgq9*Y-n_i*6hjs}Y)3V~RR4a=A3#f8Mg zYR$_A%W^w&^)T|vM{s`d(aylSd+axPcwd&IyK!E*2K_*DrS4xU6(D@Y5{PQz&c@P? z=&@tZt{5kiNN-L89lF&!IH(02#!H1oMW?0Wl1zpmY1o3Te|{$p6g5*aV6zBxUF(qL r{D)EVS|nbR4=`{t|Ba0qiEU;dAoxjcL{E^!Vc;?~w#1a8T_64%=)98b diff --git a/applications/Chat/coati/ray/assets/tp_ddp_hybrid.png b/applications/Chat/coati/ray/assets/tp_ddp_hybrid.png deleted file mode 100644 index 52cd8f875f84c6ba78f6c583655c2a84af28deff..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 111006 zcmeEP1zc3=+6P2Q0g(_8K|;EQM!E%QR62C%7^F)=ln@b-QjnGs0R^N>5hVnq1Q7v6 zN+hJ4?;V}dF?O$eb=|$YA3yJ2=5WrObDsP^|K~aH2~k&7*o{kxi-v}_`-q~fCK}of z3h+M?>|J2XlQVA)g8xHz(o~Q}%WK^~frfV8(OFK%+0NbE%GL~xkyqx!Z;ZTh;CHB% zsfDAd9V3L>3d+bU%Lsv}*z57ToP_8~t3o|JPnc^7s9TwW&0r6EQ#-Q{`>2~*m~rsH zx4!=pJ9h;~QwK{G7}U%b><4xKuosUY{4n^DP!GfwZl3o?LAWg(k>^9%pk?KWH~|m$ zhXXBKte|F2h=V_Da)!Zdovj=`-*^gUZ*O+W`Li8O9UWnApYLW4vqjt`;;atfMm{?T zzCr7hsqNQWPgp^nEkEol#E-Z<_}+?URu-1XGxKmG{$gi}+!?XK$R0SHYvIeQ@Q%?t`|74Z?~=xhnI zfZ3bc%6-}@ZRu=h3$B=lkyqNx-O5=9<#%21J2xjk;xDprjdLSEz?J>}gT1q(2mGLq zA5e~h|LxOZ?>`}rgPJ;7n!)u1Ke3auBh1E324)L$1X~y(yu30(!a_hS(jUyh!Ss^^ z5JGW+xj3FeDv%FhHKvXhW(Ywctr9MP_umP5e<0G7&0uzB;NpNP9L;P^ovmC^cY<)6 
zB23ES<9^z;oHDhR^@uzW$5bM+)x0w-A&Y7s!ff8@w?kjI5Q0o_pP7gNLaaYxB7#3zf_5-hMDT#85!B%H{xi(k z)EVXP0J)f^48o|HdYe)Tld85daN9bD?`61YS3PJyk%L#oYPpMPRAc?*Y zFeq{VkC~;Ftu2c6nVXvloH_-h@l}?F3JRYDrt)KQLd15Y(;~w&D?novZjsq#}L2<0~ug7SUKlZ7BBc?JF)3I0kW1h5K@tWYE< z=k8$UXk}&(GBAW&#nc9%+mG1|B|ZXR3Wwb&x%u;8XAiT7qhC;5e~gTXP2UflzXb+9 z#pNFjqaW?{>%jTV^z$>a56^FxI=@%hz`@M7DjTGFQ~|~CK})|;=)iUSw^QC<0p@rS za0ID*q*74=J`XaWp<+2?3GgFnlLrafKh?Q%I_h7=E5LUE9tZ9`-?D!chx>3OYEgtd z!FOZZkDiT?2OmN_DB?ig`A?Sy;#4SRiAud7f(6R${z|Up^H}!#^a?`gsPqZ~v^%4) zD@f)41?&nUzWs)v|FIkwA0oG*Y5|#A{{k%_Xcv@Bi=?F-z%Tzok_#DFew@PjyR%#Z zzaQ$p+XFu_%esL6fa4!{O#DO_!#}_S`TM_oWSKwE(eRHSkNO>!`L`qAXZ-Wmxd&n2 zs9})j8*vc92>m<Z<6h3q z$t@s^5DP*NeuO`!Pw--QkRVKIIzg@1t@d5{&-7b#EccYE<*e!v0V zGe&eG;N5WP&tt?lD#*`q6g(9Fd>my9?-@&*p0a`WFYTeo?f}%x+|=cLYx}o>*6#-X zFVi#fi$RV0F%dX{lE)c9lJ|oZr)*7~oB%fu??!;l@_>*7@_K*M_Rz0)`YvFH>>;C; zR>;ip^N`(VedXU7A3$CWiU(>tnp)W-nM-bMK(cT~= z6hzhkpT!s>9rHI*;{OguiV!<0Ie-ips5yiOfk^*?9P;}V%|A}k$cXjpC5^-hD2d|7 zvcmsfmG^Pp3N?m&t3LTDWu5>s=6z~A{Zq<32r}@Z2=ZHU{294e;A?>MkE!z@$nyDD z$nk4T^9h##Hsmw3j?5dM#U#kV^WKcUwCb9xq^ zbtZmw;r*$iH~X*aX=K|S6$|kqin5=Vq5r#12{JSw${#+&2+MD_N|4Cn+x!27UI_xJ zeTE-CjJ?2#6A(a7{gH-!jl4d?tf#B*ts{#={a+Fm5~x$UXdg{NGCX-^@13yxte6zKQQHVGq=6~Y_@dB1_{kc1+t{(lDH!jE7NQDPW8=xQN|t^eu%0y2Q0W)>uj z{q>oJAEDKMjieFK_SZ`qi5@5G2~lC%*Vv?Bel7}*Hz3vr8X2mJ{gBn zWPeMJ$lB+J#Q62x<3FYt6hMZ*Um!>1xDkr~f5IhSVt7QlMv2OQ4zULpT9MI_=$x6e5~=) z_Ro4WKSAhUPW&RV{(rE0WOw36;)fst%c0o)zf$Di+kO8jGDmd7{&g}(COkyZ!ViJh zupi9rSD5-AYODNnqYDo*RR0Q@fBi7nzcaib>&jmt^RJQhzeDCyE&s1_q8~Y8q+I#`W54oSU7&?>nl9u)qEB<{@L>xUk2)5I-Vi)HE8~)lp+G$U(dPxS+xhUdyZnZ{|J!$ zD`hB*#Gjw00=^5wA!FJ<@Oj%WLW98fhK8R3;E>A}qL}pWaBcsbHY&1|K$eMupz;-^0X+a-O`frFOMmv|8GLSAmRN~6GA@kkBmaU)P#Oy%_?(SGk3&d$NzbKXyobwy6|^teDB+wKN8<4 zNBz!wHa>eE;*XQ!Cxw7J!vWRrK1usMk8Aw*OGTriGC;GN3!O51|F*#oQRW-j4wWiL zF82Q!RgTc`&!fu!t0k`lk+m1X%8{#}|JIUMU+&M32zH45e|*U+q^wYar>rb|O>6Mg z;JivH-gNHrjIsu;^GW*V6-Z%SzKNQW7t9YPBGvwpZe|0oNmIVkm zK(6)qo6+ox{gJcLKQEenT}}K$qZyJ2`r&8>UU0_E_r>xRe_=EOX~z);1FwITW8?+Y 
zl)Rn00(euqr3!cpDR{R8Vk6Yb)WXr!?&D`Wci>@w<&m3VV!VQeMvHbtR!ZC5=xqX) zvv&8>N6Y8Vkz9^>f=@l6MkK9)O}qzFgGdS!)0-AUsZ$Zl|A|*bhF?~apP$klQ6iGv z#O@tCBMuo}jyZTx8W*RrWT`&hjrS9XyLwr7u|OC`}T zKl>*sgF&{tSz3b@4db(al0+2yFhARZh7m{X*YJdd33mrNkpsE^E%4L-rdWr)w5KfR zlsXr-9~!owpBi!PgUQGjm*fNdmiPEI%vOubg?>1pXe^p+;Mfv1I~F>TI?-j>hnJ{j zH8hqbIE)i5o5@ls`LPr6@kxo$(Bc2MW5CXLDmAFUSE^zb*!MT2`fkTtF4-Q@d7?nG zheKa}VX6emVzhr*B%w4-hsXp&?QsIt#-Q845%k^gFSv|j@@g^l4w10aw0_h`PfP)G zzw|XWw@cm@tb+NC7t6MbFOTfqdM>@-dssns`N7!GklpZ0{}Ihc;d^&eD=qkLE2v25 zF>ZG7Y@Md5-Bb@+uc`0gW$V)X>y2tYD zJ&WF$jLxx=rXpxeQ@PuW_SQy>#K|CnFu{PZ+vu1VX=nW!Ow=A`Ysq$Io+^vlL+vNx zohTVOG2c8zxw<4rA=^nYU)}V$iL%P(_2HUz(fw#>=rjyy5ijp03_wTwPsgw~-}YLb z=3ki^Phk=*W#OjA)ZW{05aJHo#TBhreVv&BlTL=;kOU2*b#L%>@fqrZC$CGR^v^RN zI#jnxe0c|^WNjplB-GtJ3HjYd}|<5kdjQCLlveKqdEZeOp0AOUpq0KW`OhKPF@ z0r@>f%^Cu-ff-pE``+6G8nzJp`ib@s0}FA-@V|BHQNj2#ix5tOm87yT%7f)mOwI>v z7d$Z{c*ho7DlLSIytcIN-+g5q+19HP@yt1n z{lVkpRYf6|WMIeL=)w4e)Wj+oZPaBzbe&>d-dA^H#O(z(yPpLY6TJnS?;1~jz~FVOgFwLZJ?MhcqK2Jwk=w;ZBo31Ned~mxIio=$%e6i;OVQ_ zZU%ez18}RK$IO1bzTE~Z#?@rUFGF`1d(2I^io6H4vIkReg-1GItGMQjY zh^{5Hf7Wdv67SM=;bo4d8xznJw_*88UWGzjpq~$4ZK;UX?9G2JBvrC@2R6{JB-5P` z74ygL4JR96jH&p_WQ-OXPmkg0Dw*jrpDQTXC}b9M?g`%G>!s*qa_Iec_=UqA_6~u9 z)0srzeOSBE2ZAD^qZcooXRd*|l4-=8pVniKDeFyoDMgK=B?SUWEf$VRSrnnGNZi{_ zATZIa^=I)fEgF64*+MbQ3OUG>nje?z?>E9He4}_3_fvc^QUx|tC!#aa(m?0VBYJDy zg!7YpRxsguoHS1t+yM@p6upWEKT!NAT64O0>3q9$65N*jUu=a7o(ir%c`cx7-+^9# zZVI?Cy)UB)_^g{`rZtMIfHUl<*sd0&pB;XDtH`J~^rb&Fb|m7WC7~-H_Nt|(8@8hJ zq~qf&vI6gjkM8Q~dSzv0b?@4>YpdSnOi$cj;H<2!zQDuBKZ=8kTmEuvY&odg`o$61 zZN}88@)MVE!C5QABn|1TbxIvx%A=#BhYwVF35s%X*_0RZBIST)%OavkwP5Cg& zMh^XAcJ&&cO~0GRax||&%(5)oQ%Q)3lGX4uSg|)`XvM@5h>3~EJJOY%-*mC`)%g0Z zEf4zoj;*{KK6oCNw9opkUhLj>L9|_PDV7n9@1vyb6B8CLlM5kcgwWH|&+}V%Wl7$( z?#}XFozt##Kh3J0MYOuUzJnH9NRF1CKH$ogE8`EtsGMKk-znlT$1wG*a7@*}pucLe zCx89?xpNyPclA!}Y4^;;l|M=3K#yJ}CtpJ+ccD^0&!Frqta{Zvf`D2?9h|Jc*FbwW z1|e~&&!&i7?`{79-|fu@q2ycxsz;ArKPszy4;QY&;H;BCK#~#?HPv@*dRkYOYqkw< 
zsKg6g$*SFnAx~t|e#c|pe641Cz2*VE!le}pb8}8*Wo3O?*|CdRG%kLAIhYH?6n3Fx z9BW2XeRo4IQ3>WL&X2Yj-X&a>tX0R5*jk}ixh@m|dyNBrVD}15nSHc(VVKjY7dyPX zzxPUHq!<2vA?_UG8Xxqxcfcv98EOw)N>`4};L-<11cL18CP@bJ6A46D~J&n zdQtj3?iqt*N&icD1f@zQ zHIc;^EdgrO;cE+&+2S7%U3u!i1B+8P_vwLNhCH0(B&mDQ zb7kc_hlYk)Iy2R{gxqIM!j8)D(3%in;E}N>N6;$_s2(|DB_q2(0+$?w+~qyvDzY@3 zx&`M3yysPBY^e2KcVhy@o_RA^BT@3sg--8fcbMF;XrLb%U7D4oWG!Km&zjX1r(wDK z(K=IRY#|vFprwU3Ir#%BYHBd_l$ZFzH+G`W@10#6xtD3nhS!%~T6+WI+ zErCC{O^r;|ge#oJ)s+nN^ruK@{rxI%vPdcT&jti6`z*98>^#Wb{~|ie7>2>lXXG`- zzhuexpo68#D$SdSJ5ciF{qqtxjjGl{fuuRlR*^sCR^V9MGgL)s*H?5z^z^F-5A{pz zt1^sLm?<##?*+^P5V=|yup-T5Q?HqNX4kX#Y5h^#{J)#N9va;9HizC@y9ZT}f zkO?Aazs75Eqt0^0$$&OFP}0OC*;(lLNK*|0g)>W>4bCj+vpNz>*Q$5cqgnT)BA1e6v&SSjjhd*#P%V~@2%{3sMA4z4 zn+=xw<<1k`dreB+MxD8B z9(y*R7P#SfxWrcS4lDwqB>z2BRLU>PUDA9u*EH$^E8jG(hNh`Ta~W^Vu${tCq z5L3v7l8IG-2y`Ludcf(Wx4M*q_WP%L?*z0ROimQ@dPU+8Nk9PO3sqa*0s^rFTI{-| z%WJeI8_jymu{_)Rl6;c*nN~&y2BD;96MdHfTmZP%C1jiyZ&$m@z||7I8{w(f4<(xq zylNTRu2jznnk`mMQ`i-aX|RW4GmhUzKukd3sTKKrfmyRqB5&9MF+F0+Qpwr`OdXS> z2_n(%NpgeLXA%S$Sy@?6Ke|LsA7Yu^c`X8tN;oiqg@mid@ny$YUe$Z7Z$EKJI}Q*1 z#3%;=?6B+G-of1ClQn2$m=_s_iW-Z?oIWrt4 zc#dsUP-?6-Aq~WEoG~eBA_px0rK#sy^%qDJ<}AHu?_n)WSIqdFx~uoRtj@d(2ZNSY z-mk8E2?*&C2!tEdBej(y!9>-9c<2_RxqIm4q3nj`&W!i_;?kEZ2MID|`@Fog!}~=kQ&y1~#n>{3OggPR_i;RF}%7 zYS(eJ@v*UmTo&IP^HpfS!431+Hkr*J6XAuOF3Q$SS2DYinK>`4HdYz3qwl0DMr-iJ zOq-s(&Gd0imP-#c3e8n6gof4~z;{{_yHz0L*HbPpFW+(J)T480s+)z-H;i&w1faVRRlX%5@dNh2_R-X751ib3p=)S4k4;j`wlO-Ij~%=FV_PRd4KM zxnb$Ts#&~~l)d%ffRmBiR{zlurWmcc?gVG9mNz!;`-=sQO__~>N)@^K+nR%l*JBi z)%PY|0U^MMDRq%C2 zDrD=pW0JD#MQqHIJ70a&Lk#V&QYqQn+TPNP#mul^to_D#vtW-w*rYOxZ@uQ73x?T} zI;Z9cap(C(37*GCP^!7Ta_ekyvSD6%$;?Vx_SiHLglOWLcY8tvTxN%)tlLv}5{x=2 zlHy3#x?`lIq);)b#B~j{m+fnPtpBhCga54GDU8(lrxIi4wgq~*Tq2yLERwY@829M{ zUbKw0C2@^gb_&9zSq)0L4J+N-Umh7?=Z19Vwbva>a;n-X9y6b>wSBOr_R5tfCjJo~ z>%?LP7FWVG9IDEEsShao>}(byp{z`)Fer0$o_xv?B!wqr2cX`Nk873w<-M5B^tDbs z?}i`Y#c&ym_lR7-R^rS{vNOO>34TTA@yN3Pth`#NJtyz_2=km=Ai$;a*4Eb5etwlD 
zP}}}ejSI|+FWd%4bPsbyl>5A;zp0&{d5SH94$Hp~OCm+Oo+iB}yfx#3N{`>ANX*u$ zw@QqOS7#h1v?rhDrYmZ3RbO3hPFz@U#szR0AeaemzKqBWI$;17L@->MW4UO&wl-F! zJxJ+AVA;Go%cLZI!DSr>mk&<~L~^o03njSxE3CjKrDS!%4}tJ$?{@(195!HenHXYc z)yWbfWm(OtgGA6W`9H9!iCF9>BsqQqLdqF1OmUr>riAkhP;|JudvlnFxmnMAG~0=D ztu428ZX{jdtzoigk@p0)HiokRF8BF$;60=bY&qkVBi@S*=ok-)i4sac$?jtTvf_SI z>+CyZsRG7yLyu)5E;j^UxR455yQ5Q0B7y0V)^jhd^hZYZ2k^<|sb<=Rs%Fl3-q7QE zd(Yv#v9)MI=%oBH&j(}Iyw;bd_^dh@E`)`-=-|l)%E&6BVFd0S+Ocv77>v!6WGRoQ z93n7^0W@uF3|08)$v6!f+N^1YO~jed>C3b*`N%2uzcA^^GwkqOs>~bK7tsn+d_+~3 zU>!!tD1JaN(pq$vPoC>)$aYueoBXkKAxDUUNjoXX`Zae5?O2gkCI@h57NKF>%usd~_f=U2UdV2cm>iQVD2VNjjh^DiNJP=MIGP|KsP!sjB+A=`>4kEk^@-cDQBhpnQPN8M>2%%e zLilOZ){%0LctvOPh~>pKK+&Ii<7f;4xRfAC#HAbrB{BY6W^r19k$fH2<9FCdv28An z2f6lI+<4fTu=2urzMm4;N2uxLz1w-tG_rwgU6&OI!54X|^Cuq-#&@}V5YqKd#-3=F z2yEQtLV|bk@$=a6mm-n)H{0iPRwrk5=Gb)Pol?*vUap?1)7Jxaaf%pDL`vQ;E$V^J zB*2lk21~L}cJlA)8Z|t2{rR(KB~c#Z&U7sziUEFk(K3EGW#I*6AOdcHTH^ICbi#vx z7m-vj+}e_3)wv-+%GufK*H@WyK|i)S<9UbzbLEJ93+1ke6bdDh8&~|=LM}%78YH)h zDYU;FR6R1jR`t|>RE0|V-aUWldctGZS62mZNIX(m9YXKVP4qc?Jpby^lzQBpx42^k z6*ADHamxn{t34A&RrX2Wlk)eVv{fmU+o8~wd^Ke=OK)VZmA}=m3rIjxo|JaT2QG_+ zdo*lyStnmx1Ap`92kuAS6LxbU*zI7+COZ*DT#Bp|E6z$(!)3V#uY~Ed8i!pNoFmP> z6=U-aV$VISS3Y4idt0~23`Q@ombs!#raGqbB2NRS`7udK1KBHUr&U4Q28**Lx|8Fr&xHHVegF2XM6 zkgH`pNnP_gFAIB9sylfGPkNO$nGIZ;;kyB@{XmJ(_Ep6qvd8+VH!iEfFvJ-lP_75v zlgIUz8;h_iNtN!Qt3BKfG*Ysr3`?qt@^m}nfByn$3Qs3_XH>)v2ADkOhNP{VhYIP?j%^sbbfgBmhM>l&1Yxp1`_c{5_hZ4 zgf^%5KEHjkaAN5w{(g~wlk$s^ z;&q7^ugj>VvlXO=B?1|h{hqOE5CG=el`Dd^SZUFgk{wRPaG_dW54-?Pm77&po|Z3<(eXB zn$Q%~ppJEXA>&f~U6Iy?-5B<8DpnG!R0hf%JLK3^SIG$z-haG^jRzKTMOvf^E^!r| z%sCJg{m+B!L0BeLD+52l6Ak%Le1aTt*fZ;Fv-UF;!uY-fz~6{&?|bRwuzMd})_nyW zA>LBHDqWhvj%(Jk1j`z4-^A~ZNV#+nh};L9`GB-!r$@6)pKa+i@KpnQnT@?yava-| z_wP9k+%sM5EZgYC^ds{UH`3Vz)f3{5QBUuA?G%552^Koy|KzQwKPkAm$snc=-j7Bn z1G_I0O$hAo*0>6_ppDD*2rA6}qaaoJ5pZil9h~dl6~=1LK zYyYac_)2s=cIf3TvBT}P?Y!qwsU3+T>pSJ4NfS^sabj>1yYG_VJ{E|3cGm@dTdaKpbAW-qkgq+m|{VH{w 
zCx*{}%|rle12!WTUmKJrzRhRsvrT?e{CJA`J4ihWZ|~>W&+0Iu0ANt&&ZzBpk|Q zO&(CGwO=Ikab(G_I*Lp~01RM`=e-4DZTR3biOPOEH$JNPWn4N4L z)-KgSH$3++HOci!`I(@G!B+|ty4Qqj$s1@CVRtR%?}YBMT)X_%ri#j!JMjLSddsJ_ z!T2reskG5RbJ`frCtQK6zlI|QURF0Fyf+-%4~+Sm$Y5DPv5GqbG4rjq=Jpcp8+ej( zLEyoeyWw=k7oV6-bamfg&?BZ&sA#*M-c>Q7`I_;Pw_dvJzB65gR92gp-H7P7?*W|j?^TdjE$`=XF>XavpX;>Js-Jh=Nsq-<3~TZW1z$+2^Hi(FA2Gb zfYprK8@;gcx8n_^Uh%3kUP%YCpN4J(+`ry3JwL&#IXq!tAb7Kbe&8UUVD%o)i@}Mz z@$kHsre9QaM@B}5LZML1mG$+^pmXOKM%$9MWxc&cu|mhTeO!8p2vtDrv3tZpUA;7u zDI#V>-Wgk_IsR^4(=^jKB_*ZE z!@;Qc}V-toFXXlca3l}e5#5m zFcKKYx4!gT_Tdy&(D>S&@GQ-$#9jQ|UN?okX7{pcZ)mKokK5kt?V3zvbD7C_8Qt0&)CxW}%)G~&?FtC!tZ=SFzn zy?dAM6=?xV1^0c}jOQAN?!UehQbbIlG*e%jnRw%ZY09&sdaZ`C95&s_FLM)Hw~j7| zPuwwa5iqb(yKL#(yO+Y)#n$jJw{b(8|4uv-%P!x`+Y$UZICL^&=tK4O6)I5f&K!kU zP4hl}&(b$%V%az!VQz^m(#+wpA4%Wp4$phIn2vzj`z>T~Lu!qK9$hiM2hnDAyhgN# zU(hB6cbe&qoGcB8UQzHa{Cz{R#n+FJa~T~49O-L9WBSx^L(T?Sii41%DUg)yJmPZs zIAjCGX3pCk=H)GHXq)j3Cj^!hAC@z)bk%BjdHUXF3En)h8*&z4WfiN)=av^?_A>=> zV`Ur*Rha!^{UOn;2{EF|+ukJ?@O3R2^usmp%wn_Z6wxhN_EJDAv4&mm3zvJWqMbjt zrA)kUu3nlRrRor)Tv|y-EjpBROdK?J1z#M$!l-Jz98QQ`jOI>Fo zVa-eSTuksdzEC_Z<)uyJU_SJkf6yYHR-`mqV0-D&3Fq;)7tZ{P0_kSvS(caF%y%!$ zXKG6A%(j2_aKSQ~{D9@_dsX^Y%RFp`8;37yLgu`fFiCT9mui$p4qV85$S;#^RCio8 znn5H{PTkFYQ&`z?34-Z9Iba2tP?yOhbi*M>rE|u1E%{8DuClk!76F(Dg)AAs z?j7V1-B-L=ppd4KCcnWm^K_wi9F%3M7n%KU5K&-U78MF9q>~9-mJT^G83ZMq#)v&i zo?s!1N6s;mQj4ZHSp9-@dt)_JVZpmaB1`8E*3A`{MWwfmL_5Qs35gtX8ch68^FC8r zWGj~K!6xqYo%UiSBWA_fO|}omH?xyBEC+8?zQC?RO}#w`VaHF+JK-mR-w zuVV9iaqnL|G}9g&5+Y1QLt{Lsp;zbwfQ?<7B-FCG(>7h09@6E>6=g?Wr4|Cl`V(E)Djm z=e}yw==bq`)0dGrzjMX?5`R2lhq6X=ws}3)U|(<+wQq`Ow7%?A^P?%0Zh#uk~EEi=`zej!>l?=sft8=;dxZ-oKH}KFQCj(=knU zaes@PGfS1%U`4ZAe8bf$7i|9gZVE4MgMG%83a6U$liKIE;{wI@an25lQQ8#mzcB$5 zIDJ(gE8?9FZ~`}@D$l#g-PY-V$!P}-N~d1XB%_8C{q>I?DIc_3ow4q|dBjRJQS?Z{ zVYfp@xW}qIT`E?YRIe)nV!ws6P&-?*5~nhoX}ZO)!9hqiL22khL+`@WE>D{W0V&Zz z_>`&vgTbyheJu9QNEC$&cyI0uqvDA;d-m-0mOPQoXHFJb+n0TuH(t*?Gvl`*mYZoY zkd_N2X({wJ8#rG$kjY5Hd8J9W=8erw;}U=C=Wj_b$2UBw^Gdlj=;?mw{8NfN<#_KD 
z_1x7F0oatM4Q;yK@I-6^v zlXhxFx~=J3YJ<+Ih`H-})je^GLslv>y@2MU5{K8H6NexfZ7K)xM(9Hauiv^=lwEw1 zo}K;em|1fS|46zf9i0r>Zg%#SD?1e&931ji0rL&-!d*FsZCnYx2^*|FJUl!sX7Acq zJ>k8uM`LC{Hop=hBI41LaA)qVA~K2%B5_!wZJdz#%>{Gg2@jp6#&r^6O+TMY>mjah zFEFN$$*jg0Pf!eP%y3+j5xn(IJ6~1Iv)bkG+EPV+;Z6dHp*CG1Ejm!s)OT&I5->cU zRlgTxxj4p_J`**7VP^QkXgb1>tf%{q*I|~bIVVOVFFKiL57Qp`Hovqo&2Ot$CeJ#Z z*6_9<$!g$2@uvHrK1nKa|#kJn7e<+!6pLK*1 zE5h{3C5NW~0a%2L9L)z_4WHeai%*o(^`^ByPNqA`QZJt(eLjyy$RSxMDfA#6q}SRb zgk(F{K*WrzrmAhF|03w_QXM&Rgt1G5SG2`s3gK^ZEv&sQ%LUqH@B zd9Y+G{=xlJh1kR^Lqk}jEzh@Plt--Q{Kh{q^k$d%=KDn-%{k6@ZSvUSyN7x(FGI{Zdg1dE_IzS zx@mZ(DVjy?u1$C2lDepY$n`_5`xMfZVti^3N5nG)G!K>A#yU09$t)Q)C|5=E_$A6J2g#}xd^altn)oaw!;8EhR)z8D zPWkA@tgX`15nO%Dp3jQ~7~Tp{Z5Te9V1)GE%-|iZw}@bjVh$|`o@T9on8UXX-Gd zJj@j;?GffTwKC~iW!vGDFE(-;-|TGv5|p`W=N#1vnMRu;;h^_3PBzWz5#!md{JX26Q*1*+D@)|j?I=Fo@@4~mKCF@DCVeqw@RwgWedXiZ+2e+!;yPoqdGoNHpuG{bw_cdvy0w2 zu4SChG(9Dv64AZxHx2|&_s1wV()r6pu+hmFWtctRf|dAic<4b>M&E`pc@FQosh5VY zGU?z;aaALB?x4r`_*kXg2Sl%uv6o7)H2OLfv1mlxOC)lKbcK}|Mj)}9A-V6_q27mIgm||QBgm)r5T>M6D?S^(ijDkG3TSg;i z`sSHB&vK8og>{Y|pL-TZUUv8cUY0;TjF}h|?q*f8@(lSQOwC2N*qbt4)dJb+gfycn zL-Z-^ScQdk*F?D@?njCGs0H?S>u`=ZJULA1>-o0PBp=Oq&Ig-p&zw=fCf2f2bLvWy zo!YK3i}rgHwqiTVrW!9V^WPpH+9WNr6CbOhY8qEBz&4o)ni^YLk`8uEEB7PQ`Vk^NFt2x~00GP{ZGU#!5y8)J^GR3iI>v)$h z;M3zh5@%8GNE=-USdZ(B=2sR##uAYhjNmzCj~l^AqsxB3J>7O0>d;&a8&pm5-74!+ z^jhb=Ussn6y6@6-b!@51JlMhbK>^}d9wpUZ32)e10fj;DVWxoPL%s68z|Y6hB}zaJ zO>&#M+!}N_P3km#lo$2ubj{**7I%-xTniZrL#0c{qhpz5IO~i^Sj?QW`&+ATmoUj# zah1pwhcvrG=2(jiY03#Yi!9w9Bc!Z)d1uXC+*PD>W1dRw*&|GtGZ*);tmYsQ z#q`XMSatx@aP(adQOgC+3RAq%qfLBn)7p633ewu>?tnjjS~YM*vH64esTdUg=Y@P=-a zl3LHL12AR~XN?H+dQ8p>7S&(0n>`Y7dit>kWrrJf!p0`_k~jsQH*M9sR;sADICMn7?}jg7-)g5oc8j6W zlyVwVYf5tR`ku>CJ!QO1R`+g&@cHy%D3wycmS^bX!a_W+=wpGaOd*@}U-P|Box_`^63EUUHnB|@l6 ziejS|Bi25R(YB_BWNKKYD*y9pTJKlO`8g*v0kc7J{H|VqT7uD*fAjigPFO@w_yN0n zzVWN`V8(rWCqd)R8;g%87+)ro<2#(^ofA4VR7iK~&DCr^&!%x(^oXmM73@_QRL*L;S@*mnwbDLQwZ+?4 z!^C+aCb^ zs>foUaY7Id$QPjw${?oEbpu_c{XFYAJY9UoF2 
z+gZlK3u>()K(xS%zx^Tj#j$D_J$*T#^0#LQhPP#e;!0HWwxaB*#gtr(rr+LHR3oJr ztm+bHvtFnmc#Z|8(j#tS5>afkpD!3|wCoeTQf%8Ny&1IovgmSU@Fyj_%{Xx{HuXft2k;TD=pK*0Z^2$6T3i z=8{NVa2N2(F&E#I?W7H&tPV!su`u7kX;gW2_%=;&cz8b7$w-P$P*Fc;1C8f7K7A_y zv5R$=tnsY}E*j+-pE#xk7~Yrk?;CpU9+PEENjG-Sv?$Z3*7i3oxklVsuUhSi-yUSO zJw!}xpqeqS8v#cim2J`kk?qsz#YIf^YQ=5=u}UuEPa}$b78BY+@RxLM``vcClLa0` z=w+E_yhlq1Y27+~Mc_j1?(b zCaRK{*%4O;-9^fx$KW+A7>Zx5V_D>x!q0K-?$w7!sbp*O{b#o*+!>Iw?iLrgar1$TIS0;oZ#8!uh9eTz3b}-Zd|~cUvSavKa;q z`i@kJYCvyLi*Or3hXrK=$Fm$c5_k#ajMyOisAnFPx-N=@T@{k1YxIKXK(%jaMhwIk zOX_h(do$@?5NDiuF?)VQ^D$kC%0T6ViI9fTjzoM5xm}_Z&m?+3(R0=>+f4}ND_o~^ zOfC-73m8;zTxB8$S9%86I-Hstz1cA2n)S{a^c$#frpYL5H)B+%ByPP(*?azY&Qtwo zlxl{YjI0nML!}qJE>To;^x(Sh( z9bOfgj0}1BtghP*lE6cQIT=Ot7F0k5<+STzkFk$T#)}5F`HRNg@h|<%GCW8@#RHcnroc-PJ#<7IKcy5-Tk)` ziq`MkzI~gpuC9(PoJOKLnD?cf@X@JrID3h?jH|s5iHmQ!s0{>|HsjOPHEO)36$S!^ zGdW~*0nejiPo#}h%5@c(yHF0i@okp^;4uvIy;I(ue_9-SSZ@bzAc-}CYhJyMAC6}l zIJ>UMdivqB-PQ@OvQ4e$Phv*AypP>P z4yo65uou;n0!?W;fOKGRs)J=FPqO3nLY0 zHp_c1;LFAmQyih?%LHclo{VXI+NcybRzDJsutPaORaXEIj|EKt`#eaNFIllkx+@7c9~F$ z+Fo}~W>+}Ou3t@jcXH*zwB0LQAU2Itw2)vBbDWFD}&(eWu}o!s{$tIUt&ZDGN?)yi+FGQaoaaxvQsUd1GhanYI18GyN&e zI3do%Gl!ko7_Xhhz+jlyandg7f@ni;KRUbfTve#td5CCvaKmhrQ@pGK%P0pO+`u{3 zG5|Xpn{S^E^dtjD+b+>KdWoYOi9~bxN;;c+j)F#v8QH=H+3In;v(j@~*%#`wpY9DG zFOX&BieqQbV`OZP)?ePH>Mj(d7G4RBGFusc$LCFwv{Q-CsZTae*yU_SQcZsuTNOVP z^H|QitQT`0Z4ZOpc6F?ANyNUP-Y*7h*jBMcPn7LhfyuxvEO8nMPv%^mE&mKxk-8Mu zsz(#E{@d09eg`2B)_0mCtvQGLt?o!NtxVwK&M&$na*3TP9*2XHAly>g7KXe0y>Wa_B)JSVb}YK z5GqX3bD$tZr%nf)1Nx!(n-^NP#nkK)vy!i=5K+9Ms@Y?xRA`E&7Ff+3#z76v4Ey$B zxVft!@rQx{(y^S026p#$VT13k_glM*0jeUH(<}ykS|EbQYm&&?D2I3FP7-cByzo2p z{F?-JtBbszTgIKt3_>-JdhN($BDJPFKEQaD{c-2w@4la)G=D!TGGn=U+19Ty--y^7v5 ziLaI<{Fe0lcBOk^gQPQy(*_Cn>eAK01G+~9BXhNDPAWS+nMGq;N?K36aQxPl z^F(NKjN_)3*mc$@&I@9ct}E{jT%12aJU$!JyrB5({(X#q(EBZVgH%LHSm+7x(H#7% zp*^6*fflK9?EKRWZdeGyD27}k?zIf3YhiJ29re{~Q&(EEJEQ;BKOGB;KTek|kBpo4=jiHrjUk1uU3PSj^;~|3wDOIpr$gqsu)$P^=_VliBB|9bV^9@fjSilf#5tCp1-KC=%blCe8!Q>5EFX*>Y 
znG|H-Bm&K|Ffxv|_~rKj)3RL<1k7r0jq;$qwJm*hhlxhlMc@LI_xoJ^z_2{GP2fW^ z7w7(uy|?g+vR&K8DG?N<6hXQ{dME)Y85(Jol8`|V1O%j0QM!hf4pBf6l$KOPK^jph z32CIH+wUAk_kQ+%-|zPa{MPp@)?Ti)XYRSLI^#Hx^SGo_a2&oeeD@V(z);Po@pHCR z@il(6TqxTl2;B^Z_!OCh_^5@;(nNQF!=s^wBW@SxM$(RlD+K%MW}1E;)Pf8Xgcy=( zXdB9x%gPjvf3UW)YQA`ZrYDb%Tz3n@1oLZIoPvkf)&}-ma^%Diqey?3EVGo`Xm3U3kGphV7u;YkWl2*!!QE0ZtI9?$GBS1?G_#tT;v0OK;QH=1%gbJu_-Nmai^!7OZ&c_V%twtiUI<52OVanICMP++dnJ{7IG9w8xXylw3uEMEqTwuaW#mMfn$$Qu7*C^8iqosl)$`h~Z9Z|72| zf`&%K(;t_YY}<{oR&11(;D1|RGe%sQen0pSOOx01dhCkk4Z`g)L7^wDWHdqMO6(b! zMblc8=s$%=0l!Z|kjWZ=*<|gASY?2n+?=k{E09x2(F}L|X@X3rp8{8fKQ? zxc;0DQ}vv73Tg$0cL79cx}Hu>PH%+za?r_@OM1wA+ln>6FfmO*=?aInhjZ!-MuV~zKFM~)Q@@m@e|d*14OqWWI!O0TKW%0_J4N#cD$;pcaT-vLfxULq^}=;FV}N-?%9<YU)l-{EW`5bOKfg|S;?JZmd`jSXG@26HcyIly z`V*46&v!=aZ={&qw6vtZb`0Bc9w0?T6iOo8XZDOm^`^yc#pV|*qsG3p@iM5W*=A2kg$m1X@?#5FF%^Y*&R#mC9QT~1c?iA(&WQ#ot)k# z(1w#R6fQEY2c1%PiX+-v)+)Y&za{PWg)@`c*b#R|#3OpB*Z*VJ)6DScpqT}xUd^DB zM&Av={JrLxI!Lc$`edcC>aO=pGp{FCdFBnlr)wfr7XZ(u|5PzFgg-vpoc;O=FJfEQ zi^ag8g(dL!Z|D&jxM6SJdWA!PkAL~2P@>^AO-($9!{o0d- zM65oYQKh?@uPyH-#Ud-$M%xuHer{@1O`NClx}P3Pg9SQCo%@^1ptt&a&mH^Tw}Hn? zzgMVN3S|%j6kV;ugpI#FDF?#9syneW7x7q1j{n(YYtIJVPIooPX$O zp1(sH&NtnZhT3`^`#i(3ZdfSDAiyaV8AuJ>_S__GPEcw1_r{_1@TP6vd6ZDpqRl^E zewJpH(7M{s?|%8s<`cK0TJ0yQOfw>ey}pegP!(E!svmbIt#o)1D6NO9c{#j@i+l|# zlRMobPd&6&QRXANqahO1YfD?AL*` zdn++E@b7hIleo z%KUy#MVax|q&=p0p{Qm;Ai0=N5m9S4TF&rYM+PlQONe6%JPITJSuaL=IBwT%qC&z* zhIYEY#-Wp1w3qZ=Rl4`aZDL0ze~-Iqx!jGXPZ703krqWSbwY7U2rrOTi*U$%nZtI^ z?ZY+H$}0O5d+@m4NTa=CP>V6k5WDf_@OXuKYX51^xJ<-};Ma%j>WSLWr#=VD0KH0L zn?YQx?vtAwA&P{K1d~>Gf!5SXW$~%R&#Qg|aWkU->)W1NUyIGOYTGp?AGDBVKNMjy zzX&Z;78RcAMJ+0tnu(@FzrqD|w1S+q2)kHc2b=gg#iw#M4fwj{cq5?am6@5jH`cV? 
z!zzFksyrOV(T_|jITe%M)lSu7f>Gb));A#}VviZ0KEGC!(&Ssge93ib0w0O#RCrG)g(o`v$c2hrH(lyLLB+L}kJ`;1AfP|fi7tt=}Sj~dCv$b5e zGo9LtFw9el$yIN4TwLUH1!Sky`(X0;R&D2i_Dkv#cg>*J(qIm!5JH}!?*_~5@}?U1 zojcH+fsYJK$DO6e(@5HlCH#dT<(Tyq`}XM`CC$Fz4d~mvy(Y-`dZM!p-4c`3yb{ae zXZL#|T2@Ux8|8Q(afYEXut9IBRa@yzN!@)O##MXEJ2RV!DxReitCQOks~IGw&dp*g zuSGmtsYVG5@AU^wmKSAig~wmGs;2faX`xjcwu`v@sx!C5Zb+Rp2WHir7Zyev%vJ~K zq}()8l92)-B+wOc950CCt{N)&_6Q=L;=-B@UUDVFf(#~ljQM&6b$2b1&zOVn6`YJp zV=bI6D7H2HGT*zn8`sOjTyw<}|6&92c=%`-mzF{KcXLk1(_L+G%-+g$b?mQwb7PIq za8JQYtG)WJ=8gKrq#UICmDO?h7DI*OgMR#+^%O2?>Q7e2UUeIY`DgD^WogA7csE=y zUCxD6EKS@dDkOMH7q79K#zVziWi@*saZ@myDkzCJ=NSSZ2qJboR>) z)LmguL(&G(nYhdN`1lQrI>WY&RZ?59P4D+aLBRkC z5l1#`7M0YLyY~amqXXBn|JCe-~>UrgSx}ht=e`MR|5kcLnS`BK-~N!5sI=~5fxCpMi9F^l6TvMEukCCWafLb`2T(pDelMmCcYk@90P%IeOD&J zd{g-iS;x92q=XJj*YnOPh#eat5qJM2`^9^$77Nsmz5?2>?+Q&PMB^E5_<>_4|B_4@~uy7mKuE|pqml_8a6&4M3Z9(_SENroR^p6v17-+ zx3#ygZ-o-(qM61dAh8_ozYYO{!`tKWU0u<5`1{=4>o9!4X8oXM}_lq>v9-ow~lzO`8pUztbf%3j?JRGA{a$ zO{kKZrb11f>x+ylbwQfP2DBQ^(9!Xxe1G0sbfYE_h=Wxfw&fM8g+bXuOk+DR-5Pzt zWHcP}B&P*z(G0kcGVwdCd`G^9IP5Edv-SS+`0d70470+AjtuFcXJS?@EUZF8=)ns6 zR6dUFHDJkqhKlFhm+NM(;Vo6bK|DR)l=G1Y%`AL-u&khONi~i`HCLb8W1YTY`hxFl zclMmibo*l!WL$?8%?n)IOiYHJgX!nY=?rLzk;LqN`nbPR22=3p@L9XqYPBTBe9y=I zbDy8xe)o{Lm)3hz6S{COBPul0C3~AHRu^UbWt@H>=iUx|jeAQv1%-*kkwddu4>V+~ zyrBu#ij2rl9@}(V7%W%+rygFwe zGgNkOPIqn%stD?CuQ>zl%dMkdU!U9^kZQ`He;GW)HstV

H%r(siDVLBe?wTnG+heYW_=F@L=AnsCYJZc{uqs7lkpfZ2YkiiPm>{nuruiMD zN}MXD#Wx+i6sWWyPcap$P#lT~- z66WvqzEa+X=%V9tJ1c%a-iMmh;j3v~_VMv?d$U}>$M0QzCaKPO>eC&8V3Tv+W(KI1 zcgO*m3cWdWi@iXobd8n#lpRZ;;z$jAIP#0Fy>;VVt1S%`_(YAZC%j!9JwAD*C5RX5`^8E z07o50ti8bH^sA}HU(Fv$#lY^5Er%s|5cw)-7XO!K4fPt}_&R#<(u=fZ9C5+cQ`x}`u>=pFLzeXLis{DQ zdh7CQ9n%ZauYjU7H#Jtr2t+OI!CaC$C{Awq-`CO62?d=R zYE`7V=>C-)E>K5U1gC8Cv3Dvqek5Z5aA!94{=wbe$~KYmHX=+SI~d8a#|{B{2?`%6SR4-x^cV)LIz%bzA2ehOFRUTJ|*;%Dx1@Zl$f16q}^h zN}ifVPV+i_b=sjqUgV8FWd5^{&Y60^uzMMXqEeEQYQ!Fj{5pH2Fq7sM}JOvio<9`HBWd2bn3Hr#0NJyT5wb1*A&XaDcn0<)s zx7`OVOaSc`DPQeeHKC$0(Xg?ZEdcD}>i9|?XcClybTUp$sRlf{!1!(tUcVcR@{{hJ zSFc|69$t~Tl%z0j=jwS>hbU4CedDYjO2Se|PDfX}l^ZQ{p+8UkoNFQ|G$g0tFn-t` zv@8A&Dq!O^`%ZDGC)Y5dZ6f3}bmx7BtDMT9p_7}yqbozYHR*I-vz)U4uFU#IoR&;B z2#@H(X5Ci2?$+-mnuz@|QC5YTT3baz<6aRarm0J?Zs@Y zGX7D_%a9Lhb^as{#t<8A*#ZiB50*^vBHnR+vK0b6x>if@2BFGPbgC*UDjp|i%snm_ zzf~e@NeKAN61Zi)vYHPfL`{MB?^}7{E%ew7V`P;+4M?B=8+lz9Kr>gqG()o}jsQ$& z@>YD*r&I;}Dj6CIA*XrH5L|Tr7<7tu_F!vNs}_=m#rN;uxBA#fG2I7Q&`Y?_kc7(U zLc>yy=H_M#(x8v-b;}|FTQ)$?!AL5lg-K3zU`Ej{bhtr9(y)qzzt^-DU*a?A^ z!ilpN%D&i^^9RqoOI5}hoDL*Nqo9Fda%LovDSkSx92PA8I9KymZH4WZ3*(7$7}LCB z?j)40!=i9|a`Aj@v#5Qube!uO&7;m)&X<>V06BD3xcyneq&tVp`Bb_-)} zr?+*WB|bTKsss-skSGVyHBmT86vx#fBT)=kTq!KSR~z5h{#bZ!vNi5q&r_@ekst)} zio`DNH8?i#XQL&jf?Am#0K@pXIsDu!N9{csxlls#v4clofpS6gj`$c~04BMw3+6bg z+sI%wL2Cd#j{pHYHS)ary(KC4flx1bIZ8YTeG^%()Is~{={D?po~{{<0VW-$dIA;@ z{v4P9PTV$$0P!Bnf_PdQhe?i3RNi8hbGb+e$@NxdM(ocG5W~-b z0Xr|{#w&dw@{#@RF}w_Q^MAB$tm*!n* zAz=ghf%DKxn~u8!ijcK@^JR%@cBk22^=Go=_QXv66k%VqLF2ohp88EC0Kznme?!`-t-Vkl+P*lPLob`fOvxs2w=uxpFe z)ZLW}Rki-AoBinqNq(!;bLxX@oGH5_=Tbz0A7X-9@N+!enEqW^_~h1#j`nE?^Qp{pd?B1BF&FqVb|_0D zF?*HyAAkxP|08}xP|Hey<^zo1hChBx7)zY$@)*fzo6hsaT{qfef;;6G?&W^lfr+Eo zwd~2Exgi^C@$_zN@t;xE|w9g!~n#2oKROEepZ0=^LQh8;T+!@es8n+X4n+`mft-Vd0t zB$2s2J&-p}5`Zc^1a4$^88IL1xBJVB&_5G$p3f*lTp$Soik_YvM@fvXaLzrb z`rLv6b+ZaZxKBM$XN}eg%yihjs8SjWXF-`r041%%a2B2>Y56qH=H(h)zqn-1wPr|NY{!3H9 
zVl&t+TosMki3{a^Dq@W&y{p;foJ5$N0+zU5_SpZ&Pa_rY=lA?Cwd?we1bVBXr?8SiqXLr*XwuJVih^!-f9b!Z|+?j3p8 zzDSdg)yxPw;aFXw4*dsKbLk`)B@iw~Fxd)3MBm>iGJd-Bq$4PmTbixQi0_M(#h^*p zV=?7jsat(y=Vj;VugEUTPTuhH-}$Kf7@~5Fz@=BAs&Ci(fG z4N6eo;)(z%>CBK;2dUD$fl_O|--id>A|mm?fwK-vgLyH<=x_xrUHX;%s8F`jY%Apj zab-S6-005w&%P(}^aV19cgALE&Ip%{`dSEjbMvK0KDZoC)mfqZ_^F8pcNl|guvhJR zn!b{nZOZ!c)5v+<14+@v>2Ea_oQHaug$)^bhM-XOmz88)2^<0W`b&DRLN59rUV>8a z6^b*wh~15!NqiTE*%6>CuMR2k6DYFlL60Uf$UKvU2K48?Veq1?@JI@pnoatrs%tV& zzJzmHhZE(g*c|9j(6OL4g80f@x|q-JZT=Lw_rpguME|*o>4k~aFF!>b-;qgf4-uai zv?VMumgKvR&JhQ($wgxSE#18r`8~^$ywt$mV9`wgdq`&H?ChM_YuxzYCX~SY3a|29 z(8)notY~{7`agHqdAk<`K}B8l_I4@s;;P-ubD8NZv+iOjyk5+}2THo98D%suqHG&K zzkLSRNojkv{x28cHpFM|@mSc|6)8yfY0O@i43t;|PPUGT__UfPLis<}?AYGfRxsYR zDM!ZJF22Rwsqq|C)6Rv5CYU)SzE|GwK+X?4o}rg<3o}xbX7;6`pkwpo@sqf#4{b@} zmUEv@=?XcS6~)w^2LUXA%L9JhM4ko1Phj=YbY6AkY zz0SN6vOwhi6wj@2?Rf$~W-3BLLYUYL#H(IDhg@d%`ddFm1emGfBV)OBo*hAA6J}f( zz^UN904`Tr@4DLirhKMypmV=q^xqd6vSR}5qr}?j?GMN(PYkd$eQj%ekPRxHy7rjv zC44hpM2eXGZUE;bBwn#G24yzrvaZW>9r=3CqS%mk6bXmki-BPD>4OXX#VVkeJ@y6T zKW0sV%NEnYi*T#&ndwd6ru8p7bpEV2Z*=>PgAUq4+Qm!N;~77GM0uyXZBtXexfztT z`&otj_C35nl^{f#nwJTrn|W_eUYY*#{Cd}yI9xPy|Kp?Q^Bg_@_R;gX5EX9ocgrj? zM?yI(9qeK~#xK8k77CG$t~f{er&DNV6-4EfE{sD9-?4UDnhGR&tJn~$M*LGRIGfyP zz5>Z$!3$AhqE;e|CIORk{YV<=GZ0jnQdLntUtYN`Hu^Bj9;f2f&eHiUf2jQR1&7ud zOr{G|KP)A@i9FlCekO8sp$MZC3wxB$8(PB<7ik1Sq37M%t*oF{%ulM+B&=;(njRv? 
zJSe;C=;~5z%=Rc4<*+;MPfzK;_U+&1>n}JO-r~{lERKf9hHxWYYH03~&Dya1?yV9j zxw>PdcG1Pl<+*^E^s|=d^E@+buT;NCl(l*BdD%w(*|KWZ!t5^LrqyCG2{8Kw1z7Rbe$NUeX zJ(iSlAczggRs6uy!Sk zZDjz>o#O*GXeXz&$}^@7%vL6cpR>m^E+DX#g(l{#3%JOj3qCC*N8Z)^CdOUgJB^VP zQwoc~*LxPTu_7NfyK(+3?``*!N>Lxcd%mgP*cc|Gsh7d)_QM1OE2fw)tuOwO)&n(E z;ViM`LTGf%=xlb)y&(NV)GoRCW;U=oj(-~3_$sGX{^4_Ox}@H?^VGwi6CFM7adG(2 zX(=2eyaVFTBX=fex%Vp{%XXb3le-ce88hf9Af4{knZkTfw0~*BVX(7mpftq`>c_uG z`LC_}D5(v8SK~8YFCy!iZAdgKUvil1&VE};E4DznWJ6DJ88wS@R`J16GLwvrP4DjP zohvX$59(KGNa5SAy}aOXV>iWtlOcO0krrN68jbdx5lc6Em3BeQSGUwlJEoclE=Y=P z?uzC`79j8OIxPJzb!QmU(Z9hco0_4mm!D0b&5egX9m|!4$5I>+kRNc$Y|e-9K9dTu zr@R*#^O<9cA!?M`PYOa}xtJ6QnB^r|o}jhkVqTBB9OraMT)VVv*I&0a z?Co#uW_{?rBq3a^>9>?#XDaD6)4E}-dC{_hjHWXyN(h^{AmRQHGP#}2Jp1^~He-Fx zRTtdc>ThH-oAoLQLws2X(HiawK?Sta_)k9liJndUyT2aX+_Y$MB{EUj-T6?R>9_Lp zjbB``@U2tmagupz%A?R%4s(eDd3@-s?T=Lbdp`vV85nqMklz@mmviG;WMJ6khTjU>3J{2fDPceV1wkS z{ALw#J{(bycQt*2V2)-{ZX)nb40Mv3uzusVW~TAC;{;x2G|)k?Kz=Asa%1E)#;wJ78G@RN(1&Htq}k=*@)z*2wkE zS)KL$M)6rMQykznle51tV5%GO?>1iV&H5d8czCCIFK=_>zGFlVdDCcCO2Yo_9c5w> z*LnT4lWrKc2L zs*gnri1(I%ewK7;qj}dN=NcIHFp&?qN0B%gTtK0Km}l2mH6hbQwhd; zpBD0%ylAxf_D(aEb*=B>?wp?fOIz0q7yT30cXq-e@fBVw%W3V3@26H8G22dKme-OF z`!YRXtO&^br#gDdYi*|-gZKf=_^qBxx7GrR&eB|Nhz`fAX2KEH2NGfjx2=8HR3xA-CNC;+dqPcwIc+EehoWFeAFL8lB zMJOHNCxzT#Se6rIw0BHsl4Y^f`gST+%EMLU@= zy*rv;_tCK{+&5(X(%xk*Tf1spKTC6*6S-_Z{LJxT`LX$XZwl%=jNTjAJ=@rdQ#vg@ z@}pc{7uDK}i?)a)8^PuqIJlS;$|sG^HgEmPYlS$cx1B?N5?bE}xs&4Uub^5&GCUWA zc2^IoF0rhN26tQq=wD!Oi3L%1zFMBG=~zRIpcQsq_~9`o$#iy4;4zk^PIV({YZGT_ zUVP7x7x;K?da7Yyzg!;rSkGR0w`MoeJGFGy6H+?5W96Nirf6m({p#V++{8fhbJ!7M zpnjse{~B?86u|%h`zn>WT3u}E!PY~;8GeJgM%%1gubdRoP9F0$qhekaGjU((_`LRF z%6Izr{8k@SxmrXkZkB&#WNA9jTuXZ7fN=19VEb%el;~5NOGO29Ogu>URK#8G^;mr6 zrhzK?Jv34|qkJXvr~2ECpQH%7k5?sljvqZ}%oei^JtDTA zUTf4haVE;!`86nfh6YiUrf{5a(#ooM^W=$GQ)hSd{dwJaKNLt!PM!RTJwp@pI7+W?N1n zgVg=lEqbv}p@ZMQ$Rsg{gmEa(OxPYIbl!H-M+y2#?a8)1uV0=Wy>7{ie{G1ODwq9@i>L)n~&jf|6b 
z<^J4=RCZuO+_~h`Lx){ftD|L2_`8ukVCtWEExt4ZHY?z`hokK=jb`>>?G(mhX#lT!Bu?0-XVpTRXa0l#Iif~yJs$H5{9vij zOs4)-P(y$hVuZXrX+Ny-3HHGvK*3iC(Cfdlx_5F~m9@jq@q9-HIqQM6!@D!*w>nYH zrUG1CSvm@)d9HS{l{O9TnKN?->^!YqU71g>Fle2em{wU-R*cLOD>D8(p(VrDq2uS< z?H{cD?OBZu>Lb61e<#V8X~(FpL(6jo#x;V?myV(3-|w~6!?tfuyMqdG_fCdoW%-{& zRr;`)ts|g#f;x2ed1PkK+k;RMi@{5($d->kc7Er&k*9MBUQDLye5|B5Vyto&KX7jN zar_T|>l~u&rT?|Xsg7{7D?yhZTt}72yrLYQ zJKbv|l$gqmgz?}dju)(yoD$5fz9MF^`!3d4{BiXsk4}jElNR$f^rH_WjBcvW!x#Du zDX2f>yxSgq!AHrpKrQNa!uPe0h12_qmHU}g8_795HD^aOV7>PSF@+2$x%Fu z3^HN}zJcAT)<-)P+|WnRh`sosOW#pH3DE&f!aFXfsIc2X4MSqV^$TvfOM*ZKS?+6W zO#l1$^S}JBE5(GM3namKI%R0Dr8_ot-8Qv_jIR(TV)fzzHY&WBit!Vq(B?!)!_p+= z_tY!*P5kYrWHg^L?MjG_tckYd(NLt|S3z8xZo;}ye0h@@Hu$IF^$f+4MT;C(@eox0 zlvDp7;0;6U!DRqM5c{TdLBBWZm+%+bFL@fm zXO9fP;c$>5_EYwlIByJ^vHmqNBZ~?-E@$G+=A^@GhnkMg@c(@@Q4ok>-m^~(W5oeX z{J*}W#s+4aXfPwTMvX&2oi z#TFzK1l;WS%|*7(|5&pRF3<}0>dV)Rxp^`WN}({AXOzgGn~cKLP&9!=g$vUeLy(S> z>FXZ$+F@F6MwyoAe9uPk02GdoC9XY9%J5O?X_iB+$fjxDi+Yd~B_{ZxMw;YHUf(Lt zpS=<qNIJvk?a;5q-X=gdhsWX_m!;g%gsAfpd#_9sditY zq+~hqVUN~;%bx#;cM6+n$_0euB(>y20gj!WT?aZbsvxu`vPo!!dQkz~Z3I_?us_$k zXN`V~HLXWG!|_Muk!f|4Jo6p<7!j=ibrf7kav!inq}HWh52w#PBz_RUjyLN6Te|tf zhhDhMpcA-_d1lpLE<*VoQvkfTKC1&LPYT;032D<-tUNj6YZOE@qHF3l5e27Cy z1;As{Lmg-xF?5QFiID>=i;p>0qe!cR)*D)!YwMq1FYX1_i|EJEm3x4-@(AG$KEE#T z+`2#t#HY!j-af zRmGqWs#Xa=7vuR}<_}jviI_Ly+&(ovKHhsps7=&D{E#=6oPvVo;CxqX;1rQ6RE?Y- z1`dF>Sj=u)55=bV&&>36z5(GoT_ct*mSX3C{RYkFC^aPY<*BnY(+>mM9}J+xKed|z ztof^U;O8pcz}IZdZ457O)I8w||CKU1d)jzwWxQ#blH8eq0|<1WN_+B(GNQM)m(6UA zN+{rHt+T!+o&4X7KvIi&9G!?w%=*dluN4}tI31xRO|+J7x$O>x16Rar2CV%{Eul+- z9Z5L!Rm5$`|^m%RnHeug_rR`dO}-2`ORZlQ1kR< zX}UXWH0PBn;mSkI@y|HoF;*`*gjcXfOM)z|8z0dqN=&tIq5WvA zZ+bvk>Ao{mBxzvjgSx z_Y_l}q#+RD{E)uBU=Ion0ut+RA1s*VZb{r!B;nqd1`sms_4B*7sG@#6xFnJhDQEkk+jiio~KeyivfA|j%gRjvNFs~Rs95tE{?%0JzFpMWIfF2tHq%HHVroN2 zYhkCPQa{gUzy9E+T|{(rWz`Ke)3|b#N99}Tt~u)q194!Snl?;GCL}InCSq`g4%I^_Q8HU23pKA!##Qo(B95ymNWwz($A@Df zQNwbmv6BD{Lvgq925~6$VL36EHA!!e1!CAxlMeBnE*d>oOHIQ2`t*0#x?}-lR4DV_au_VN 
zj@IXnut)#!?PlnD0-JB;@U?)X^8Jqjn`J$=7+>sN8!Gc>3(Co18pV>o6FCf3L>kn8 zfSVC#fq@a9k)gTZO2;%tgnJE@1(gT=VpD0`uP+|QyZ00si(e(+#jJ;f^UKxsM=fL7 z55@jGPDSo4dOVCt&>AzTe+=+AQ#vcoEnX?r#6>TO=z- zJGA@~&Yw6T58x7|+iMrUwm~`EfL?{dnZnx7PHH&LnkMzEDlwQ&&lh#oFFBA1{V}qlXRHyG0->y;5^L&V>WlI*f$l%p~pIo#{&= zBKj2P7lKB5_ZFnxT5{lCqDOEE7Q6JZ8};K!)d=GSLsn;Fy;{THpu*A%s+nPrLIBW| zc(rwE#-f&RQ^pu`eNx`+!ldNOt5Cz*pRf)tl zswnD_1#1*)*uBYN`w$3bRV*|M9R5&S!(2eR$3oX0!0q%7*$NR(UQ2Mf1dS5HpaI(L z)E+c}(0u>m>pEb*+g(O6{W5OLRB6Ws8;uDtnyT1J!Ry%sCWa`Dj7!x7JbDr|yppe> z2{0@)l-p@Sz#YYs4cvKlXNmSH?-H93r;HGb&PSz+u*-=LX3YivmNzH`u@ z9Z4@$ana~z|8*3xv3`>CG$*koHU^AWf}@ky^V2~19Ika}MDKRC{+^ zV`VtF!jL3{`1$$E8$G*fS6H>H`VQ=H_ywR^$(9QGW8cFCK6BS0K#u#&ztK$5nea}X z5-~>aQDs+d@78QAEenUJUm0FV#$opA38E?&w!p0FLQ}YHFJ2v|}Jll+3QJtu0qyU#|%NWh{k+UPpys;3v`l84V@`6c;Zq0yj^J ziHobHV6AbCDN7U&h>E#z{zhBd+IVuytGR_ToAgExDtbz#Gs|wNL&k&wB4rBI2h>j= z{7GdRPvb!e>h~Tb2VdsD6+0?BLZr0C-k5O>f}5W_;ts?4OHt1!53ELSfMPf%>93l2 zcG3=O#0GWFmhWOrV)WBk`Q4i5xZN<{(`Kmxd~Da0HniYgf~0>OzOY0G);DG zC_X0uv9>spPNMhk5Ey+{aG{Z8ZUXMLYxeLsSnIgl| z4}4qcO>ddZ1@7bG6Ao(v8#6T|alnqZDWvW%7eJxyz{7vwTslz^m2i;~EW&?ajUB-5 zteb&m(y4T5@(5BqBNj3$Dz^z>|Ivrch9pks^_h5J4k@?da2Y++BX{0O9_`)$N@BP= zWAu{eDrx{y6e%Yv>hlC;3S;oaKDc_;7);0g{KsPwp ze?m@FMC%TppLQq%@9%m3qt<+h;3rSm!a_m>C6G67LQ*}NVdrrRb4%ZubLW%~4-ZXz zJ!q|N-FjW9)N2BHr~WV>GxL3*tQbShQq0BKnd`!ZFcwx;_SD+oOTiGS^^Uy_ylTj~ zY?9XV5_#V%0lurwJY+a$yp)!*#;2;Ju2!m8q^_P`0gQ;9<(LL4f?DVSL?5cK z-MRd_3{cccRuQ?U?%2n1YCQ$My}XeYh2?#`AO>`a#bl)Q@R4?#80kTqU=f3{&Lt0r;m6clskau}OpP7We7~U;& zK-uL>JbvB0{XCXe5|U%C?{`X57cd@2`~UhgZr2q2FoIjR1{KNz?wUR)DBB5QI!^%{Vk_L52 zlOUeC$66-~+jI_o5SoC)XwnV(D7ZRX1v`m_BE;T*{HO=ECfr5Pz<+bOq(rcE&En=| znF!W%d8#O+vhq1zM&}Fn3Wp!qeQmoQqew^; z{*5hmpTGYLjtfHrkd#gsr6%{Tz2ZPb`1e$vX%*x|v=bAne<^?$p;I--%Y*Akb>F`q zQ6bb)e9@Z{*8^+z6Z8sGRFMn*4*rnw!KvEP2lzNi;04y3+Jwm6y}R1>kRQ*J39$V_Fc4z-I~Jb%e~WV3K#>wwzB*asCX0 znM_>-y#(@b79gW{pEL7eW=GP75mYF)!u5f1AE)MZSFdqb-;Cp9QOF2S8}8s*(NDU0 
zwy6L2h3hYU%v`yfcBu9Eu5`6-Z+tT=04f@ZybFtD&M?hWd-XrnAjKX6zmAEq|9GH#wsR_5cq+y7wE9EcqIq|MVq=rL)FY1)Qppb?|@HhXC0ljwC#F zejT7DW5qk^y|p!h@;n6nRG5p(tvES39WNB&1v3A@RIu`vlu!Jl`jO>?cju=*G_sTc ztu4uY26Z>G2KTc|437Hh(op4cBwP&pG3f)~q}v!{*?MFw$H#)tf5#e`;RlUOEW07n zI)mTahTc4?ED50?FnBx&{G(UjM!4)%N1kdP!=mYZlJ@22|FiODOOOIrClxz@mP{n{ zQe5LYrA6p2dfcL(op^suP$AL(*Y#VsrW`kPB2mUH)g2>C9kczCnE)=3#I?kIoV(hT>GuLAJ)3K^fDY^F86Gm5c2Z-MTW|7crCxgfvue7ekmU}CGC<0G>5^f!^Hfeq z2vHaOLc9g}X%5K?R|}C4SaAvn2y7V&U1NDDCohlty~-)D_w(n_&=XXz`WyUzWBNMY zm6eG^2LuExLw@&-tVfBN()pJ>CS}JzDqorG2R@v`8i9+C}032-D%V-dq`xmc( zN!u3l4uI@K4&Qs_Qq5cIV9{(Q{8D8U^}AaWUy6*AAXN~kYbVgs*FU4Eq(s&50ZDB4 zak}+1$HNa1Gz!q1U#0BaBYX}@3{sk4DHxf~9pG3D{@Ha|u6Q&wG+sMT(-kkYhmvq` zB$Ju{2n;26YwF6O;@!2NYlSu@O5kK0g-MnKh6aH{Q#*(tWk6eSp$rHVs1yNosZ#FU zCBAj*?>s)zgL(7`_;=k3l0MFuyY#pYf(`ls%$ZThiL}GpfaCY zT4E}6!QX^{V%Z`EO%R&oR^UNuhbN%Sm|?6XVZGGp564yhW6c{=MD3n{|8!$#XQAO8 ze7jH*LMTO|NaD!WWTHvwOP5=^A`mpH>m zsjZG`=^?Ce;}FxZPq?}K>rN5^x&^YQgu@&iuv=xNV`TH%QSj2gq`nSlH1==Dgwc-2*BNT#12wcND80{wT@@%|X&V;Ey>lJ*5#{N@5)vVu^&Ads`6)lVw`dvc!P z{f(RCaPeYWwtp$-oTgtge10RQw?>n4KA#$$q|A%53Yww`1m{_%&dwXSQ%m}8D{#=Ff<#qaO%kd5()92#;yCH%QQ9G>O<(%^5AhL1$SreX88 zjJo+PnEv?*c;a)8=x303zxXFVXk9#4+sgsy6;Ki$T7h(Fa?mNTu%ClYih|gkW1Snr z-!2)(zv%*yM%Qi%P`Bo@d$1pMsT>V&DjW3G*bfe<`xPYGukjlpyovT!Q&#xkNBYvD z?^J8+!XT`;A;Xu}kSSgIY6io*$95=7|L+SgIe(|7(=FkOw0kv!2}V7VGbP5uWlu|z;f{Bm_*`!B5!NKWY(_va4Q$;%mDv6>yO6I1DQr^&^_ z$|42(PtQOC#V;0>EsWctgjA$?0{$aEKQ+w1Nq^`+N&mb|J!-|;L+{X3)imF6krJ15 zD(;^f-4i%CqG79tJGmkEg)i;fxpG8L^-)lk-~7471U$`tDh+T`pL0Z}{M2o3+P5jL zgpq!h!v851r&4^u>Wg+E#NlSwmCs;!4*e1hGH6f0D*42;x9neNe|b@{=@o{-3>5>zDj9)Bl3M{+=r(Q8TobR#AWO; z#GiK+vKL7I6voOzrPuqf>G$#ZZV-Z{5yt*@y`Dftz=TQqp$nS-XG|jgJ0@4<)#!Nt zz>(O$KPlFfX$=ve{bzm-O#uy-PB)=wt#8&Ca0mXp^s*4NWj1~8*YZ6X`zJn4p^z!! 
zrTl}uV!l`jimh5cZQriZ8K16X!2iz~i2}Oo|As)>|1%%`^S!^||5i7q084H6WTTwm z9krE?zWw-IXYfGdpb&0nP3=g(_kShm>()#UzNy1m6Gy5|2D zZ*ccT2IC*=b#lz#3(aJbdyUG6$jD;)pLJb`gj$(-Q!@W;mOX(=108^GtDPV4{?3?9 zi+w7*kk8ZKk-H#76z3#wD-u(gd^pVx+1_Xzkxz)ifpz@%RZKyU%wm?Gwu^)eA{hl_ z7FV|83gsTV)e%B=#dYzjhKT~wP}F1pok|o6=|8jBFF(9@elxoMv|7avG1xg#m?_}w zMiyKR0*&W0iSMcXkAfC6!*}#>{>g{I5HF*F2lVOQRRDcUo86yBZ#OlWQ+Sg=b~`X@V*QqkOaWA#r8yVI6M-nI>A`Fj1R<0MI(aWa{{lEU z_GNlG?Tmg$Lqp57LG2kHHW3dnMk$)W+|lL75EKk)Rr)(b{b>BFY%(v4P?x3eu;MaW zgr1|nI6?a9ZbyKF0}NOl1+8~HE(o{`<$nNiDj;Ey4r&uvOTYU(KU6y&Eo#qCMtZ6J z|MVjR(6B8MVf}N-P?d?(%Z1j9sv;B+PLF?I0-wR2p0QtA%7TVy**na`CIVem+LxqCR`hD+EZpK^Qqoe>S=Ypgch`}Os8 zG~%|a`>U%fU>w%e3akp3J;LD}y$P|fj9K5e65|6Aq%a{Lyl{q~$Zk|LMPl#9P9Y(1 z55wnmziQ&>I}Hd5Qj#`=Df1KW2*9idPp_DOh49SG%y&>%rv>-4A0}pIW-|ep)X8Ie=lKz(W463MQYz790K6xQ2j z6A?+U3zvZTF=^|{(UI=H#Z2;Ocu)+ray~OVHFbeBfO}cA*Rm=T(};hY!mSXJ_`+@4}*BI{$ENfV9ewegNcW=s*dOO|ml2(Fq~fQQ*3Z#`eI-K!&ypY+Q+B+O(Qr1ASg^`M=)O z@{yyQe2k zRtO=}7jG4k^ndXJv;Mt803GJjf$-E_=_kN{ z=t7`}$2lpU8oY1cwtF6B0F~V@PYULLpY+i$0ZqH*^__3BS@QtfrjgW@0 zh$wskFLNO6xx0i z>Lg>oAnnRXQt(*i+j$@B zCQn@c#p`=r+S7qxDBTN=;2QSnCQc z8Di|V)93Vk53P3?(Rb~_EumsNhb??qjPb)ewX1}(0z}VW^k+!>3~Q+Q!+3Nlj#G2ytFtblWeVe$0B&tjk$5`V6bK@MZh!`xxk!;L#E+E zX!+hq7wixgcyPxsVEq9RM9KndASs4M{a4Wo(6*C>RG~;Eqs7?VfhhV9v-4TkwVZ5{ z4|9!h33?jOSlvp$X`h2yZ3%nE+7dPWwgFFk0lpjOEY|oy; z8{6|iY1NGLQU5*c%!=bdFve2e6za@19*FP2<8_ba!Hn9AeP{4|ZcPU8uir%1%L7zX zvFAq^XbDs?X=&1Glc=P?u^ic$LE*&=m($G|Am%hlYN=!qxCAz;DbSEn$`C;+7~=p^Vh~qkS)pJ#n_en9W>D2bZE=t^ zCunuIAO`DF`ez~QCKy-HVVLlro8d1APq-z9=n*f40?@_SPn)$uy7TUElJR;cYY?i# zFgjYGK>NBS9UBrKLF|F3NEOOh0*;^*#nE7QxAQME1Mn*-!-!z)nAcuNwF^LllK}2_ z%3%Wp#T&U|IT%pZy)6%fx|D&Ca`6YtX|R?QfqoK@Pc%L2#I;s454jX*p@ae6OC%se zU<7A=Aiy14IJ`vW{@rrUWHQ+BUP!Z$Rpcp13TaWm>mcZ~;9)=xo;3=2?OtZVk?;gG zViI=tt59%Kia^U+!y`aB!{4A{O&|5ybO(fsypKMvTwt(7!h#CLe=x*va!Tn6fr zLuC@gMI+BQUl^NT9U1 z63qZBJ+N?RUSfb5X96>>N1V}+le1OT(7-JV{!}DOiHoO^Cp&+gZo8x*aOFu_ zxW#&=Y%PLWpym~ua*z|^cF`@vBQo{C``&?MbF4NPiL&%UQmHK4=-pQG+kH*ti>wmL 
zy8BwYRRBG$IR`;%h2j+0ARYO|L*HM z1{el3)#d;Na8c6yaD#?wSz|=-5uACcoKV6gn)yRmPd|V5{_{X@{?6>r+1YsiAb)^m zNaA$6u%MSr0>BhmS)Ox@#+%&&tS<0&zZc|^ z=l^8qq?1o&ULWBSJNlp{q1Q;G%6q#WA-fI2K-RZ!FoCD9*{rl$K~|NGHe#<#hL{14 z`U^C%yt^(floX&=R^ec*!V;pFj8h85D;9bC%v( zFVt0e0&x`DVjxWu3xsKf_nV_bBIa)8+x%?q42R2MzC-{5(hAA4$&qRJowTIcZ=Xf| zOK{7B;Nt!6r$)=Hmw0zUo-bi+Oh#=K_!Nepp9Chd$;#AFJ@z?(s)5*GNe=s;+0am} zwwUP$YKt%vg{CfU=#np*-7e|WC0?3%3mQ)Ee*JL(p^{(AE`5OSOZ0LSL@aC66&cX~_U z*&(=ke;$RwXtbi_;c%*o@w!FZ$UeyUI*;h2oxZSl@^7dOauv3AK5YB6P`CB|PdC*u z(OcJ#DWmc+d$&bO_HkoJAk`Zm&=fd^} z{p67jdOQ=`JFKLoIvQjPKZCo) zu1}%Q-OlkjUH2o`<62AjL~V{5r5_%e4DXWn>rByo#k^#ao6dNx9bMA#NeizCpTR1Z z4vf95cJ%4?3%>QE)0NKn8C%n{G-O*f*QGm2N2L+0HUSblXMpJ9>Sg6RwmdYmbP1CxZe-k z9SA7Eh5`eWt4Ki6aSe-@#B@nsTxEJk#j#XGa2DA}AbNMJp}NV=xxCi8Si>dGaweqSCYbBXkTzSEDhL}d# zF9Ag9mFfpgNsx`qzc=ib_bV`stGtM~JPcHM{o{On4U(rXR`V;` z^H&bPJ@eN3G$KVAUc%+R2M>c;NE!@Q>+`rv*LJQK#hMM7R}gI6F;i7e)+(_?_hh>$vNX&?{^F%)C;6!uV$IsB)D< zi1!BWeCPNrh;ZU1Jdg8=qmFrbkOBz=p?#bioHTUDbA~2$U@t9Y>ijAf5O7$Wv8kZ9_m8Kh7J)3Bln?qzQ<9=-uYfO~J zNN!>^8&7p7C*eS=<_~KC>hQ~dXudl@f-HLnv~MZs7<;3Sb7FK|gf+CIKyKX|DL%UN z?p;}AyyFp>Zu=fOS%YAW)z}{285gvrXXY>ZYQI zh)<~t&Z&CYssYZ~jrnC~i9%eq)PwI*rJ98Fy>H6<5?EN+F~9JP%rXGr-51 zUP*G=o=kGP8#QbOs-_W(9x+ynb<#jDJ*TqLzAhm}Hf^F{q%zix^d-4W{Cn)3oy^nU zsVy!1@dRAK+}!pU8NcqNkFV_~y6pEt&V{(Z7HMi4c?FXw16hFx|K(J>p8kR2E_cV% zoYS&QWI0kE zSaroQn((IwND^6esm%SuW;G@>SH9jW!na5kiWU(*QS!lmm)RLnZvSnVjWgAE<`lCk zV3n*a^zjL_1^~npwQzxuE&w8MPzp&SvaHADoXF6H`)EAU$eeHCK-c1B#D8w658x~a z2n;&on^MezEh-+v@m08eIOylgmHOS5<#-=LhezBd|8*M~nhwxw1wSYJ?-#Xw6`uB| zZ$V;G_QGSUKUv(MD_FJFc85Wo<0nm)(PDX>nh;|l@%yJJ!;BD;^Wy(l7y$7NP)w-3 zlebiKj8v-mVx~JYlcFLB5iq^cw30Pe>doTMdB&0z zeu!=5+Yg@6Da?m-$i5p6S+r9%qn-;8hOipk?S?=;=vCmbU3* zxnn=cHWKLKfw1<>Ts$LP__>>(`TPltDVn_XMNDssCMR&Dc$`+5Ez$g^PjO!MZzF5Y zH0E-1O6JsR327p0ez8OC4IBeFF`);ajsW@aGiBcCHeGVzd%#QmzXrM;qSC7O1ro^N zMkQq=HhnL4^h%`~f?O|kCIlRVrPc2EcG4=$2&d_?SoKaX{sGpKbU=a#$0h?{Py`X8 zHRG@OF1}eN1}z5|b_t+XfNYn<=!A{rS_bDAcTaU1EomPLbAy$>lSZ%10% 
z;n0cNbPlL_@EmtzTNTFJJ{3%?81yjTC8__o`XJiedNjOxnV$8K{t^N{KO%$$i9!XS zF3D>_aFYgvRDXW?&v`z_14lz}?;?%AGrOD1(9WZxWQ&>N^=JcR?;9qvkZ@UpoM+Wp z-j;sR#pcUb;EL^4K}2EC-$~0x9G0Kxe8eKgzL6lzTlt}kD`79UdOjXxZck1gqu+^` zEj#HcJ?BU+{wq!A0B4*s9aB1~K1hSvQp^bYF0jk3T?5lHuaSxqK}n z^v}V5Gdt<{7w57o9yJFLGX|eQ1>&9O_Q5PnbkHMrzvNc!&s?!bA-Sl4+9o4+cP%&J zY!fdwz9GJrr5ow1q(rXvF&W9A!_7^_&JA;=3{Mu6IvSp%sUM@C1>IGvtSR=Tpp zV=)PrGuez{^oq%nC;7rY1Fpko)28byX^KV<*;np$y9VG}WZyjkX^o6Okj0Ln@#N?L zh|ETL7t-SekU;6WnmHj2+EoZC6S1~LRYQv!o^6}jn{$;mkMT-1r8Pa`cscd9r71z? z(KDQmR5#non;@sR>MN#h_a&v*=vfh0u9&V+lg{G$&Wb?o^>w7K8b$C#rig>aXfvUt zr@Z_AsD2omXjqIdWXC37L5_zZDv+yzC4S`l`BG4McJ@n(0m0UWiDdoggV8~(sMKY3 zWtxs}y+J~_Y!c@L33JUzmX9T2uFh$z8SAAetR>pdvn|ZJPbZ9xukV9a(~qx~R81%7 zZ@0HQ`XVDzbfG`T_`()BSMhD!vqs?Xy3p(4_#S}%+0ltcr$b;22I~J#^--{^ujS7t z8+3{DW!r^em+v?HVwf(f^X-WF4JrCqJni4xSJf#mS}q>1l=o`*u$Z7<@w|(BPnat; zz{#RuMCD7TmFZODF8KYKaHL|G>|%2A_*yvGnU}2n8z4HPtp3?2m}&rdiq_NQ^groa zhj3?)2X(IAoWMo@GY`LQa7er`M*)H*Xx;3L!SCaxt7kRA5vwOO{@E`d4Ykj(pD4XR zD6(JXX#v8g+& z{)2z-%?;XZf@RE$-^XdvZHr+K1wct>6=C?Nz#^W&q7jA5LNAYNWs|P6^NXYD;>njF z61V{!6A_#6a0fluXOoA!W?LD?n{HdP@GsyLwq8*W&?t2H8z!jK2Ypa2G+Zj;5$4Kf6XxII?wvxe|Ztzx^ zTP@U;_93i+n`dXBYgX}S^Rl;r$|It18(OR7^^t>*6J^5%jqq|!^ECCZDYg%$Rlkjq zb)_T{XN}r6UoReO=i&cd0A}yT%^L?x#hR%i>7Uk75O0%({mkRH`_zZf;hhKM(1e#&?j&;j~DvHgxO%!krYKtjT zN9GJY-kKe=H!MX2NcAHHQw)SoC3A$At<{=zh39nj_ad`RE;PDCl+$ZXk2$m%*W9iC zMN}NZ%G!j>SD9hfxw0rPsMnV{fbiX4TkQ^isw8d0Oy1VjWfBeeVci^dn^(vL+|!#K zw3^jHpn;PR5fgLS3RK%OELl@WUP2ZYz~ZVWYgA+d(nOGsz4^%}f1{8a_ou?VYN0pP z7SF@ehT&ThNV>TaX{XrMzLL(lUgGWU>|6^3s}%9<@f^e4N0-hPLC&u@-U5IRz<7n- zKnT5vtZZry>lxyb7?VPKuwO{G0a#+?1%OR30PJs+n?S?Ik)DQ!CrQU;BMKLkeD=bF zf`SHx?5C^@aCU&1Qz6M4#>kC47W0q*M7t3L^F5^-vYA=u!;7X-4%!Qy8qlFvuC<}v z+PmX&(ugWj5dP)vID~A!vXyz6yjbe;wcf^LZ{;rIm>I! 
z;S>+CS*WGw;*=B!Lge3_lA7(bSscZ%9K8|}6YDMkB{K8!k0Zpoi@cMw!LQ#DTZ001 z7Z5|xP&Y3)2i1nia}*(;a_%bQ0aQSNIOR_1U4iO_EI{$#Pdw^lm_X;;(5n^ECwctGdAxhxSVzR zmDwMU8ol^^n!X&?J8s_c$$uuBC2+@8Ws}GAODj+SbV#9ztlm_n*psBioyxd0ZnyQ) zu(&VZWR-%*SR}ZeZ6A+==5Zn4k7=wnAqXLs3ovi4S(%tBa)Py4CV<|ox)UTwDwR)2 zRT}AFqYm-fu6Nf9RmPf^;aC8WOCh*Q{#jJj0f?gMuYAl^Z^+cmwI-60Az_4|;er6E zNJeqChBklpXP&6{AT79ff_1*1P478mn6Ft& zIq_0}Qe}}o6akxj+hX>J`Rw@h%~B35WrHV@nbPIrqB!0pd?I^-gs9>}x_C+DDwmU3Z`oV&yUFSk3N!#Q+#`bVs0hnkHmVpShtA3l`c-fW?c>nJhdG zKqQ4c_||>$I1YVLzo@Av_eF8aLxiLCt9!a+8$~~E#70U zowJvm332afe2Q04V=_?U4sp`n;%yDzGO zSB)ap>=Jj8wV%aWl}=G<`%JSgofcJFrd|d%Bq_s&zN)lW5Zs`)ww(61M>32ET(^k| z*0^Sufo=f?0f8qM8(U3bMs?x{A$Xcy5Z6w|<5?t$+ZbHmrLP$%X z7O~*bF{K2Nj8SCH7_Me zP&@TgXAZepE+!5ZacF-VL=NG7u5Lq>_GFZwmR~j3BginEcHva$V_uR@h`+$JJZm`v((g>p`*Xx9TFV zA0EspDarABVjkY7UT=jQbQHtw4Tm>8UTZzn+Hv&^MdpvCDIV|TI9gC!9S!fzA?@2U zms)r2x$>aaJCE<2mEfCiO$`K9_C#?yt(D*y45UUZZb4L(qE?AX9QQu#E|^6|Fw{Sk z;(BR|(N_jI-G65{eTKj{CrK#eP|8_HOT4!89pT<;6N^k9)gzIex7V=T0W5#sL`I`t zTu`BB?IHGt|9WvSmyjs={yp@;LVe8Y>T2d?MtV90BclS^tL&St^>r~zAlt2GVF&t+ z80hGyBOo?Cnme@On9I>HsuUZNR`}_-!V|Xzg(mC#9G;rJHYw?5|MW3s%<3%3MS?Mn z!KhOxAdVqvcQTJ$+Gu6eEk)QIIuJfH<&s?D0P#`9j;E7YaA5oU3a6FsLv-&q-mJ~h z`B}%mX^nIl3yy9LoyD9)8ZujUy=rk-PD!Z#S|pa>_v`CQr^CdC;=N{!M@ipbVgIQc zuvDO6Pr}t+--1E|hZ<6Ns{#{263b&4><$k4h{LF2P}wxx%a^=B;VIikisH8)qB?Ww z#eUc`-{$7#nFm1WR;sbm7WeD+0EV^>fY&s(K}915tk4(+6Kl7AA2s`SmwwvSp(?o zJFhrT;!Jw4YrUkK+8KGtBXj5)i|6uNfG13Y_* zH|*c0U}mvVen!Phh@wy8x#|yqV2FF*<^ieP{_&r$9 zGvKh;Gfxzp@*gi*%%PY&v786kpAK2gE(Lpt?zR!b`mmjGHO-0G3JEufKQ3inQsnA~ z*|+hopX>eDW~hvbNDz$3kxk9jGkEK^->AW2ra0!?7D$3IQSg1VQavKiEXwRsm>dWR z#UimrA3M^i12zhROF>N9YdyOk>aR;ujw95kD|mwZgZy&4cDxt(bD1g`qhAw2$HbB=VDk;jiK#&d8{WiF3?$yotOI!(jn30#vZ1+)T7$U_2es;0+)DbVEB5#AuPR8B@`X0zzv z?>o15jb{B!B~Fz)Z3cAYZ44T39-$u%eFsMe*tmjfFtHzcn?Lp3tu{Pw``8_4m(%l` z=bFrOUK9P}#aHa{I^j9Ai{NM8Weo2^n7a{7?B2~o8`E93DarWQFt;<9d;<%GBTn;i zm!3B~`=cdwGzYAVcJue*=ewjfBSro{Te4Q9Prv~#A!WTi?B?>zd4U`DdZmulgMpUW 
z_(5!;BB9)q>%Cs6`VfzjnHF2M9p(Mm%neJ57jM_ZR4lXY_AyoolI!#=sGSv}+MYsM zZFZ$N#3cHTiS`Z?dwISxB_%SC>!f>rCjnwdMPoZn5_qpBGD=J3wpLbJ@q0eFU;_#K zD6DzFYS)Kq#f8@+MQChjh)oUK%%M)Vz6c4u@wvz6U^>KZgcWZd{CIqxlD#UBCh5i z#I)sd73Z2(XEC}8*YTJXyFDS}Z!7DH77ncFm)@Ez^=Gh&1Z4%r&?$aSdG*X!DO>01 zAtusvrSUuMp}NbT$uIt!Uq}Bpzotl$>=zR0Y)aX2lz5?Rs&J?Lp~g$nLlnPpW2$iD z_ORs(+wtwz<*Q5wjVh68!RbQ(K^i5R;cm;CMwN4#F5(QVc0pjh+!TGs7+IMZ~g zY+N4sN+0M`*^(CuBTD%P748?V(yDc)cYhk#v}Nlb=!s;g7`PR(s8>2r{RP?73Mp8j z!>N9b)$yhE_umWHOm{X7#@Ex##>?fUi+z)3d13>d4D*a(Wwp``a*Bt;lBIJUm5rRr z8NMCddK?Gz6{?n(9E4x};W(z2msg&SR#P~}_Vs+Y zkK6um5Jd8H)0T0);W-(nn&vdLygZq;ZvFkGZl%|#5S3~c6^myvn~SrnS=d*{9DCc-?1a3oQ0wiJ zjc5T*)2+Nc!d$wI8(xfZXh>JIfQXSG>C_V)xzW?NSnRQAaLjyG)<86wDcML)Zg>_r z*z)licB52qU>6QuE_uoBO%_GeCG-x#At6C1qc;9Q5IG4Tt1t!3!irK1#@jDD+pezS z_&B%jJ1+ zx)em(@IIScG{l=>D5N7*Ehxw63A2pNCpJ>k?wzlom)bCVJs49YoQz&^tR^H{XSY=? z9NxNVXmxL^U)MO76(l)r#pU>3OlfUS7X)DHn(ZFe%j7L@@06DRado-kQa3eC6!Wy* z5ItK)mz7&#+2~!TVpR{u;jA*a&F>R^>0?itLQr?Uhrq%qosiR!04Y(jW;m39Vo1of zBFJ^pUxf}_r-o44=rPQF0q?C{3D~YDAw{2?FUMs0JOJbbe_veva@VbH7=RBFDhiXl zF8mPZjDK63i(1DX;MXu;GMzKSww-n0{Iri3?3VY; ziyT+jXa4SzqoTX3FL2ks^dX31jp)Q`F*Qih3#%D#c6HHxW-rbw`C-d}N7>ry>ZbVs z-}S!IA|=o2e3>)SK-raEz{P{0ozh`B%gwW8a9U&3=%faH(R^YkCW@__GiQL@ip!Q6M3py z6n@oK^AaO%U1NnEOnGichcEkol@)_dvZD%FBag+KgL}Og=Hpj7o)KhH38KKekLnFJ z!Pa2r@Gp+BkG*aPlzSRJJoSztXbgR+hv$*3z21?m;?eF4-V@4`c#5Y7Z!PkGmut)Rh{+S4xm!~Bb5&fv4DVa? 
zffqSyL>=fNK;0$epUvk)mYX@bPo;^e29S}cLL(# zlTgIMKIKflECkTeLeKX~RFn)1=LvAmM;l>%R!ftf9G&}3m1k{P84}zkh0q3NguxPK z!lvx%Ppn(R7f{~*`p%=xW>H;qQKj7XaTyUBs4RlG@!1Jzi5Oe$++`_cI3l^8#DRv) zNUpeGo%10v5+U2yyE}LP^IJx}3})MV-@O=|a6JjR_07%UpdM?*A>OAD32=S#CQ=(c zU8{^vnKapG`plYigO9V{fU=MR^^%L@Oc!1oCEz8+3OY zkj3>6rcXGwW{2nIZZARS)=l~G@nZ04LjcwNBRx?V%A3en1t#jP2WhH!30IupPdYYN?PE!={Z(Pl4{&?~iaq9FI$vPMBm{;}SPh|w^QizKPAyX8QE z?2fYj$4I3Sx<=hcN*=D~S418S16qUanb^s&jhC@iyEO<#d2wcBI60UptcW zVK*TfG~^!M@YG1D=5=!Lu90xqU5tzNs_VBS_Ex1)T6@ukHfKY);CRivWsQ9BEy2(1fHBdK;Z3JU<&p#P z6pQd8!kux_H*UXi0cGQNnhmMJo=?%)W@OFB)gXAK2BG75?tG4$U1KyUVoz!Ed z(^qE{I(ysNQom_EW;&-yav~ z6DVHPZY5U~<^jwiCnyrHb_2c!4$4SLG$!76;5%6O{pCNeAZqvtx4+m7cs%aGmmiB& zN6W2Et`_6FVs-0&xN```i7FgDJ2bbl^DoiVb)+sW^cQT>u#y7>cY?L; za06z;ev~3Z-iTJ>j9nz9yn#g z52PeDPp2q!8m2!Ga4*N= z1nD)mGBvGMIMFQX;ok*=Vj12#>9ts8UMAxG95W!TP}a^iSKtBqS(S+YT-I#AF7vDw zb-3m;=%3oX&*{69dp?8{S;gO6@ez*gKMtmIBJDcM7+%$D?~Ywp>`Y)+oMwjx|f-u-enZ4mCesr_+gW@arWwTi`WfTe$BJxOy`fCzLj1&4Sp z=X*d6(z3g|d$}H7*2gqZVFrw}11OS0o_e6Ld-(!JbHaJ6z(J`=$i~cTZ=iO-_dk2q zcy+Rz4xg+8OTxLXv+)v(PWhDVSF5%9?w}a?KdU9Dc`obf!==}jmxuv*Xj)BnrWrNJ z2-`39xuT|27++T%^I*GcVjv)9mT(A#y1(-M} zfIU^!;MeZb=p3^wbs|Qggu5cMN_PjG1v#=f1vhTQa6Nh0uwc>hiMU`*NiL_WH@dv8 znfh%$_M6?PT6)w7i%>gYvs=a3er6&uO<$m23pCVBm#^MWwF0}Z!cvxuocxj(Jja03 zyiL~(FavzmbXd3d0?2TU0b<=YSQ;^Gmb3PbfFAW7NFd!Py1L%j6T7nY*F_FK57#Sl zTg1CsiK>=m4)jMYX{@Stj>Hm%p5frAyt^-U(ChyjzYwae;DiTB$?4D^wUgna{2T@ruFJ!!>YdOE=?NOraqF!y<=>}k+K6n zGr$G-U~&_2W^`#MWFiQo9f^kyYN`)oVxB6^Nkg+*gg$-8O#fhnopKCTy2 zGBZ^10Shx+PfH!S5{=GoPOnlIgBk=$d~@Y_jrlyqjTa8P<6Pwqd!F-jq>{P^_T{k6 z;8ABigMK=IXGH0HxvPRSxmkE_1(2QQ9x0q34>Nn&qK@1Y+xAM5ciX#IU>q&x1^vZ^ zw+C;p77`}C90@o?ykEIXn9%8@N~)_;(6L58Os>ngRtk7%sxsasq0U$G(r!m>ySAcQ z>ZM(VS#bY2r&^7sp*+fpz{CdI%*}&DHW0Y>`ZB&{64ghCQeUE% zFt_%j7gM#5T@9V}OVE<+Xf}Ow&LRDxEWH2JJU@99R$XCH(swSaVe7P112G&Gt9o7v4Ad1-jU9ad zYla+isfSpO8ny!ERIQ28K}$HADY(0)qt_nIRWKtkkx(G+exBs@P^S)+=V`Aztwu4Zlkd&Jf{OZ8t3Qz)R5L$ zJd<^mh!#zH_unyh$Ilej5HM-vt0nSQZH}jTva)mTG#!qAk>0FziywSxKE8LzTMafM 
znf%5#n4CqDBQtgsm$NNqOm^?O`u2b;S)gk$F|GZ|nX}Z8sVdjY!|Fbi_qg)n2zk+s zI#k-Yd8oHes*hKb|EX_dAv~P!Mn$gkMTvIq!c%Pu%34IDOv0?j=6&L(iBFULrj@&= zq~i~*zS}?+t9A*9y7JFeTclQBI%Q8!LmN)YjC`E2Rk6PLt}>9IANPn-LnF9T_w{cW zNZJ!LM-dYF*boFNWBtd1R!%6f$G`U7PB#Kg6PrJ@zBv3Szo_DVf1eMi26xA)(r!oh zm*QmF!(T0=!v?aAWE|`zPz%LqP7)@Dgchr*ynCUL{Q1vR zXEjihn7;wW(oKI>A{!h#1hCI^*NmhqlfNGKEG znivL0(;}NN8v56fgJks+_-@^OHGZ$efmBT@R>#mw3$vb3-^mew^fD!eJu5SXoP6o3 zx}5mRqT5`<@Yk-T;#H}^zdqn&prS!22*6S4cgQ}wsW)Xv;V(1K@(loNFRzbfoCV{N zR2wTkrt&gJgsn11L6|!J>tnrwIh9Y*AbpT#O9sU<$?#3GL2%tB%D;$`S6I; z&5|nLNOv=9X8W(!C8W=-vlv!}&%b;GnpdT*Ps`Ubk+Rv#Ulbq@_T7NwJba-eas7a>9mk?QbVr}o@Cj7)PGl6-hm7rSaOF-U zv5g{4k-9;B$zwg_jg`^$tPgap%oBI8q9IkrdTvfBB?crxfFDe0E0YS}+qnLOTsf`67l2&Rb{(j0f3*$YJJ^#Q;-s1rKNS^LjqF{; zO!XE93KM`wr0C0(d9ljEuqcmeP`dkrD{rd#$meD;__)7Poe(%Q_ z@1v)2|V2L>`VlC^{0AHxd3XC0%sMqVF*kCwJ>nKyAs4Oo?Y5R^WItYAOQ!+mm< z%HE0^Np&b>qtBlwU>MXir7GYOiwT1m#kX0lsc#Dnb_4cXYL$}#PE){ngoM} z-DnQ7oa*6U=CbD3>?(!yz{fBhUrIgwvJ8IhD-&bVd&fj(?Qlti()bh$kQLD#$U((_ zha9hVm1_Sl|C>*y>3TE4rPpmU&ZP3O^YVcMkH<9g$yMOf-erHy8CwHyFQX3IyT4yyVGCWr^H_NmgjEDQ=^se-i<1WJG}V5 z-r3)9$g}ywBy(?Fn0QBEyXh}|;7YLKeHnpn(`TDmOev>g2riCOKk<;{k7|56 z$Nz=`$}cF@FRaB92Bl_xQ+{uD?P@Ly_^fRAMp2(_>G17!r@logjpGNL#Eqepl=6xS z@5!LXqCa6w$oq3-H~)NqkGY)5TVOsZ{59XLHc075}#KxP@R*_%gdW;kpu)gO2ur567|=wm>Uo; z2|#DXV{VLtfym^VP2|f1;onn?3cD>Cpfi9Ay*_&7SvvOm{$G|6?hl2020yDv5|3gI){r0nq3j zAo<~5MNdNFeRH-W2>wfX!$3K)0U*$|ALu8Hjg3WJLanL{TkY<^gfwgC1DL?9MY|v} z55{n~J_94!3s!dZ5w5#|N;UavDeUxvPHa?y{&;57_keqy2F~VNS_#Kbk!2mRKCrF? 
z!#UCkB0v|!cBGSIK5^KOT(7{X^ zUS;~xsO9YYG1My`ZkNCNn~eXxY-qmI$!_`cOdED?f#Wu@=HQ-hACr5gKHd$dl5SHP z@4R~kZj~KOjY+Pkk7|}~;ZuNoTfe`6kzDVwZpzC>-~;uKXyZmGxeuvWA_od4a4>`ICR;Jm|d6p~9`UlL?U6)s}idgy$8YlK4719<> zrK)wlUn+hj&-fkaNXr&5YfJgT}=HBR6wK8-#_$PmdoRkQ(a(| zJ`*?iob<94oPbk3C;5UDDZd)3keKD|C5;rJ$QRKDk#iLf8$g-yyMRUeDO*9zs@BN_ zmJ4@EqxR0PBuZrW3|~3aW%B<0u_M@#A8%VuSV|%aN0+~UbFQiMy99Tp*};Y|#-}6Q zwol@HP0rr06G;Sl+a-%EcSCv>CVWqQw+EE0QqjlO2ZpCI(pUE?xjxxNQE+Av#elmw zMF0Ju*$+6}lY~dG9{zkziI`w}iKHQ=i4oLkxaYr3JYh){Uc~Ti_t2<|L^@qlK1`a) zeETMtvwwa|ic}rUl_vxZRp%qe5EB-j=?>E{}b72Fug8UBZl;Ixm{A^@HuH%1F`FW~D0sg0W+D z=#2NT4-%@$irdZpxe}s(-oYM_l3^aiM1Og2eKz3Bsq*QoA|%agw}HN4+yN%}u$c=d zuAJ-j-*WWn=UUA#)0^*Lj6($+*S#3m3^EL%KHNwo`Qi=n!ua)$?}lzC+N-ej32(9_ z-P@^HW_W_}rD^{@@z$oz)0t~Q6{?{u0>=jv$r|#E8zXwBbbV#~g8UCVq!uRFsqA_B zeJ|My6&`l-d6dq=c4Bu3rD1Xui6RKkEf;t4^C{NjxEOo4bn{l23t7k54VTvm{DoFO zD_dz5!bInIR=)EVffGHvko&pWMUYu;_j6>CsP%L&H(MTvptDm}Ut)gZDh`UD%gs0p z+g=${H@T4!#skNpV%d&xe!y&MdvTz2@`+fw|7g8&J-Nn3X72If2!fvHfK|B8s9A9Q zIAhI8F9?U^bU!6%9_RMSE$nyXA=m^oClw4Q81I$YD~OgWPxfi$F(&TSA?3dP5ZeyU z0-5Pngo^LGaCSkAyQw*2Kj!_a1Td-$<18fN=hv|lWSEEajoS!QP8l;ukQW)N*7s|I zS{dTa1zmw%FZvbUSM`F(_y&3$X$;Bf1%wt8Bd>tff}W1ihE2|?p1}5$Z_L!QV`s;^ zlK;U1e6YeKLpCpH6i>a+f+{3atufrlAo;M<)Tkm#U-q;vICZ{X8AiJGobTqApHYIr z5T>^W+<8UQ82h*i_K(eLs@dsk1JcDuDM6$-&v$SJVwmq9{8{q4_02&YRD8Nhm4-wO z=oqw5OoV^_BB{(xubkf?B$YniIyIo*_Y#&T{L@f)JXBObV`JWmQC_s`Y;t za1McWZ9-#u8b1lv{rrt;EvpTCawnoAnA#DQ-pHbdgf^@p`l?2ij%pAA&f4Gid>@Cw zEQe1{78Lju^Nm8-t?7~}&&BbJ4_M`{2zVx@v6~uDbiyR)v)ULn`MAJSq58h&c_uXy z4-ap0)c7>wX1h6a2=ahjNxj+xAGLL;{}^6{V;Xa1%+x0qRkG}G_Ux28sk1*_i^HU* zOvHdzpJ4m!gBj&jy`T51<8dyTc5Qe_zHWTTWOK!d5S#V$)^bVVjlE-nvuIeM-%S6Hxd2*+R-eC0DE)V9sp~hQq z=j6Zd6ytVS8oS^wvpH9o*V(V}XEU$j^Z}DoWfCbesIbSa#YcdoGV9>iBlWLf@Fv!q zs64F!pz?5opAYR-^Cn?>hzK_a{8*K%BWodEsQ2C(G>v(ip4s15m~l7pi^_#owNREKPYj1?R&VJ-o)Y4B0=P_$BgM4>Eq9{ zkvL{l1gRPUL(dwI>UY*{HY%TInq4BTs`jaWuVVV+Aw3s1xw;*Zez)otv!I#y=6uJ` zC}pQa@26o64kXAK`l=#^I*QK%>H3jO|m6!lMw$b 
zD1T2X^TADl+r)popQ07r*~p_an;(br1kccAtRBY@B4waJ2@@cjy1T2al~}aGui-H| zdJV1LGvf8xFkXhVFxr0e2gu`Z!v@t4`_M%ZI3tP&nC_w>j!mB_G595X?@KSjTX2>l zLC$N+(UU}o1{KR=8(Zs6M|l!x1+k; zlm9Gr0~y{Hr_!sh*3*mj&IQB(Bctg;hfUc2Eu}~@)>|(jd5q5>5F+yi_XVhvQ7q`a zxf9|l#~N5g$;JQX8rd_+{nd!6oa1T_AXQA-pBK*h*g>EDow7P#nA zt5dAE^+gi|xS0HQCv0@UPFt-p^0&ckq-Npc<1=tSzJ!S&s{%=&OXBXge6$!E!@mvY zs6IM7*&AYcDj|DJCe1BPg47$7H)J$NqW8#$72~?}2+U?MGJs{@&&zzX$*oegtL59kg|c8X!G ziEbR5STA8KZ%6g#vgSgVGD4RqWGpEckC%FMa%Ip9?r{vVUp3%fz?hUG}3o&Hrt0G{BKNaH{prAf;83CK&9%$=V9HENL|7#U!z)Do%b;# zgjfJyt(?&72~Rb_=HdK*EVn{vwq(>;R?Hom5Ul_(?rP%i2een6NMrpn z(nOT6rR~#xxO7}6V*7M|-O?Mx=o}b3*u}t)R|*%m#|mOZ4aB&D)MQ!c%)!ci?;69q zD@H}?j?*cXUflKNY_X^Q9(TQ~{J(M-{ano>t*rYI0AWbU^zac&CV`gjm|IEYP38{Q zwJC66cYqi|aF&8KqoX0_V3s9BUlGe0mM~=(-8pft3zNOIEPjGHveDCOeo0FF8)gtL z#FQc!r!}J$r|}aFtcF+|6_Sdga=^Hl=F7?2dr*cKQXX5(n%82^i;j0wrpX_MWmj;| zm}MbJwwIB)mACxZB3u}p_Y2*8xfmhkM(VBRACjcaiExi^0*yDbFt94;nIP0Ea1B!% z?W8|$U)2`5E-St^8}2&wfRv!lWUI>?ra zG2$mdKo$m7%lQeu?&)knKP4nk81iYjM6vYIxfZZs;g6v>$Qid89XaPP8Q9ESZ>crP z4|1J{c$k;5LVHI0@GU6iE-6_Q>vd2UV`4sn=}7VOLWD8CC_1y88vFPIO2_09WEFY- zq9Ne69W#~Ia-&OWqC`oUt7tbQR5*UzY;=AZw5o&{^=+zF-W^{uyJ#zgFgUGH+HbsuC65~pSGXr@ z+_+aKO2eNpNUI-9N2Jcp%(@wf)!uWZ@fNCh0_4{rzD9z4yTK%gD<1_XFWiQ6EMwXk zB2}SsL|&NaV>5gsjR66x(^yV%d1j4L5*H9EB;%laSK$`bcmLK)0oDgVz;96uk6|Ra ziFhlIy`Xp5B&6gM$|T^s{w*F1g50Sb7(E53dLZPnb|cDO~AY%^FJA zypN0NdGBXo?b=|4MyR(U1~Ut0zcM~zmlW3!Cd0&!i^Ep0!S3)1Gv&ImJ6HLqQgzee zB1(o3ik_r4Z)g*{dm4Kc-H&Hevjr7SrmteaDRS=kA*y=>WO}*~E-aW}^ zFeyLV3~$_8*$YYcc@ajmEVmgsVtf3i(hf>O>6G46oBVjUL)ZVxZcdQKj6!nX#W#^o zd^wTt^{X$a+o^}Vws%jQow{FlbTSl>X@PO3xY@O1dwie4_IMxb8Q@a?Oi6=gsyqP| z&oh4u2Kg`_9P6^r1ApB$S0Ox_b#HiOg&rT5vAgMy^$y)R8xkSRM=RvHVcsaSpUK0{ z-XV0ghziaByM(m&wk_>-*Eh4WZ0f1X0C7!daKLB&lq&s<4wG44#>lF(^#krigeA;v zxM!02BR8fJX1NJ~Ign_BS|%#vk3ReD(r>1|5z~qcmpL2sRy2hTI=!OWHL>o`pA%}z zU?VO$m04yaVf!0)X(>aWkZGcVyH%LGKVpRtqdgR3nvB(ky5j^tBUEhy-UWyDT%R-? 
z5IX%cF`Fwh|7Sp`dBDxAds+=E`YTB>5d=|298*@tKi=)HkhQ`@CBaCD5ZcUgB902# z=(*&WvP>}R@p;#C68{ExeH%T2cY|Hcp?V&mb3D@ncYeAZSiwd`iN zzvuj@8SE)ClRR2n|D@X2cy?Ml>;vau-B@_PrPAMsTMsF3Slny2z^Wy?JsgL+zs{4C zrnv9EGb{kzruGY7r=%$xli@dl7=xS;zRptp&DPEYw`9KG>H#f6P{4Tdr9OjEGm-@P z(}gkP^4|eTc)%2cK)(c2?NPW4=0!+F%{mr4eid_&_9+e*xOjZwtb=ju*+u{5wLZ>I zPx~l^9j8U-cfWL<0oeCy<_>%c*hHpEUvb2 znDvv&G?Bxf67%FF8W|JFqfurwnDh zOR+oiUs@HOIXPW!>@N-q$lm|vEp%E=Yzqsum73VVD~@0xi70I*Qx|L;i0(dLogU}2 zE)0k0z{6v!BRh^|gZLMv_|>3m$^%Ee1;3_QTi26#krxY{Tm}Bsg$2@AuYW&JKXuIV zQL6lalYf4X(y(`aY;G;Jf67Q2yuZ4`WTWOCNY%4R#)AE%9Vu{a6EIUf8b@%^^A4p^*`tqA76SsRp(kA zgK<|uKr8%+RF{7lVLgIZ`xfCuiw8YIeR5Esn9@4`gG%x zny;I;jL+eoYGxO?(zxThIl%|5qO^NYE%bjeNWWB!J&T|1r?&mpFMVLReNuYmIAq56 z$E=ab@x4KiqScCS-?7`t8>!y8K(~GIwJNBJ+oAB#Cgx?dS;u2pu&NDI90Yr+$< zw~g90X}nR?Kdg_f6-&o;Bosa5vA#o0EA{QBy|0Z!?XAYQuhWOv2GuH+UYoT?-ddSC zVi2l3iyz*9lE1pNepn|Iq)1fP{b2ZwPRi~d5*043-)B`TgLm{Zd3G2MxORRInsP55 zhel5ZTyro8Ul}W05)63Lyk>AG-CpV?c^U2hJ1T*DqeT8|ygFCwSQPZU=mi!mU@fomwcAV7BUISG8L`{2+9E zz-~A9VoZohJKNj(L(C_oCj$usSNf9}+%?xT$!#51JR6x=2c_*IVwA}h4;V8rQPXUi z@ksOj{|aL;f_TBH`+M66!d*gwSVs-Q90%#&gBM5M>awVrUrywCdqTvG^g6l{jd8l6 z5_bB@(sJ~eHDzD+64a3_!n3pB*_0P5E%Pl zKuxZLUeabr=hY74>Kjc=u$@0}P8cEX5_wUeL#bDUdBcih$uuXXc~x7bTFwN3Ri6*A znLk~W{)UYnj&h2dwf=*f=!`-bW?T1sdP#^{?|-qD)g3=9$}v&aYV}LhbL1RJoN-Zo zxXAjSzW)nM0&fA*$+usmr6a+%A^r+FCF zjfW*h^^ze+dvD|9%rZ2nir(J%diw6WO_ZqKA zE!WPz{Z^CBvjyipYF~bK4oF%O8w($0jEL#?MBbMBFIW=^Qbg3gqxK;8-jNz<5$B@( zz1PW&uA#>!>B(%iSNumFe>q=v&!&?*Xon&^L{Q7kM?d~_sbmMrzaQ4Cl>`C`9R+YZ zN1@OBqVoa-o<$)BppXASPN5#r%@_C4aZzM}y7do%S-c9xkkMOeJO|_HM5V_Ys&~f< zXH`C>2Rz4q;-yB;ES#r5+AZq)jYbW5f^aynz}cY}yWwM3ARR|$_ZaZZqo6AFe*pZI zgot6^f0!CahnaN_K*AYk6)?ECVdHtmzEu|0)>?3&Dzq7Q+HrFwNg$!ZVOGH*<=S9_ zDhGweiBf~gtmCN`Ik}}Ui`yu>(RI7@-eEmq_qFSRiG*~A1bGqk=;!zaE^jIAtb1Tg zI07*L-?P*N*3et|9Q+B-%lA_gtcG5{Y`GfX8`O^mYles82_H^d2=sS_%Z3lJYD&hvC)4}<=;&Z~oH#aH)PUGrsR&NoNSFu!jdI^7MP z3Ei{frdYdAisvg$qB+?L2@~sCL!Z2nRG6^2pmi{L_mV{|zT#JYdu8iab-dOW z$w{*4!WOaMgI5L 
zB18ycA0nl-yYP2%l_T>+v*)v^{~%)o*>?vR!mr(y9pp`E>Rd|RrARw-5Kb&O#HCbk zAg~eg-K>44ZZp*QR(jV@Ikfv)G8N4oSe^Kvm>-}86+>Gd$Er)Q$cIUAn{n!rAh~PY zJ(0+TjkFoMjer6|)gal8kGP1GF&f?FQuFtxcgQhRvV6GA;#DrC!Va^V@W ztW=8L4T>k@*ZOXcyk!WBo0>A*{owal<(fC|w_hU|T0pD)i zP8AP$dm;FT*-m#4xxWd%P{likw~8mgaLHZ5DZMd=GALeox5Smb<2d@v#AW7Fpe8KV zm@->I^zH>SrJVPTf?N3t(_-6gKeo3%d*psU*}J{`%D&4=ftDoWa|lkZ`N!vODrL5t zX(pnK0>7DKy%S(oW+sT66XHHr0f#`xl5McGU-WFsp388Dlq=Pjy4QB!yvycl@7n;i zfZs>Y@DrUJtDEMyt~>f|Ey&b3AB%pSJYeF|u86@aV$L7Ky=ZVmd1*LkIO51^8y&}^OOTHE|(%!j0;uF01gJuG3XurHX zNZIw<>vIbEFI@oBzM<^;*F&c#&7a34kNo=BkF?TfG_-THLw~M%{OHfs%FYGLpbVk~ zS38e-UDrQU4n7lK(N0tH(e%~6+0(6XV}`N#^ot#PZ@MUd)#|avYOSf>vVj5l>e!(;U5aXBq^8u?c0Q{lxiecjf0_echEiT5w(W!Fd0OgW_TV z(=*_=(F5b8C75dcLxZ_mq-&0hRi}$AS2B#6^Gmsbi$&*<$i)tg)OXsf_!B6{5@3f? zj@G`7u_8|yR4G?R-u*H@X-h*J;2Og8H))j0SIG|J#h*5n{_@Def&zvdTgb{q3MS8w zBNE;XA(Tls-RSj$KTkiqj7Ye;G%BGXgmWi0GD*0Xrc%ITJu9tr=G}gP=J)nd003{m!-wpP7YrSj%P@71{rabrlV= zhH(DNX$Y(@Y7k}Y&A#S&N!t~JX|))`b3I_`~u#g&)-9E`I`UPc9*YM!`*N_ zx1FRtUuTnS#oGN@x*4xu1En2LW+T?q+L$t}1{*9Bx%_k&8+I8wzI~HU6*)-kD7j*G z{PT5n`h$d3vp8j(I8wYkj@w>{k`U=$D~@#)nxAU0C`I;7+*8Y%DU|dqgd1g+R$-r7 zHQUD1%`;2q`Z^-YQIw`iJruWI(eU-&OjbfhWSoo_;=1oAeyu0=VX{`a-$}>s5};2s zGcbRm^2i|j?-SsPxO$jY8GFgLXNmL|e~&0@2cqv!(;lA6;hgOub)80{xQ1(4%`md8mw;GO%rl;Sl4j@7oislhJmG zR1tx{&57WUf`Tll7)Ueke|!aldNK<8X`|7c#VQ0IUZ4K`0p`p*t5aLWL7kd;`BkGd z5rktkR74@2G7Mdt<7slZNr z(cmkX5L6{QESLVX4=JzXBjyQ53vESd(GhFVO^Xl)>yDdrw_L0!^|s=9`>GI)W%oPS zK4Mu)SLfUw(`L=n9WsCbl9D@T2NHCtM3bA%xp5zh8yIU{>jvxaDheho)G~+F`6Rws z_0K*0eEndnlll9v))xv_yQ!kCNEI0@b-OvOjdNNAOd3II@r$F~ecSyGNSp03#Bi zcVSduhlvQJ{BvM0;%Qs1z51U=!+Ig-=$|f{ozkjeB5h7pEWjnOx@N&oyE^$`sV#@(}BUgU(yk@BDOML)~fYO zwR~rP0n>cSXG&{`%E9&z_Z6kj{sF$DXU@+ih4U<5zC)34B7+~mk!L|}tCLLy5O@0n zjL|@u2-Ja%Nu~a$;kMrGMx|F_@xtFE&P+DY9gNZyuf%%kl9Mh7`u&l9uN9x9!^|1^ zwm~3F@kzQ$o9g9(21)UUXGg6J%Tj0c9((FWLIJzN)?p9syglm$fZ0DVM!(GDP)3ILLy6vih}B~@Albiy9b7!{=cWchtyG|3VU*f(Mb2>_^(4=73-2J9fKPE zv+Io$0+0$%%9Ot$f-*+%gz~hafkJQ&Tws)kSIUGSU_aRP3+O){@%b6&?)Ir`ZCQeW 
zZ$j22MgN_VuBuTxI-rpfI6B!->mGLWN_j2$_84Ca1z9t z`G0vcDa@^z=*sG>_z6c-D&*tY9(kEVheb_;Hh&Kmwti%lbp<<1<5#%mH`|lL1V-5E z@UAuClHewTXIJ&D<#UJeyU~+<@zv{(p7)woUA)J+_C-&$0Zq5aAeDAjyLXUR+uIT{ zpsUi34oLYhW65ZDEZ8q;FJbEYjwT`wwB_2Q)|FYxO%1C}l5@cymw6=eeUsaT`*cs|1xS?nZdK_iX^e7R7 z%pnZ^J&D{((BBJO0-`rE!tHcvoyspA&m97#d*I8p+0=I@rw!Le893vbiRXcnk&U7W zWHqN>1=e~$Chd{>mFMIB*&5ZUpopc|+LswYI8m`|sF5lf=YMwMq1K9dBx8li^0+ZX zM&~>OS7q%%&}xvLlH#p4!pX@wNX`=%g>3NLf7MEUcca2#s<5j)J{kxqV+&6=*W*$= z2@~-;>Ve{I8}BS^KU`ePtvnW9#Ttu`N68FGXevMqN-**nSMWTDv+(t#AkM?UxiEZh z1H^ywzocxLzhMY$RYB7)UHlKS zJSX0Tp(ovXa`F1}AJ-XEPMXcWMi_H>V~Jp~z&N#LvdmB$)I73lTslnI8D;-|BSQK( z#NtLXLQX;jFF<{R;#1T5V6%90iPh!bmjFb17 z(iDiKtD^sG6IVhhy`HqMfcdz+*3`B7!|){#RLn|qB7gDReoN>uQNrrcL!JzIZWXoT zh?fEN4u?2C7Q{};3s3!fo;B_|iv{fdtnPWhiLYR_zg2oAz4#IjiEnx0SvxoW=Ucd3 zyQGzAp|k{L=2IP3lWYbT0l7*3pd^_FW)q$4|HhiwA?bAqVK;d~FR9`&~u zdo{VarR%L$d3u)k`1tY(&xYX^QDmF$K}rGn$GvY|@sn~DF8M{C=09k<(#@yhtQCO4 zF?WCP0U&FPK2tGRuymwAO~oO=#+OCjeAy*-q7rHlL`v?=RRnMjg)Qo+nlB{JsE~JV zLXE+)mq{|lZ+%zS$BL*n<^)_pCR1~+l&+3fiBMGSmB<%K4JJ&CvHW!8071~rcs4r+ zYQbq{;=j~^!1m?qX)iX%oQ^ijFJ9~vSwxc8fAPKnMTb=_(UvJC@n)L))W1{?%X2uG_PJ>Jyn2|Ye1G_+w zTQTk<6t9(#@OobfvNc|MxG}~R+p^(;M^oaz&glhv8Gqz>h*u=$wg~Zx!f@l}?-S?y zTZjHy!ugHE;Hyb)u`oZY?^DGWfKwA2#A8sSsV8vkrgN7Oe^l?IiUkEy;$d5<3+NsZ zY~H$^fiVY5rNQA|b1>F$#tn@3v$X$&ZENwAqX}PDSyyffFw0%xirGajCXpb$KW3#r zTht|iVSr-40w~CoRg6!A=L=F~L$4K$zFJkUxpBv_nn3rh(SxmzWgJ8W8BACKmma+yqQrwK zzNP!j>H(Um9fFH0VgAfr90I;-qnlPV9rcQrr4>CbkJt8wS-g5zgh#0zy;s=v9HO7@$z8_RlT zmu+meRh!yzH0zc7r!vigm;P*rRQlH@_y>LyL|XPxI=PXR$}ITMC!9DWlc|o>wJBdr z*s8Q--tAh^7ibNuGqJr%dkMC~ccoo}se+vaeyMQgVGklxr#1U+?me5v4 zb847Sip5pd$pZ~5afV$R!szW;GVZK$(k>k~60KHgP>E$lCCt?7^5x66)a4r;Ro$yj zN@#-kY)^D3wjS&sR*k68t(Tu(o)PgOpJnuk`CNt%VJf+k4(jf+7SE{SOZa42Uc z+Tt-3cCTIw#ug8OVfeICWlhP3jLVcx?jU!7`jv#dCN&A$%*~?rol-Togbms0r*$cj zhk|jwcoC;u?hR=I<0@e;+=WFwTIY|}J898W7s7kYD==<5v(hT9*>DKRe7@iAsNBQ_ zBnHXcOGYW@&UNm%dNq7z3=@3Ji#Hf~;w-GET`^f{mPJtp-$Nwf1(~LVP7*;mGryh0 
z3cv0#BvC&O&=2S83+g0S)@s);+{cm|k4cQN^&|$wkS4+cMS;tB5XYzj;Grx``Vqw# z05dixZ6b-J#@qDgk+bhe_#vG)#p!Rvi8pB-3zB0plKb zhf0`Xa3?W>l*!x(b#Am?;Sj=v{s3`6TZydrmQ}>)R`gTPUlg-p19e^xS8FE!g(>BC zbN>|g*rIF+)3a{{NX&M?;4Smox@T1F%KE{y-l$glMWu3AgRMsU)d)hQiZTYGg1lHB z9y76atxqqaYRPcSr)EAWn(#(^fTn5^50g~UN4H;klchGqm!$nu_kDb2nh4PwtTJ@o zYwG}qA@BCPC+=%mpcF_-T1MPVg*G+5b8g>!6Kmn^q4IZ86+AgmscUea+M*-5oH>r#wY_YGfc+0a1^38 zj0?O7olVT&fNa6O&-xXS4M+hDKQ#E?ejEz$P&2DS%uOGmVxq~?_BiO$s1JnCza!%U zeo&M+?so;0+wtfi+a3H^2%iP@qtL1C1n*gtz$f zl!eBT#c;GD;2u;m@}z650F170Q0O_=0zf$+ahCzCce92=oC3a**)MC<2M);U_2lg4 zUhqHW38kC6IlxDWLOjx01|J1m3>%y%aALk+F6H=PZ{19P~=PaL=` zND2+1j*2k|J#99fLP>6f(NbbTC?JRr)smkWjB`Tpr}`Gn@A6o1;T>|dNae8)(QqGY zpoO^Y|9(b}$&B*#f}OR>N`d8beJa4Qgq|De_=j!M@;bP4Nh}IMjOJMIHc)gKeKMJ8 z*Bs+Z6C4m|sv%OGLIs*k-e?iyJ_dmVmdkgPf3w0AH2qUC=@8nfIVD}p*NtKmOrYn| zWBP(Ng-jnB3^|Zrgms{inNjTK;m_d<+{;unSby7#>Qja&Jg@B%=2jK}xLInBwZvf| z!3Txt=iiAU@1!Otf4H>J7R?Vj+EK6Yw`7`35r&3_6x7toY4B@yCPj-5H&BKV8uOEZ zaqbmN4eu}&%s=4ErJaC^9hpiJ&->>l#aNhG>T^Jd%z*0S>9wAlmh-uE7#L8?*cApN zQ!p^l33Ue~7inkmix>M-VMKJ@Be_~~thMF@xbkUDy=mg_Z-ihsri;1d`3}bhp+hLJ z8r40ZWL*QFaGNj@_nWG^`iv;pNrzBkT(6679D&ecFScyM2i*42TTpWo&vGOrgm7pl zV3h4^ zt51PTzp&uB18QUy=bC~r9u28oOS>L$>N}8&42lBy;4#c0*&k>@b*OP-(P7Dmi~%9D zP{53?ZhRK(%e}|&8$BqO#B=M*wF8OvEGT*MM2jCSF&GUO6IM{@1q2X=ptVZ5iZbL# z`Qd8NU|siz!?aCTkYpF-l1_tW8~a$1+1X`M-U!7Lz{#Q{6=1&xNd;ASXx%pln=;)9 zLWiDVD3~2R=ZEZdzE}`*5xv2t4vN5zb5UMzrGKyup`ifs}3ZXiew846beGez`pYuJZ?JbE2iIG*YthiD>vj$%l($(7oisMjN`I%}HIV5sx+=k!WBu{tLP*shmU#w0wEWI}X zC1XAJf^)uhcJH$;JREwfhS6>wK(t|W4CJ^n?(C}scs~`eHwm6-YioO}zqraY+9H;~ zqe5s2Y6EY8*evbAxVR4W;ctSkncYF}!^6eA*cr#-^;km`<1I3dNq!V6&C8OBsECQl zXNM6YpC_iIh!I?wBOtx_JVYh|*OSwce*xg)_HTB`wX|f@-x-lUEQm0>Cm%+=2+9Na zjcDU@(K7ULH{=t66|?I!+aTG#Y9rXns0TXwhd}hT-TpZ+`T0VT`sdW^x9{rKx;v;@ zMKnJIdhz7m!u(X#^X8B<@I1>6vrti(O2@B49|zy6(Jcf$)fe|ZGC$u`Epvc)&-TVA;`^)B>kUU`G%@>2XO+QK))S(^ z!OSvE90FBQ)Sw%FvH;sxv_giIbrc2hH?S10_<;n$WCc5wo`lR+cqKbfbz{ZcR^B+| zll4?s%AmHZB=rZ$Dg1ZCl1#6tv|==v`T{pH)O&Bu?+X<#pK3%5C^7UKJJl;d&Guo; 
zU{e@LcaYI&l?`;r*USuN!;ab6_#=js3~?fR_k)s-+2>xO`Rv`I*KWx{Y$?9lvMuUN zN&Z;c7xvOrIhb=j>78?Um!IHS%0?>Sba0RkjTV~Zt0}r1lTPIrSQXpOeUy@vGGrRkdCVH+zA~bu*h;s$Gz3?d($JS^*-}r-%(pYcV}l+TQmmxBq2hb zeJ-?WNyF-=Lg>KL!x!KJrY5ncTP-TQS)lHqJCRSbW!IS2eXrf-B(dWSE64RPcgg?A zFN%;kPfp%g!$4?*v4_EB;}yeo zB#nPgFOpWKib;6&t;sNlSl+$cF_y2tZg;H#@6Hv0)(LE>tGpLFp?eZE4S%}1tGSVu z*pamt7$(1;sjI8^mWJyzjwK|&vep-7o#D>yPb=cu-J0(VwMer>}}RI|cZ1uN#$$U;9-+LIuz8Ru%(mmOLhI){=koa8lUT~F^#XHgIFSA`Ue2Fh!C=*NV&w&in^6KVTetq%t z0pMSK25Fpt;9vG8*DEr$=-K8*t5{hCwkrw$81!9Z|Fgtaw@{e=ZGl&%K|X9#M7DMz z*_O^}Yk>}V5CHPz4d2~4i#o2f@)8q2xbVPRGmh!8;+GU*aX!P6H&t|%>eu_7u1;X0 zo7y{$6@Kl4jfv-R(^Ygb!y7Ud2p%Ee&bpJ_B8Y_WF0kwkNP&kD;;_W}6Jo;>;!ZPl z@|E-uj$psa5{$WDd}plCL<7o8d16;6E0739VWVS#aRCg!7v?E&Bm#pBoTfJl?diKa z%vl}>dDpL*InlA2WlZ9d&;2@m?YC=KxODU4pC?xzP!L0oNoHCbxPO=B;S6R%$hqd= zKIRe_-xmX^ol=NI>$|}I+%|oB&pD?K>uwA429y!;JzdaLR8-WsbDNGz(F%di4hOJn z$)1F*_(Bp++FA=+IEw&g81-FC%XXrRqE~%7pD}$D&7W_XncraBq#wm2XS0F;IwzaB zxDUXrt*0NuZ+A;eOJ^IP zil7klB)cveSr8hoXDr-wOrp@hhqRL7A@1jose!u8a!v+6z`_FU@wD1^kCq1~pnF}j zAhg<2*V3Go4IWzSx>H1KYvMl9H02P%!Gz@xIGK^Gh55(IT?|5nkTaZaAAQ z$;ikWAVB{@O`c#{4ez~YQvHgw#klY= zyqEksoF2X?d$FT91|DP0q4f*{Pu|E%lmzbt|{|6sdg`r$2 zEZjh}fBpQEbdPL-+yDCqs#9Piz_kC@&$;LIuYbcK&k0V8s!P_@ z|8CQH<01&R|7VlX&{e_xG>9S(g*W!^cF9;}5dZt%@OJ}LwpcrrgYbW}^BAm8 z8+@a4jQihqX3z(vrlwxpDR^GW0T!UAUYuO04y2&akDU`;abjT4J#XDFR8#sWSW)IR zz#xO@0S<9~13va9=0&hgh3p0|dxOPm2U^ZTmaP#IAJqR_53DdV4hpn_PHv`Fw&K8B zwO6+5Gxb8g`x}8M(^?58S~VM(tq!)|Kfe@m|BWi}`ekbmc#ZbcRUhvu)w-|d0$=ZO zB4M(Jx=>#^_~Lhd843NTvxQ{lar`#D7xT1pqIOC8i1&~2*eEa?o#n$`OO8R*S{OX2 z+FfCS*QX~(x4~Y0*mzf~WqDlvw3vie)R~d>^5r>0tV>ADi^^!ycW}gr*h2n}H&DI? 
zhlU=u%83{0?2{vT*bH!s_s#iRh>z>+oAVjk6Jy}K8-y}>Wx6=0#kM~uO5Y{*?Z=TK zy_r67)5uZ?+YMs&ab=3q+1-#KMZ7+p7xQ{*Uwj&}v~;e!1mn7KA*igZEH#l^|9jz` z4Y@{tDEl#4jxz5PQd4<&1q?h1p+<0fES=Kw^75o!9_$iYU{)J2%QZf4w%!OR{Q?Bt zjqTDX4{uJR6%o`kWUZ<`h-XXweK`%dpMlC*3+IP^i6sczh*w=S9hiG++6t02`{MD< z2OdzWDebH0QldVYXE>z<0fKRk2l1L3*?ge|NTA{#nn^An42-@v>z_H9dBv)JO2x)m zvf^L?Kne^Ij1M!bK&w5*REy=OF+I-HSD)QE@A^QraP#V5aG*D)#1#R=rN@itpvvqo z>`{4MsX40HH$-PZC093*T zu=P$UIJFUFjrqtS=p!{;M`Apr!C;D zt>oMAc&>&0!RN9}z8!@%z@aYokCwf_v>AMHk?Y?so^94QR@LrMa?Uw==-_*g?|f7E znID*bZ!OkY1Cok+*yn3UvFo4fK8>x^39y$t&w5JMlBE;hM z)$H7+1gw=c-=1Ce=VJbJA@*lq*!Q#Z$;_zEn8V+ruBMiCd+n`lx!J#b+BPyjToALR zK4^tJQt5W!vXXKT`@=<7*A@imMLh}BM=By4<~2NbZMzDTy}1`CJL^lJIkKz*Fyb2X zP>N|_v1|)0mS4|Y Date: Sat, 6 May 2023 18:35:23 +0800 Subject: [PATCH 15/26] Refactor/chat ray (#23) * [chat] lora add todo * [chat] remove unused pipeline strategy * [chat] refactor example structure --- .../ray/1mmt_dummy.py | 3 +- .../ray/mmmt_dummy.py | 3 +- applications/Chat/coati/models/lora.py | 2 +- .../Chat/coati/ray/pipeline_strategy.py | 100 ------------------ applications/Chat/examples/ray/.gitignore | 1 - applications/Chat/examples/ray/benchmark.sh | 39 ------- 6 files changed, 5 insertions(+), 143 deletions(-) rename applications/Chat/{examples => benchmarks}/ray/1mmt_dummy.py (98%) rename applications/Chat/{examples => benchmarks}/ray/mmmt_dummy.py (98%) delete mode 100644 applications/Chat/coati/ray/pipeline_strategy.py delete mode 100644 applications/Chat/examples/ray/.gitignore delete mode 100644 applications/Chat/examples/ray/benchmark.sh diff --git a/applications/Chat/examples/ray/1mmt_dummy.py b/applications/Chat/benchmarks/ray/1mmt_dummy.py similarity index 98% rename from applications/Chat/examples/ray/1mmt_dummy.py rename to applications/Chat/benchmarks/ray/1mmt_dummy.py index eba5213a83d3..47985d5c00aa 100644 --- a/applications/Chat/examples/ray/1mmt_dummy.py +++ b/applications/Chat/benchmarks/ray/1mmt_dummy.py @@ -63,7 +63,8 @@ def model_fn(): 
critic_cfg = AutoConfig.from_pretrained(args.critic_pretrain) actor = get_actor_from_args(args.model, config=actor_cfg).requires_grad_(False).half().cuda() critic = get_critic_from_args(args.critic_model, config=critic_cfg).requires_grad_(False).half().cuda() - reward_model = get_reward_model_from_args(args.critic_model, config=critic_cfg).requires_grad_(False).half().cuda() + reward_model = get_reward_model_from_args(args.critic_model, + config=critic_cfg).requires_grad_(False).half().cuda() if args.initial_model_quant_ckpt is not None and args.model == 'llama': # quantize initial model with low_resource_init(), no_init_weights(): diff --git a/applications/Chat/examples/ray/mmmt_dummy.py b/applications/Chat/benchmarks/ray/mmmt_dummy.py similarity index 98% rename from applications/Chat/examples/ray/mmmt_dummy.py rename to applications/Chat/benchmarks/ray/mmmt_dummy.py index 082f4851777e..a72eb9bb87de 100644 --- a/applications/Chat/examples/ray/mmmt_dummy.py +++ b/applications/Chat/benchmarks/ray/mmmt_dummy.py @@ -63,7 +63,8 @@ def model_fn(): critic_cfg = AutoConfig.from_pretrained(args.critic_pretrain) actor = get_actor_from_args(args.model, config=actor_cfg).requires_grad_(False).half().cuda() critic = get_critic_from_args(args.critic_model, config=critic_cfg).requires_grad_(False).half().cuda() - reward_model = get_reward_model_from_args(args.critic_model, config=critic_cfg).requires_grad_(False).half().cuda() + reward_model = get_reward_model_from_args(args.critic_model, + config=critic_cfg).requires_grad_(False).half().cuda() if args.initial_model_quant_ckpt is not None and args.model == 'llama': # quantize initial model with low_resource_init(), no_init_weights(): diff --git a/applications/Chat/coati/models/lora.py b/applications/Chat/coati/models/lora.py index bd9cf3320818..2a9059e6901e 100644 --- a/applications/Chat/coati/models/lora.py +++ b/applications/Chat/coati/models/lora.py @@ -62,7 +62,7 @@ def T(w): # Make sure that the weights are not merged if 
self.r > 0: if not hasattr(self, "lora_A") or not hasattr(self, "lora_B"): - # csric: temporary fix + # FIXME(csric): temporary fix self.lora_A = nn.Parameter(self.weight.new_empty((self.r, self.in_features))) self.lora_B = nn.Parameter(self.weight.new_empty((self.out_features, self.r))) self.reset_parameters() diff --git a/applications/Chat/coati/ray/pipeline_strategy.py b/applications/Chat/coati/ray/pipeline_strategy.py deleted file mode 100644 index 4b01a45b176e..000000000000 --- a/applications/Chat/coati/ray/pipeline_strategy.py +++ /dev/null @@ -1,100 +0,0 @@ -# WIP - -import os -import random -from functools import partial - -import numpy as np -import torch -from coati.models.base import Actor, Critic, RewardModel -from coati.trainer.strategies import NaiveStrategy, Strategy -from torch._C._distributed_rpc import _is_current_rpc_agent_set - -import colossalai -from colossalai.fx import ColoTracer -from colossalai.fx.passes.adding_split_node_pass import balanced_split_pass, split_with_split_nodes_pass -from colossalai.pipeline.middleware.adaptor import get_fx_topology -from colossalai.pipeline.pipeline_process_group import ppg -from colossalai.pipeline.rpc._pipeline_schedule import OneFOneBPipelineEngine - -rpc_is_initialized = _is_current_rpc_agent_set - - -class PipelineModel(torch.nn.Module): - ''' - Actor has 2 kinds of jobs: forward and generate. 
- better to just pipelinize the inner model - ''' - - def __init__( - self, - model: torch.nn.Module, - stage_num: int, - num_microbatches: int, - data_kwargs=None, - ): - super().__init__() - - # create partition module - def create_partition_module(pp_rank: int, stage_num: int, model, data_kwargs): - model.eval() - tracer = ColoTracer() - meta_args = {k: v.to('meta') for k, v in data_kwargs.items()} - graph = tracer.trace(root=model, meta_args=meta_args) - gm = torch.fx.GraphModule(model, graph, model.__class__.__name__) - annotated_model = balanced_split_pass(gm, stage_num) - top_module, split_submodules = split_with_split_nodes_pass(annotated_model, merge_output=True) - topo = get_fx_topology(top_module) - for submodule in split_submodules: - if isinstance(submodule, torch.fx.GraphModule): - setattr(submodule, '_topo', topo) - return split_submodules[pp_rank + 1] - - def partition(model, data_kwargs: dict, pp_rank: int, chunk: int, stage_num: int): - partition = create_partition_module(pp_rank, stage_num, model, data_kwargs) - return partition - - self.inference_engine = OneFOneBPipelineEngine( - partition_fn=partial(partition, model, data_kwargs), - stage_num=stage_num, - num_microbatches=num_microbatches, - device='cuda', - ) - - def forward(self, **model_inputs): - return self.inference_engine.forward_backward(**model_inputs, forward_only=True) - - -class PPStrategy(NaiveStrategy): - """ - Strategy for Pipeline inference (inference only!) 
- - master node only - """ - - def __init__(self, seed: int = 42): - self.seed = seed - super().__init__() - - def setup_distributed(self) -> None: - colossalai.launch_from_torch({}, seed=self.seed) - ppg.set_global_info(rank=int(os.environ['RANK']), - world_size=int(os.environ['WORLD_SIZE']), - dp_degree=1, - tp_degree=1, - num_worker_threads=128, - device="cuda") - - def model_init_context(self): - return super().model_init_context() - - def setup_model(self, model: torch.nn.Module) -> torch.nn.Module: - if isinstance(model, Actor) or \ - isinstance(model, RewardModel) or \ - isinstance(model, Critic): - model.model = PipelineModel(model.model) - - def set_seed(self, seed: int) -> None: - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) diff --git a/applications/Chat/examples/ray/.gitignore b/applications/Chat/examples/ray/.gitignore deleted file mode 100644 index 4cf8dd15619e..000000000000 --- a/applications/Chat/examples/ray/.gitignore +++ /dev/null @@ -1 +0,0 @@ -logs/* \ No newline at end of file diff --git a/applications/Chat/examples/ray/benchmark.sh b/applications/Chat/examples/ray/benchmark.sh deleted file mode 100644 index 3852684007b7..000000000000 --- a/applications/Chat/examples/ray/benchmark.sh +++ /dev/null @@ -1,39 +0,0 @@ - -PROMPT_PATH=/home/lccsr/data3/awesome-chatgpt-prompts/prompts.csv - -num_trainers=4 -num_makers=4 - -# "facebook/opt-2.7b" -for pretrain in "facebook/opt-1.3b" "facebook/opt-6.7b" "facebook/opt-13b" -do - - for experience_batch_size in 16 32 64 - do - for train_batch_size in 16 32 64 - do - for update_steps in 8 32 128 - do - # set a big enough experience_steps for twice maker-update - experience_steps=$((2*num_trainers*train_batch_size*update_steps/num_makers/experience_batch_size)) - - config_string=${num_trainers}_${num_makers}_pretrain_${pretrain##*/}_experience_batch_size_${experience_batch_size}_train_batch_size_${train_batch_size}_update_steps_${update_steps}_experience_steps_${experience_steps} - echo 
running: ${config_string} - - nohup python mmmt_prompt.py \ - --prompt_path $PROMPT_PATH \ - --trainer_strategy colossalai_gemini --maker_strategy naive \ - --model 'opt' \ - --pretrain $pretrain \ - --critic_pretrain "facebook/opt-350m" \ - --num_trainers $num_trainers \ - --num_makers $num_makers \ - --experience_steps $experience_steps \ - --experience_batch_size $experience_batch_size \ - --update_steps $update_steps \ - --train_batch_size $train_batch_size \ - --debug > logs/output_${config_string}.txt 2>&1 - done - done - done -done \ No newline at end of file From edba47e01e4702b6cc62d2c9046affcb0fd55aba Mon Sep 17 00:00:00 2001 From: ver217 Date: Mon, 8 May 2023 14:06:06 +0800 Subject: [PATCH 16/26] [chat] setup ci for ray --- applications/Chat/examples/ray/test_ci.sh | 8 ++++++++ applications/Chat/examples/test_ci.sh | 2 ++ 2 files changed, 10 insertions(+) create mode 100755 applications/Chat/examples/ray/test_ci.sh diff --git a/applications/Chat/examples/ray/test_ci.sh b/applications/Chat/examples/ray/test_ci.sh new file mode 100755 index 000000000000..416458dceee4 --- /dev/null +++ b/applications/Chat/examples/ray/test_ci.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +set -xe + +export RAY_NAMESPACE=admin +export DATA=/data/scratch/chatgpt/prompts.csv + +python mmmt_prompt.py --prompt_path $DATA --num_makers 2 --num_trainers 2 --trainer_strategy colossalai_gemini --model opt --critic_model opt --pretrain facebook/opt-350m --critic_pretrain facebook/opt-125m --experience_batch_size 4 --train_batch_size 2 diff --git a/applications/Chat/examples/test_ci.sh b/applications/Chat/examples/test_ci.sh index 2b049163c801..0f34921e3504 100755 --- a/applications/Chat/examples/test_ci.sh +++ b/applications/Chat/examples/test_ci.sh @@ -124,3 +124,5 @@ torchrun --standalone --nproc_per_node=2 ${BASE}/train_prompts.py --prompt_datas rm -rf ${BASE}/rm_ckpt_gpt.pt rm -rf ${BASE}/actor_checkpoint_prompts.pt + +cd ray && bash test_ci.sh && cd .. 
From 12862400c29ab31bdb50774d870621e9e6c21a76 Mon Sep 17 00:00:00 2001 From: csric <59389055+CsRic@users.noreply.github.com> Date: Wed, 10 May 2023 11:19:09 +0800 Subject: [PATCH 17/26] [chat-ray] Support LoRA trainer. LoRA weights reconstruction. (#24) * lora support prototype * lora support * 1mmt lora & remove useless code --------- Co-authored-by: csric --- .../Chat/coati/ray/detached_trainer_ppo.py | 28 ++-- .../Chat/coati/ray/experience_maker_holder.py | 32 ++++- .../Chat/coati/ray/lora_constructor.py | 122 ++++++++++++++++++ applications/Chat/coati/ray/utils.py | 27 ---- applications/Chat/examples/ray/1mmt_prompt.py | 2 + applications/Chat/examples/ray/mmmt_prompt.py | 5 +- 6 files changed, 175 insertions(+), 41 deletions(-) create mode 100644 applications/Chat/coati/ray/lora_constructor.py diff --git a/applications/Chat/coati/ray/detached_trainer_ppo.py b/applications/Chat/coati/ray/detached_trainer_ppo.py index d3dfc6e93a46..5d84b58ca0e3 100644 --- a/applications/Chat/coati/ray/detached_trainer_ppo.py +++ b/applications/Chat/coati/ray/detached_trainer_ppo.py @@ -13,6 +13,7 @@ from .callbacks import TrainerCallback, TrainerPerformanceEvaluator from .detached_trainer_base import DetachedTrainer +from .lora_constructor import LoRAConstructor from .utils import ( get_actor_from_args, get_critic_from_args, @@ -67,6 +68,7 @@ def __init__( callbacks: List[TrainerCallback] = [], eval_performance: bool = False, debug: bool = False, + update_lora_weights: bool = False, ) -> None: # set environment variables if env_info: @@ -106,6 +108,8 @@ def __init__( if self._debug: print(f'[trainer{get_rank()}] will send state dict to {experience_maker_holder_name_list}') + self._update_lora_weights = update_lora_weights + @ray.method(concurrency_group="model_io") @torch.no_grad() def _update_remote_makers(self, fully_update: bool = False, **config): @@ -121,16 +125,18 @@ def _update_remote_makers(self, fully_update: bool = False, **config): # sending loop tasks = [] - for 
state_dict_shard in self._get_model_state_dict_shard(self.actor, **config): + for state_dict_shard in self._get_model_state_dict_shard(self.actor, fully_update = fully_update, **config): for target_holder in self.target_holder_list: tasks.append( target_holder.update_experience_maker.remote(new_actor_state_dict=state_dict_shard, + new_actor_lora_config_dict=self._get_model_lora_config_dict(self.actor), fully_update=fully_update)) # sending loop - for state_dict_shard in self._get_model_state_dict_shard(self.critic, **config): + for state_dict_shard in self._get_model_state_dict_shard(self.critic, fully_update = fully_update, **config): for target_holder in self.target_holder_list: tasks.append( target_holder.update_experience_maker.remote(new_critic_state_dict=state_dict_shard, + new_critic_lora_config_dict=self._get_model_lora_config_dict(self.critic), fully_update=fully_update)) ray.get(tasks) # mark end @@ -177,10 +183,16 @@ def strategy_save_actor_optim(self, path: str, only_rank0: bool = False) -> None def strategy_save_critic_optim(self, path: str, only_rank0: bool = False) -> None: self.strategy.save_optimizer(self.critic_optim, path, only_rank0) - def _get_model_state_dict_shard(self, model: torch.nn.Module, **config): - # try: - # self.strategy.merge_lora_weight(model) - # except AttributeError: - # pass + def _get_model_state_dict_shard(self, model: torch.nn.Module, fully_update = False, **config): for state_dict in self.strategy.get_model_state_dict_shard(model, **config): - yield state_dict_to(state_dict) + if not self._update_lora_weights or fully_update: + yield state_dict_to(state_dict) + else: + state_dict_lora, _ = LoRAConstructor.filter_state_dict_lora(state_dict) + yield state_dict_to(state_dict_lora) + + def _get_model_lora_config_dict(self, model: torch.nn.Module): + if not self._update_lora_weights: + return None + unwrapped_model = self.strategy.unwrap_model(model) + return LoRAConstructor.extract_lora_config(unwrapped_model) \ No newline at 
end of file diff --git a/applications/Chat/coati/ray/experience_maker_holder.py b/applications/Chat/coati/ray/experience_maker_holder.py index 573771ad6258..8551ef1eacef 100644 --- a/applications/Chat/coati/ray/experience_maker_holder.py +++ b/applications/Chat/coati/ray/experience_maker_holder.py @@ -19,8 +19,13 @@ from tqdm import tqdm from .callbacks import ExperienceMakerPerformanceEvaluator, MakerCallback -from .utils import get_model_numel, get_rank, get_world_size, is_rank_0, set_dist_env - +from .utils import (get_model_numel, + get_rank, + get_world_size, + is_rank_0, + set_dist_env, + state_dict_to) +from .lora_constructor import LoRAConstructor @ray.remote(concurrency_groups={"experience_io": 1, "model_io": 1, "compute": 1}) class ExperienceMakerHolder: @@ -45,6 +50,7 @@ def __init__( callbacks: List[MakerCallback] = [], eval_performance: bool = False, debug: bool = False, + update_lora_weights: bool = False, **generate_kwargs): # set environment variables if env_info: @@ -77,6 +83,11 @@ def __init__( self._is_fully_initialized = not sync_models_from_trainers self._debug = debug + self._update_lora_weights = update_lora_weights + if self._update_lora_weights: + self.actor_lora_constructor = LoRAConstructor() + self.critic_lora_constructor = LoRAConstructor() + self.target_auto_balance = False self._target_idx = 0 @@ -166,7 +177,9 @@ def workingloop(self, dataloader_fn: Callable[[], Iterable], num_epochs: int = 1 @ray.method(concurrency_group="model_io") def update_experience_maker(self, new_actor_state_dict: Dict[str, Any] = None, + new_actor_lora_config_dict: Dict[str, Any] = None, new_critic_state_dict: Dict[str, Any] = None, + new_critic_lora_config_dict: Dict[str, Any] = None, fully_update: bool = False, chunk_start: bool = None, chunk_end: bool = None): @@ -188,10 +201,19 @@ def update_experience_maker(self, with torch.no_grad(): if new_actor_state_dict is not None: - self.experience_maker.actor.model.load_state_dict(new_actor_state_dict, 
strict=False) + if not self._update_lora_weights or fully_update: + self.experience_maker.actor.model.load_state_dict(new_actor_state_dict, strict=False) + else: + new_actor_state_dict = state_dict_to(new_actor_state_dict, device=torch.cuda.current_device()) + state_dict_increasae = self.actor_lora_constructor.reconstruct_increase(new_actor_state_dict, new_actor_lora_config_dict) + self.actor_lora_constructor.load_state_dict_increase(self.experience_maker.actor.model, state_dict_increasae) if new_critic_state_dict is not None: - self.experience_maker.critic.load_state_dict(new_critic_state_dict, strict=False) - + if not self._update_lora_weights or fully_update: + self.experience_maker.critic.load_state_dict(new_critic_state_dict, strict=False) + else: + new_critic_state_dict = state_dict_to(new_critic_state_dict, device=torch.cuda.current_device()) + state_dict_increasae = self.critic_lora_constructor.reconstruct_increase(new_critic_state_dict, new_critic_lora_config_dict) + self.critic_lora_constructor.load_state_dict_increase(self.experience_maker.critic, state_dict_increasae) # the lock must be released after both actor and critic being updated if chunk_end: diff --git a/applications/Chat/coati/ray/lora_constructor.py b/applications/Chat/coati/ray/lora_constructor.py new file mode 100644 index 000000000000..599a58248728 --- /dev/null +++ b/applications/Chat/coati/ray/lora_constructor.py @@ -0,0 +1,122 @@ +from typing import Any, Callable, Dict, List, Optional +from collections import OrderedDict +from dataclasses import dataclass + +import torch +import torch.nn as nn +from loralib.layers import LoRALayer +from coati.models.lora import LoraLinear + + +@dataclass +class LoRAConfig: + r: int = 0 + lora_alpha: int = 1 + lora_dropout: float = 0 + fan_in_fan_out: bool = False + + +class LoRAConstructor: + ''' + Tools for reconstructing a model from a remote LoRA model. + (Transferring only LoRA data costs much less!)
+ Usage: + Step 1 (Sender): + filter_state_dict_lora() + + Step 2 (Sender, Optional): + extract_lora_config() + + Step 3 (Sender): + send state_dict_lora and lora_config_dict + + Step 4 (Receiver): + reconstruct_increase() + + Step 5 (Receiver): + load_state_dict_increase() + + ''' + + def __init__(self): + self.lora_config_dict = None + + def register_lora_config(self, lora_config_dict: Dict[str, Any]): + self.lora_config_dict = lora_config_dict + + def reconstruct_increase(self, state_dict_lora: Dict[str, Any], lora_config_dict: Dict[str, Any]): + ''' + xxx.lora_A, xxx.lora_B -->> xxx.weight + Warning: the xxx.weight here is the increment actually. + ''' + if lora_config_dict is not None: + self.register_lora_config(lora_config_dict) + + state_dict_increasae = OrderedDict() + config_iter = iter(self.lora_config_dict.items()) + lora_A, lora_B, layer_prefix = None, None, None + for k, v in state_dict_lora.items(): + if k.rpartition('.')[-1] == 'lora_A': + lora_A = v + layer_prefix = k.rpartition('.')[0] + elif k.rpartition('.')[-1] == 'lora_B': + assert layer_prefix == k.rpartition('.')[0], "unmatched (lora_A, lora_B) pair" + layer_prefix_2, config = next(config_iter) + assert layer_prefix_2 == layer_prefix, "unmatched (state_dict, config_dict) pair" + lora_B = v + weight_data_increase = self._compute(lora_A, lora_B, config) + state_dict_increasae[layer_prefix + '.weight'] = weight_data_increase + lora_A, lora_B, layer_prefix = None, None, None + else: + raise ValueError('unexpected key') + return state_dict_increasae + + def _compute(self, lora_A, lora_B, config=LoRAConfig()): + def T(w): + return w.T if config.fan_in_fan_out else w + if config.r > 0: + scaling = config.lora_alpha / config.r + weight_data_increase = T(lora_B @ lora_A) * scaling + return weight_data_increase + return 0 + + def load_state_dict_increase(self, model: nn.Module, state_dict_increasae: Dict[str, Any]): + ''' + The final reconstruction step + ''' + # naive approach + 
model.load_state_dict({k: v + model.state_dict()[k] for k, v in state_dict_increasae.items()}, strict=False) + + @staticmethod + def filter_state_dict_lora(state_dict: Dict[str, Any], keep_non_lora=False): + ''' + if keep_non_lora, also return non_lora state_dict + ''' + state_dict_lora = OrderedDict() + state_dict_non_lora = OrderedDict() + for k, v in state_dict.items(): + if 'lora_A' in k or 'lora_B' in k: + state_dict_lora[k] = v + elif keep_non_lora: + state_dict_non_lora[k] = v + if keep_non_lora: + return state_dict_lora, state_dict_non_lora + else: + return state_dict_lora, None + + @staticmethod + def extract_lora_config(model: nn.Module) -> Dict[str, LoRAConfig]: + ''' + extract LoraLinear model. + return OrderedDict(): name -> LoRAConfig + ''' + lora_config_dict = OrderedDict() + + for name, child in model.named_modules(): + if isinstance(child, LoraLinear): + lora_config_dict[name] = LoRAConfig(r=child.r, + lora_alpha=child.lora_alpha, + lora_dropout=child.lora_dropout, + fan_in_fan_out=child.fan_in_fan_out) + + return lora_config_dict diff --git a/applications/Chat/coati/ray/utils.py b/applications/Chat/coati/ray/utils.py index 7e36a6b08589..4361ee236771 100644 --- a/applications/Chat/coati/ray/utils.py +++ b/applications/Chat/coati/ray/utils.py @@ -150,30 +150,3 @@ def state_dict_to(state_dict: Dict[str, Any], for k, v in state_dict.items(): new_state_dict[k] = v.to(dtype=dtype, device=device) return new_state_dict - - -def state_dict_filter_lora(state_dict: Dict[str, Any], keep_non_lora = False): - ''' - if keep_non_lora, also return non_lora state_dict - ''' - state_dict_lora = OrderedDict() - state_dict_non_lora = OrderedDict() - for k, v in state_dict: - if 'lora_A' in k or 'lora_B' in k: - state_dict_lora[k] = v - elif keep_non_lora: - state_dict_non_lora[k] = v - if keep_non_lora: - return state_dict_lora, state_dict_non_lora - else: - return state_dict_lora - - -def state_dict_lora_reconstruct(state_dict_lora: Dict[str, Any]): - ''' - 
xxx.lora_A, xxx.lora_B -->> xxx.weight - TODO - ''' - state_dict_reconstruct = OrderedDict() - - \ No newline at end of file diff --git a/applications/Chat/examples/ray/1mmt_prompt.py b/applications/Chat/examples/ray/1mmt_prompt.py index bd7224aae749..06e522962f0e 100644 --- a/applications/Chat/examples/ray/1mmt_prompt.py +++ b/applications/Chat/examples/ray/1mmt_prompt.py @@ -73,6 +73,7 @@ def trainer_model_fn(): buffer_limit=16, eval_performance=True, debug=args.debug, + update_lora_weights = not (args.lora_rank == 0), ) for i, env_info_trainer in enumerate(env_info_trainers) ] @@ -100,6 +101,7 @@ def model_fn(): experience_batch_size=args.experience_batch_size, kl_coef=0.1, debug=args.debug, + update_lora_weights = not (args.lora_rank == 0), # sync_models_from_trainers=True, # generation kwargs: max_length=512, diff --git a/applications/Chat/examples/ray/mmmt_prompt.py b/applications/Chat/examples/ray/mmmt_prompt.py index 6f43d8950758..941e080ebb5e 100644 --- a/applications/Chat/examples/ray/mmmt_prompt.py +++ b/applications/Chat/examples/ray/mmmt_prompt.py @@ -20,7 +20,6 @@ from transformers import AutoConfig, AutoTokenizer from transformers.modeling_utils import no_init_weights - def get_free_port(): with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind(('', 0)) @@ -86,6 +85,7 @@ def model_fn(): env_info=env_info_maker, kl_coef=0.1, debug=args.debug, + update_lora_weights = not (args.lora_rank == 0), # sync_models_from_trainers=True, # generation kwargs: max_length=512, @@ -119,6 +119,7 @@ def trainer_model_fn(): buffer_limit=16, eval_performance=True, debug=args.debug, + update_lora_weights = not (args.lora_rank == 0), ) for i, env_info_trainer in enumerate(env_info_trainers) ] @@ -156,6 +157,7 @@ def tokenize_fn(texts): for trainer_ref in trainer_refs: wait_tasks.append(trainer_ref.fit.remote(total_steps, args.update_steps, args.train_epochs)) + ray.get(wait_tasks) @@ -187,5 +189,6 @@ def tokenize_fn(texts): 
parser.add_argument('--quant_group_size', type=int, default=128) parser.add_argument('--debug', action='store_true') args = parser.parse_args() + ray.init(namespace=os.environ["RAY_NAMESPACE"]) main(args) From d6977623e275977a75359f4153efaeae90a54e9d Mon Sep 17 00:00:00 2001 From: ver217 Date: Mon, 15 May 2023 14:09:33 +0800 Subject: [PATCH 18/26] [chat] fix test ci for ray --- applications/Chat/examples/test_ci.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/Chat/examples/test_ci.sh b/applications/Chat/examples/test_ci.sh index 0f34921e3504..57034aa9989e 100755 --- a/applications/Chat/examples/test_ci.sh +++ b/applications/Chat/examples/test_ci.sh @@ -125,4 +125,4 @@ rm -rf ${BASE}/rm_ckpt_gpt.pt rm -rf ${BASE}/actor_checkpoint_prompts.pt -cd ray && bash test_ci.sh && cd .. +cd ${BASE}/ray && bash test_ci.sh && cd ${BASE} From a18ac6a781891ba677e75ba3f222cfb5bed236eb Mon Sep 17 00:00:00 2001 From: ver217 Date: Mon, 15 May 2023 14:45:39 +0800 Subject: [PATCH 19/26] [chat] fix test ci requirements for ray --- applications/Chat/examples/ray/requirements.txt | 1 + applications/Chat/examples/ray/test_ci.sh | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 applications/Chat/examples/ray/requirements.txt diff --git a/applications/Chat/examples/ray/requirements.txt b/applications/Chat/examples/ray/requirements.txt new file mode 100644 index 000000000000..e0275631807f --- /dev/null +++ b/applications/Chat/examples/ray/requirements.txt @@ -0,0 +1 @@ +ray diff --git a/applications/Chat/examples/ray/test_ci.sh b/applications/Chat/examples/ray/test_ci.sh index 416458dceee4..895f7de0fea9 100755 --- a/applications/Chat/examples/ray/test_ci.sh +++ b/applications/Chat/examples/ray/test_ci.sh @@ -1,8 +1,12 @@ #!/bin/bash set -xe +BASE=$(realpath $(dirname $0)) export RAY_NAMESPACE=admin export DATA=/data/scratch/chatgpt/prompts.csv -python mmmt_prompt.py --prompt_path $DATA --num_makers 2 --num_trainers 2 
--trainer_strategy colossalai_gemini --model opt --critic_model opt --pretrain facebook/opt-350m --critic_pretrain facebook/opt-125m --experience_batch_size 4 --train_batch_size 2 +# install requirements +pip install -r ${BASE}/requirements.txt + +python ${BASE}/mmmt_prompt.py --prompt_path $DATA --num_makers 2 --num_trainers 2 --trainer_strategy colossalai_gemini --model opt --critic_model opt --pretrain facebook/opt-350m --critic_pretrain facebook/opt-125m --experience_batch_size 4 --train_batch_size 2 From 767513f71de08263a36e1ecc41dd67d2b9e20b57 Mon Sep 17 00:00:00 2001 From: ver217 Date: Mon, 15 May 2023 16:31:24 +0800 Subject: [PATCH 20/26] [chat] fix ray runtime env --- .../Chat/benchmarks/ray/1mmt_dummy.py | 2 +- .../Chat/benchmarks/ray/mmmt_dummy.py | 2 +- applications/Chat/examples/ray/1mmt_prompt.py | 52 ++++++++----------- applications/Chat/examples/ray/mmmt_prompt.py | 25 ++++----- 4 files changed, 34 insertions(+), 47 deletions(-) diff --git a/applications/Chat/benchmarks/ray/1mmt_dummy.py b/applications/Chat/benchmarks/ray/1mmt_dummy.py index 47985d5c00aa..93dc27da379f 100644 --- a/applications/Chat/benchmarks/ray/1mmt_dummy.py +++ b/applications/Chat/benchmarks/ray/1mmt_dummy.py @@ -174,5 +174,5 @@ def build_dataloader(size): parser.add_argument('--quant_group_size', type=int, default=128) parser.add_argument('--debug', action='store_true') args = parser.parse_args() - ray.init(namespace=os.environ["RAY_NAMESPACE"]) + ray.init(namespace=os.environ["RAY_NAMESPACE"], runtime_env={"env_vars": os.environ}) main(args) diff --git a/applications/Chat/benchmarks/ray/mmmt_dummy.py b/applications/Chat/benchmarks/ray/mmmt_dummy.py index a72eb9bb87de..3bd0fa10d69d 100644 --- a/applications/Chat/benchmarks/ray/mmmt_dummy.py +++ b/applications/Chat/benchmarks/ray/mmmt_dummy.py @@ -185,5 +185,5 @@ def build_dataloader(size): parser.add_argument('--quant_group_size', type=int, default=128) parser.add_argument('--debug', action='store_true') args = 
parser.parse_args() - ray.init(namespace=os.environ["RAY_NAMESPACE"]) + ray.init(namespace=os.environ["RAY_NAMESPACE"], runtime_env={"env_vars": os.environ}) main(args) diff --git a/applications/Chat/examples/ray/1mmt_prompt.py b/applications/Chat/examples/ray/1mmt_prompt.py index 06e522962f0e..dd1815968ac9 100644 --- a/applications/Chat/examples/ray/1mmt_prompt.py +++ b/applications/Chat/examples/ray/1mmt_prompt.py @@ -14,13 +14,13 @@ get_critic_from_args, get_reward_model_from_args, get_strategy_from_args, - get_tokenizer_from_args + get_tokenizer_from_args, ) - from torch.utils.data import DataLoader from transformers import AutoConfig from transformers.modeling_utils import no_init_weights + def get_free_port(): with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind(('', 0)) @@ -31,7 +31,8 @@ def get_local_ip(): with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: s.connect(('8.8.8.8', 80)) return s.getsockname()[0] - + + def main(args): master_addr = str(get_local_ip()) # trainer_env_info @@ -73,25 +74,25 @@ def trainer_model_fn(): buffer_limit=16, eval_performance=True, debug=args.debug, - update_lora_weights = not (args.lora_rank == 0), + update_lora_weights=not (args.lora_rank == 0), ) for i, env_info_trainer in enumerate(env_info_trainers) ] - + def model_fn(): actor = get_actor_from_args(args.model, args.pretrain).requires_grad_(False).half().cuda() critic = get_critic_from_args(args.model, args.critic_pretrain).requires_grad_(False).half().cuda() reward_model = get_reward_model_from_args(args.model, args.critic_pretrain).requires_grad_(False).half().cuda() if args.initial_model_quant_ckpt is not None and args.model == 'llama': - # quantize initial model - actor_cfg = AutoConfig.from_pretrained(args.pretrain) - with low_resource_init(), no_init_weights(): - initial_model = get_actor_from_args(args.model, config=actor_cfg) - initial_model.model = llama_load_quant(initial_model.model, args.initial_model_quant_ckpt, args.quant_bits, - 
args.quant_group_size).cuda().requires_grad_(False) + # quantize initial model + actor_cfg = AutoConfig.from_pretrained(args.pretrain) + with low_resource_init(), no_init_weights(): + initial_model = get_actor_from_args(args.model, config=actor_cfg) + initial_model.model = llama_load_quant(initial_model.model, args.initial_model_quant_ckpt, args.quant_bits, + args.quant_group_size).cuda().requires_grad_(False) else: initial_model = get_actor_from_args(args.model, args.pretrain).requires_grad_(False).half().cuda() return actor, critic, reward_model, initial_model - + # configure Experience Maker experience_holder_ref = ExperienceMakerHolder.options(name="maker1", num_gpus=1, max_concurrency=2).remote( detached_trainer_name_list=[f'trainer{i}' for i in range(args.num_trainers)], @@ -101,7 +102,7 @@ def model_fn(): experience_batch_size=args.experience_batch_size, kl_coef=0.1, debug=args.debug, - update_lora_weights = not (args.lora_rank == 0), + update_lora_weights=not (args.lora_rank == 0), # sync_models_from_trainers=True, # generation kwargs: max_length=512, @@ -114,40 +115,31 @@ def model_fn(): use_cache=True, ) - - # uncomment this function if sync_models_from_trainers is True # ray.get([ # trainer_ref.sync_models_to_remote_makers.remote() # for trainer_ref in trainer_refs # ]) - + wait_tasks = [] - + total_steps = args.experience_batch_size * args.experience_steps // (args.num_trainers * args.train_batch_size) for trainer_ref in trainer_refs: - wait_tasks.append( - trainer_ref.fit.remote(total_steps, args.update_steps, args.train_epochs)) + wait_tasks.append(trainer_ref.fit.remote(total_steps, args.update_steps, args.train_epochs)) - dataset_size = args.experience_batch_size * 4 def build_dataloader(): + def tokenize_fn(texts): batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True) return {k: v.cuda() for k, v in batch.items()} dataset = pd.read_csv(args.prompt_path)['prompt'] - dataloader = 
DataLoader(dataset=dataset, - batch_size=dataset_size, - shuffle=True, - collate_fn=tokenize_fn - ) + dataloader = DataLoader(dataset=dataset, batch_size=dataset_size, shuffle=True, collate_fn=tokenize_fn) return dataloader - - wait_tasks.append(experience_holder_ref.workingloop.remote(build_dataloader, - num_steps=args.experience_steps)) + wait_tasks.append(experience_holder_ref.workingloop.remote(build_dataloader, num_steps=args.experience_steps)) ray.get(wait_tasks) @@ -179,5 +171,5 @@ def tokenize_fn(texts): parser.add_argument('--quant_group_size', type=int, default=128) parser.add_argument('--debug', action='store_true') args = parser.parse_args() - ray.init(namespace=os.environ["RAY_NAMESPACE"]) - main(args) \ No newline at end of file + ray.init(namespace=os.environ["RAY_NAMESPACE"], runtime_env={"env_vars": os.environ}) + main(args) diff --git a/applications/Chat/examples/ray/mmmt_prompt.py b/applications/Chat/examples/ray/mmmt_prompt.py index 941e080ebb5e..8d383d2822fd 100644 --- a/applications/Chat/examples/ray/mmmt_prompt.py +++ b/applications/Chat/examples/ray/mmmt_prompt.py @@ -3,9 +3,9 @@ import socket from functools import partial +import pandas as pd import ray import torch -import pandas as pd from coati.quant import llama_load_quant, low_resource_init from coati.ray.detached_trainer_ppo import DetachedPPOTrainer from coati.ray.experience_maker_holder import ExperienceMakerHolder @@ -20,6 +20,7 @@ from transformers import AutoConfig, AutoTokenizer from transformers.modeling_utils import no_init_weights + def get_free_port(): with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind(('', 0)) @@ -85,7 +86,7 @@ def model_fn(): env_info=env_info_maker, kl_coef=0.1, debug=args.debug, - update_lora_weights = not (args.lora_rank == 0), + update_lora_weights=not (args.lora_rank == 0), # sync_models_from_trainers=True, # generation kwargs: max_length=512, @@ -119,24 +120,21 @@ def trainer_model_fn(): buffer_limit=16, eval_performance=True, 
debug=args.debug, - update_lora_weights = not (args.lora_rank == 0), + update_lora_weights=not (args.lora_rank == 0), ) for i, env_info_trainer in enumerate(env_info_trainers) ] dataset_size = args.experience_batch_size * 4 - + def build_dataloader(): + def tokenize_fn(texts): batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True) return {k: v.cuda() for k, v in batch.items()} dataset = pd.read_csv(args.prompt_path)['prompt'] - dataloader = DataLoader(dataset=dataset, - batch_size=dataset_size, - shuffle=True, - collate_fn=tokenize_fn - ) + dataloader = DataLoader(dataset=dataset, batch_size=dataset_size, shuffle=True, collate_fn=tokenize_fn) return dataloader # uncomment this function if sync_models_from_trainers is True @@ -148,16 +146,13 @@ def tokenize_fn(texts): wait_tasks = [] for experience_holder_ref in experience_holder_refs: - wait_tasks.append( - experience_holder_ref.workingloop.remote(build_dataloader, - num_steps=args.experience_steps)) + wait_tasks.append(experience_holder_ref.workingloop.remote(build_dataloader, num_steps=args.experience_steps)) total_steps = args.experience_batch_size * args.experience_steps * \ args.num_makers // (args.num_trainers * args.train_batch_size) for trainer_ref in trainer_refs: wait_tasks.append(trainer_ref.fit.remote(total_steps, args.update_steps, args.train_epochs)) - ray.get(wait_tasks) @@ -189,6 +184,6 @@ def tokenize_fn(texts): parser.add_argument('--quant_group_size', type=int, default=128) parser.add_argument('--debug', action='store_true') args = parser.parse_args() - - ray.init(namespace=os.environ["RAY_NAMESPACE"]) + + ray.init(namespace=os.environ["RAY_NAMESPACE"], runtime_env={"env_vars": os.environ}) main(args) From 03a5a290593fd94b0a4146989c40d60ceff622c6 Mon Sep 17 00:00:00 2001 From: ver217 Date: Mon, 15 May 2023 17:20:13 +0800 Subject: [PATCH 21/26] [chat] fix ray runtime env --- applications/Chat/benchmarks/ray/1mmt_dummy.py | 2 +- 
applications/Chat/benchmarks/ray/mmmt_dummy.py | 2 +- applications/Chat/examples/ray/1mmt_prompt.py | 2 +- applications/Chat/examples/ray/mmmt_prompt.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/applications/Chat/benchmarks/ray/1mmt_dummy.py b/applications/Chat/benchmarks/ray/1mmt_dummy.py index 93dc27da379f..9e8f36cefc4f 100644 --- a/applications/Chat/benchmarks/ray/1mmt_dummy.py +++ b/applications/Chat/benchmarks/ray/1mmt_dummy.py @@ -174,5 +174,5 @@ def build_dataloader(size): parser.add_argument('--quant_group_size', type=int, default=128) parser.add_argument('--debug', action='store_true') args = parser.parse_args() - ray.init(namespace=os.environ["RAY_NAMESPACE"], runtime_env={"env_vars": os.environ}) + ray.init(namespace=os.environ["RAY_NAMESPACE"], runtime_env={"env_vars": dict(os.environ)}) main(args) diff --git a/applications/Chat/benchmarks/ray/mmmt_dummy.py b/applications/Chat/benchmarks/ray/mmmt_dummy.py index 3bd0fa10d69d..46a0062893b8 100644 --- a/applications/Chat/benchmarks/ray/mmmt_dummy.py +++ b/applications/Chat/benchmarks/ray/mmmt_dummy.py @@ -185,5 +185,5 @@ def build_dataloader(size): parser.add_argument('--quant_group_size', type=int, default=128) parser.add_argument('--debug', action='store_true') args = parser.parse_args() - ray.init(namespace=os.environ["RAY_NAMESPACE"], runtime_env={"env_vars": os.environ}) + ray.init(namespace=os.environ["RAY_NAMESPACE"], runtime_env={"env_vars": dict(os.environ)}) main(args) diff --git a/applications/Chat/examples/ray/1mmt_prompt.py b/applications/Chat/examples/ray/1mmt_prompt.py index dd1815968ac9..afdd6a922cc7 100644 --- a/applications/Chat/examples/ray/1mmt_prompt.py +++ b/applications/Chat/examples/ray/1mmt_prompt.py @@ -171,5 +171,5 @@ def tokenize_fn(texts): parser.add_argument('--quant_group_size', type=int, default=128) parser.add_argument('--debug', action='store_true') args = parser.parse_args() - ray.init(namespace=os.environ["RAY_NAMESPACE"], 
runtime_env={"env_vars": os.environ}) + ray.init(namespace=os.environ["RAY_NAMESPACE"], runtime_env={"env_vars": dict(os.environ)}) main(args) diff --git a/applications/Chat/examples/ray/mmmt_prompt.py b/applications/Chat/examples/ray/mmmt_prompt.py index 8d383d2822fd..fa7b2bd7edfd 100644 --- a/applications/Chat/examples/ray/mmmt_prompt.py +++ b/applications/Chat/examples/ray/mmmt_prompt.py @@ -185,5 +185,5 @@ def tokenize_fn(texts): parser.add_argument('--debug', action='store_true') args = parser.parse_args() - ray.init(namespace=os.environ["RAY_NAMESPACE"], runtime_env={"env_vars": os.environ}) + ray.init(namespace=os.environ["RAY_NAMESPACE"], runtime_env={"env_vars": dict(os.environ)}) main(args) From 1fafa84185d44192814fbd767947c873c5282160 Mon Sep 17 00:00:00 2001 From: ver217 Date: Tue, 16 May 2023 11:11:25 +0800 Subject: [PATCH 22/26] [chat] fix example ci docker args --- .github/workflows/run_chatgpt_examples.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/run_chatgpt_examples.yml b/.github/workflows/run_chatgpt_examples.yml index 9d9d3a007851..129bf7ed3270 100644 --- a/.github/workflows/run_chatgpt_examples.yml +++ b/.github/workflows/run_chatgpt_examples.yml @@ -20,7 +20,7 @@ jobs: runs-on: [self-hosted, gpu] container: image: hpcaitech/pytorch-cuda:1.12.0-11.3.0 - options: --gpus all --rm -v /data/scratch/github_actions/chat:/data/scratch/github_actions/chat + options: --gpus all --rm -v /data/scratch/github_actions/chat:/data/scratch/github_actions/chat --shm-size=10.24gb timeout-minutes: 30 defaults: run: From 86ad98cbdecc2ba6b2535f06047d5a37bf346ee8 Mon Sep 17 00:00:00 2001 From: ver217 Date: Tue, 16 May 2023 11:28:22 +0800 Subject: [PATCH 23/26] [chat] add debug info in trainer --- .../Chat/coati/ray/detached_trainer_ppo.py | 27 +++++++++++-------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/applications/Chat/coati/ray/detached_trainer_ppo.py 
b/applications/Chat/coati/ray/detached_trainer_ppo.py index 5d84b58ca0e3..7dc74c8ac8ca 100644 --- a/applications/Chat/coati/ray/detached_trainer_ppo.py +++ b/applications/Chat/coati/ray/detached_trainer_ppo.py @@ -70,6 +70,9 @@ def __init__( debug: bool = False, update_lora_weights: bool = False, ) -> None: + # TODO(ver217): remove debug info + import os + print(os.environ) # set environment variables if env_info: set_dist_env(env_info=env_info) @@ -109,7 +112,7 @@ def __init__( print(f'[trainer{get_rank()}] will send state dict to {experience_maker_holder_name_list}') self._update_lora_weights = update_lora_weights - + @ray.method(concurrency_group="model_io") @torch.no_grad() def _update_remote_makers(self, fully_update: bool = False, **config): @@ -125,19 +128,21 @@ def _update_remote_makers(self, fully_update: bool = False, **config): # sending loop tasks = [] - for state_dict_shard in self._get_model_state_dict_shard(self.actor, fully_update = fully_update, **config): + for state_dict_shard in self._get_model_state_dict_shard(self.actor, fully_update=fully_update, **config): for target_holder in self.target_holder_list: tasks.append( - target_holder.update_experience_maker.remote(new_actor_state_dict=state_dict_shard, - new_actor_lora_config_dict=self._get_model_lora_config_dict(self.actor), - fully_update=fully_update)) + target_holder.update_experience_maker.remote( + new_actor_state_dict=state_dict_shard, + new_actor_lora_config_dict=self._get_model_lora_config_dict(self.actor), + fully_update=fully_update)) # sending loop - for state_dict_shard in self._get_model_state_dict_shard(self.critic, fully_update = fully_update, **config): + for state_dict_shard in self._get_model_state_dict_shard(self.critic, fully_update=fully_update, **config): for target_holder in self.target_holder_list: tasks.append( - target_holder.update_experience_maker.remote(new_critic_state_dict=state_dict_shard, - 
new_critic_lora_config_dict=self._get_model_lora_config_dict(self.critic), - fully_update=fully_update)) + target_holder.update_experience_maker.remote( + new_critic_state_dict=state_dict_shard, + new_critic_lora_config_dict=self._get_model_lora_config_dict(self.critic), + fully_update=fully_update)) ray.get(tasks) # mark end for target_holder in self.target_holder_list: @@ -183,7 +188,7 @@ def strategy_save_actor_optim(self, path: str, only_rank0: bool = False) -> None def strategy_save_critic_optim(self, path: str, only_rank0: bool = False) -> None: self.strategy.save_optimizer(self.critic_optim, path, only_rank0) - def _get_model_state_dict_shard(self, model: torch.nn.Module, fully_update = False, **config): + def _get_model_state_dict_shard(self, model: torch.nn.Module, fully_update=False, **config): for state_dict in self.strategy.get_model_state_dict_shard(model, **config): if not self._update_lora_weights or fully_update: yield state_dict_to(state_dict) @@ -195,4 +200,4 @@ def _get_model_lora_config_dict(self, model: torch.nn.Module): if not self._update_lora_weights: return None unwrapped_model = self.strategy.unwrap_model(model) - return LoRAConstructor.extract_lora_config(unwrapped_model) \ No newline at end of file + return LoRAConstructor.extract_lora_config(unwrapped_model) From baeee8ae09e78b7627f13baeb7946476b14a6e53 Mon Sep 17 00:00:00 2001 From: ver217 Date: Tue, 16 May 2023 11:57:13 +0800 Subject: [PATCH 24/26] [chat] add nccl debug info --- .github/workflows/run_chatgpt_examples.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/run_chatgpt_examples.yml b/.github/workflows/run_chatgpt_examples.yml index 129bf7ed3270..8a818461eb77 100644 --- a/.github/workflows/run_chatgpt_examples.yml +++ b/.github/workflows/run_chatgpt_examples.yml @@ -54,3 +54,4 @@ jobs: SFT_DATASET: /data/scratch/github_actions/chat/data.json PROMPT_PATH: /data/scratch/github_actions/chat/prompts_en.jsonl PRETRAIN_DATASET: 
/data/scratch/github_actions/chat/alpaca_data.json + NCCL_DEBUG: INFO From e01bfc0fad82652947372295068e94c40915b4e9 Mon Sep 17 00:00:00 2001 From: ver217 Date: Tue, 16 May 2023 15:47:16 +0800 Subject: [PATCH 25/26] [chat] skip ray test --- .github/workflows/run_chatgpt_examples.yml | 1 - applications/Chat/coati/ray/detached_trainer_ppo.py | 3 --- applications/Chat/examples/test_ci.sh | 3 ++- 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/.github/workflows/run_chatgpt_examples.yml b/.github/workflows/run_chatgpt_examples.yml index 8a818461eb77..129bf7ed3270 100644 --- a/.github/workflows/run_chatgpt_examples.yml +++ b/.github/workflows/run_chatgpt_examples.yml @@ -54,4 +54,3 @@ jobs: SFT_DATASET: /data/scratch/github_actions/chat/data.json PROMPT_PATH: /data/scratch/github_actions/chat/prompts_en.jsonl PRETRAIN_DATASET: /data/scratch/github_actions/chat/alpaca_data.json - NCCL_DEBUG: INFO diff --git a/applications/Chat/coati/ray/detached_trainer_ppo.py b/applications/Chat/coati/ray/detached_trainer_ppo.py index 7dc74c8ac8ca..5f0032716f93 100644 --- a/applications/Chat/coati/ray/detached_trainer_ppo.py +++ b/applications/Chat/coati/ray/detached_trainer_ppo.py @@ -70,9 +70,6 @@ def __init__( debug: bool = False, update_lora_weights: bool = False, ) -> None: - # TODO(ver217): remove debug info - import os - print(os.environ) # set environment variables if env_info: set_dist_env(env_info=env_info) diff --git a/applications/Chat/examples/test_ci.sh b/applications/Chat/examples/test_ci.sh index 57034aa9989e..2fa6c6052f8d 100755 --- a/applications/Chat/examples/test_ci.sh +++ b/applications/Chat/examples/test_ci.sh @@ -125,4 +125,5 @@ rm -rf ${BASE}/rm_ckpt_gpt.pt rm -rf ${BASE}/actor_checkpoint_prompts.pt -cd ${BASE}/ray && bash test_ci.sh && cd ${BASE} +# 3080 doesn't support P2P, skip this test +# cd ${BASE}/ray && bash test_ci.sh && cd ${BASE} From aaa0fe53b4b838f881030a5f1e10f0826c7fc822 Mon Sep 17 00:00:00 2001 From: ver217 Date: Thu, 18 May 2023 
12:25:43 +0800 Subject: [PATCH 26/26] [doc] fix typo --- applications/Chat/coati/ray/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/Chat/coati/ray/README.md b/applications/Chat/coati/ray/README.md index eaddc4f9f4f4..228155a6855b 100644 --- a/applications/Chat/coati/ray/README.md +++ b/applications/Chat/coati/ray/README.md @@ -2,7 +2,7 @@ ## Detach Experience Makers and Trainers - We can completely separate the trainers and makers. +We can completely separate the trainers and makers.

@@ -138,7 +138,7 @@ We can deploy different strategies to makers and trainers. Here are some notions

-### 2 Makers 2 Trainer2 +### 2 Makers 2 Trainer