From c9f869e352180eefea08a6ddf014ac27492d771c Mon Sep 17 00:00:00 2001 From: Sachidanand Alle Date: Mon, 14 Dec 2020 10:14:26 -0800 Subject: [PATCH 01/10] Tutorial to train a 2D Deepgrow Model --- deepgrow/ignite/deepgrow_evaluation_2d.py | 140 +++++++++ deepgrow/ignite/deepgrow_training_2d.py | 351 ++++++++++++++++++++++ 2 files changed, 491 insertions(+) create mode 100644 deepgrow/ignite/deepgrow_evaluation_2d.py create mode 100644 deepgrow/ignite/deepgrow_training_2d.py diff --git a/deepgrow/ignite/deepgrow_evaluation_2d.py b/deepgrow/ignite/deepgrow_evaluation_2d.py new file mode 100644 index 0000000000..566b5ac7b3 --- /dev/null +++ b/deepgrow/ignite/deepgrow_evaluation_2d.py @@ -0,0 +1,140 @@ +import argparse +import distutils.util +import json +import logging +import os +import sys +import time + +import torch + +from monai.apps.deepgrow.interaction import Interaction +from monai.engines import SupervisedEvaluator +from monai.handlers import ( + StatsHandler, + TensorBoardStatsHandler, + MeanDice) +from monai.inferers import SimpleInferer +from monai.utils import set_determinism +from .deepgrow_training_2d import ( + get_network, + get_loaders, + get_pre_transforms, + get_click_transforms, + get_post_transforms +) + + +def create_validator(args, click): + set_determinism(seed=args.seed) + + device = torch.device("cuda" if args.use_gpu else "cpu") + + pre_transforms = get_pre_transforms(json.loads(args.roi_size)) + click_transforms = get_click_transforms(sigmoid=False) + post_transform = get_post_transforms(sigmoid=False) + + # define training components + network = get_network(args).to(device) + + logging.info('Loading Network...') + map_location = {"cuda:0": "cuda:{}".format(args.local_rank)} + + checkpoint = torch.load(args.model_path, map_location=map_location) + for key in list(checkpoint.keys()): + if 'module.' 
in key:
+            checkpoint[key.replace('module.', '')] = checkpoint[key]
+            del checkpoint[key]
+
+    network.load_state_dict(checkpoint)
+
+    # define event-handlers for engine
+    _, val_loader = get_loaders(args, pre_transforms, train=False)
+    fold_size = int(len(val_loader.dataset) / args.batch / args.folds) if args.folds else 0
+    logging.info('Using Fold-Size: {}'.format(fold_size))
+
+    val_handlers = [
+        StatsHandler(output_transform=lambda x: None),
+        TensorBoardStatsHandler(log_dir=args.output, output_transform=lambda x: None),
+    ]
+
+    evaluator = SupervisedEvaluator(
+        device=device,
+        val_data_loader=val_loader,
+        network=network,
+        iteration_update=Interaction(
+            transforms=click_transforms,
+            max_interactions=click,
+            train=False),
+        inferer=SimpleInferer(),
+        post_transform=post_transform,
+        val_handlers=val_handlers,
+        key_val_metric={
+            f'clicks_{click}_val_dice': MeanDice(
+                include_background=False,
+                output_transform=lambda x: (x["pred"], x["label"])
+            )
+        }
+    )
+    return evaluator
+
+
+def strtobool(val):
+    return bool(distutils.util.strtobool(val))
+
+
+def main():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument('-s', '--seed', type=int, default=42)
+
+    parser.add_argument('-n', '--network', default='bunet', choices=['bunet'])
+    parser.add_argument('-c', '--channels', type=int, default=64)
+    parser.add_argument('-f', '--folds', type=int, default=10)
+
+    parser.add_argument('-d', '--dataset_root', default='/workspace/data/52432')
+    parser.add_argument('-j', '--dataset_json', default='/workspace/data/52432/dataset.json')
+    parser.add_argument('-i', '--input', default='/workspace/data/52432/2D')
+    parser.add_argument('-o', '--output', default='output')
+
+    parser.add_argument('-g', '--use_gpu', type=strtobool, default='true')
+    parser.add_argument('-b', '--batch', type=int, default=1)
+    parser.add_argument('-t', '--limit', type=int, default=20)
+    parser.add_argument('-m', '--model_path', default="output/model.pt")
+    parser.add_argument('--roi_size', default="[128, 128]")
+
+    parser.add_argument('-iv', '--max_val_interactions', default="[1,2,5,10,15]")
+    parser.add_argument('--multi_gpu', type=strtobool, default='false')
+    parser.add_argument("--local_rank", type=int, default=0)
+
+    args = parser.parse_args()
+    if args.local_rank == 0:
+        for arg in vars(args):
+            logging.info('USING:: {} = {}'.format(arg, getattr(args, arg)))
+        print("")
+
+    if not os.path.exists(args.output):
+        logging.info('output path [{}] does not exist. 
creating it now.'.format(args.output)) + os.makedirs(args.output, exist_ok=True) + + clicks = json.loads(args.max_val_interactions) + for click in clicks: + logging.info('+++++++++++++++++++++++++++++++++++++++++++++++++++++') + logging.info(' CLICKS = {}'.format(click)) + logging.info('+++++++++++++++++++++++++++++++++++++++++++++++++++++') + trainer = create_validator(args, click) + + start_time = time.time() + trainer.run() + end_time = time.time() + + logging.info('Total Run Time {}'.format(end_time - start_time)) + + +if __name__ == "__main__": + logging.basicConfig( + stream=sys.stdout, + level=logging.INFO, + format='[%(asctime)s.%(msecs)03d][%(levelname)5s] - %(message)s', + datefmt='%Y-%m-%d %H:%M:%S') + main() diff --git a/deepgrow/ignite/deepgrow_training_2d.py b/deepgrow/ignite/deepgrow_training_2d.py new file mode 100644 index 0000000000..271250bfe7 --- /dev/null +++ b/deepgrow/ignite/deepgrow_training_2d.py @@ -0,0 +1,351 @@ +import argparse +import distutils.util +import json +import logging +import os +import sys +import time + +import torch +import torch.distributed as dist + +from monai.apps.deepgrow import ( + AddInitialSeedPointd, + FindDiscrepancyRegionsd, + AddRandomGuidanced, + AddGuidanceSignald, + create_dataset, + Interaction +) +from monai.data import partition_dataset +from monai.data.dataloader import DataLoader +from monai.data.dataset import PersistentDataset +from monai.engines import SupervisedEvaluator +from monai.engines import SupervisedTrainer +from monai.handlers import ( + StatsHandler, + TensorBoardStatsHandler, + ValidationHandler, + LrScheduleHandler, + CheckpointSaver, + MeanDice) +from monai.inferers import SimpleInferer +from monai.losses import DiceLoss +from monai.networks.nets import BasicUNet +from monai.transforms import ( + Compose, + LoadNumpyd, + AddChanneld, + ScaleIntensityRanged, + Resized, + ToTensord, + ToNumpyd, + Activationsd, + AsDiscreted, + CropForegroundd, +) +from monai.utils import set_determinism + + +def get_network(args): + features = (64, 128, 256, 512, 1024, 64) if args.channels == 64 else (32, 32, 64, 128, 256, 32) + logging.info('Using BasicUnet with features: {}'.format(features)) + return BasicUNet(dimensions=2, in_channels=3, out_channels=1, features=features) + + +def get_pre_transforms(roi_size): + return Compose([ + LoadNumpyd(keys=('image', 'label')), + AddChanneld(keys=('image', 'label')), + ScaleIntensityRanged(keys='image', a_min=-1024, a_max=1024, b_min=-1.0, b_max=1.0, clip=True), + CropForegroundd(keys=('image', 'label'), source_key='label', margin=20), + Resized(keys=('image', 'label'), spatial_size=roi_size, mode=('area', 'nearest')), + + AddInitialSeedPointd(label='label', guidance='guidance'), + AddGuidanceSignald(image='image', guidance='guidance'), + ToTensord(keys=('image', 'label')) + ]) + + +def get_click_transforms(sigmoid=True): + transforms = [ + Activationsd(keys='pred', sigmoid=True), + ToNumpyd(keys=('image', 'label', 'pred', 'probability', 'guidance')), + FindDiscrepancyRegionsd(label='label', pred='pred', discrepancy='discrepancy', batched=True), + AddRandomGuidanced(guidance='guidance', discrepancy='discrepancy', probability='probability', batched=True), + AddGuidanceSignald(image='image', guidance='guidance', batched=True), + ToTensord(keys=('image', 'label')) + ] + + if not sigmoid: + transforms.pop(0) + return Compose(transforms) + + +def get_post_transforms(sigmoid=True): + transforms = [ + Activationsd(keys='pred', sigmoid=True), + AsDiscreted(keys='pred', threshold_values=True, 
logit_thresh=0.5) + ] + + if not sigmoid: + transforms.pop(0) + return Compose(transforms) + + +def get_loaders(args, pre_transforms, train=True): + multi_gpu = args.multi_gpu + local_rank = args.local_rank + + dataset_json = os.path.join(args.input, 'dataset.json') + if not os.path.exists(dataset_json): + with open(os.path.join(args.dataset_json)) as f: + datalist = json.load(f) + + datalist = create_dataset( + datalist=datalist['training'], + base_dir=args.dataset_root, + output_dir=os.path.join(args.input), + dimension=2, + pixdim=(1.0, 1.0) + ) + + with open(dataset_json, 'w') as fp: + json.dump(datalist, fp, indent=2) + + dataset_json = os.path.join(args.input, 'dataset.json') + with open(dataset_json) as f: + datalist = json.load(f) + + total_d = len(datalist) + datalist = datalist[-args.limit:] + total_l = len(datalist) + + if multi_gpu: + datalist = partition_dataset( + data=datalist, + num_partitions=dist.get_world_size(), + even_divisible=True, + shuffle=True, + seed=args.seed + )[local_rank] + + if train: + train_datalist, val_datalist = partition_dataset(datalist, ratios=[args.split, (1 - args.split)]) + + train_ds = PersistentDataset(train_datalist, pre_transforms) + train_loader = DataLoader( + train_ds, + batch_size=args.batch, + shuffle=True, + num_workers=16) + logging.info('{}:: Total Records used for Training is: {}/{}/{}'.format( + local_rank, len(train_ds), total_l, total_d)) + else: + train_loader = None + val_datalist = datalist + + val_ds = PersistentDataset(val_datalist, pre_transforms) + val_loader = DataLoader(val_ds, batch_size=args.batch, num_workers=8) + logging.info('{}:: Total Records used for Validation is: {}/{}/{}'.format( + local_rank, len(val_ds), total_l, total_d)) + + return train_loader, val_loader + + +def create_trainer(args): + set_determinism(seed=args.seed) + + multi_gpu = args.multi_gpu + local_rank = args.local_rank + if multi_gpu: + dist.init_process_group(backend="nccl", init_method="env://") + device = torch.device("cuda:{}".format(local_rank)) + torch.cuda.set_device(device) + else: + device = torch.device("cuda" if args.use_gpu else "cpu") + + pre_transforms = get_pre_transforms(json.loads(args.roi_size)) + click_transforms = get_click_transforms() + post_transform = get_post_transforms() + + train_loader, val_loader = get_loaders(args, pre_transforms) + + # define training components + network = get_network(args).to(device) + if multi_gpu: + network = torch.nn.parallel.DistributedDataParallel(network, device_ids=[local_rank], output_device=local_rank) + + if args.resume: + logging.info('{}:: Loading Network...'.format(local_rank)) + map_location = {"cuda:0": "cuda:{}".format(local_rank)} + network.load_state_dict(torch.load(args.model_filepath, map_location=map_location)) + + # define event-handlers for engine + val_handlers = [ + StatsHandler(output_transform=lambda x: None), + TensorBoardStatsHandler(log_dir=args.output, output_transform=lambda x: None), + CheckpointSaver(save_dir=args.output, save_dict={"net": network}, save_key_metric=True, save_final=True, + save_interval=args.save_interval, final_filename='model.pt') + ] + val_handlers = val_handlers if local_rank == 0 else None + evaluator = SupervisedEvaluator( + device=device, + val_data_loader=val_loader, + network=network, + iteration_update=Interaction( + transforms=click_transforms, + max_interactions=args.max_val_interactions, + key_probability='probability', + train=False), + inferer=SimpleInferer(), + post_transform=post_transform, + key_val_metric={ + "val_dice": 
MeanDice( + include_background=False, + output_transform=lambda x: (x["pred"], x["label"]) + ) + }, + val_handlers=val_handlers + ) + + loss_function = DiceLoss(sigmoid=True, squared_pred=True) + optimizer = torch.optim.Adam(network.parameters(), args.learning_rate) + lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5000, gamma=0.1) + + train_handlers = [ + LrScheduleHandler(lr_scheduler=lr_scheduler, print_lr=True), + ValidationHandler(validator=evaluator, interval=args.val_freq, epoch_level=True), + StatsHandler(tag_name="train_loss", output_transform=lambda x: x["loss"]), + TensorBoardStatsHandler(log_dir=args.output, tag_name="train_loss", output_transform=lambda x: x["loss"]), + CheckpointSaver(save_dir=args.output, save_dict={"net": network, "opt": optimizer, "lr": lr_scheduler}, + save_interval=args.save_interval * 2, save_final=True, final_filename='checkpoint.pt'), + ] + train_handlers = train_handlers if local_rank == 0 else train_handlers[:2] + + trainer = SupervisedTrainer( + device=device, + max_epochs=args.epochs, + train_data_loader=train_loader, + network=network, + iteration_update=Interaction( + transforms=click_transforms, + max_interactions=args.max_train_interactions, + key_probability='probability', + train=True), + optimizer=optimizer, + loss_function=loss_function, + inferer=SimpleInferer(), + post_transform=post_transform, + amp=args.amp, + key_train_metric={ + "train_dice": MeanDice( + include_background=False, + output_transform=lambda x: (x["pred"], x["label"]) + ) + }, + train_handlers=train_handlers, + ) + return trainer + + +def strtobool(val): + return bool(distutils.util.strtobool(val)) + + +def main(): + parser = argparse.ArgumentParser() + + parser.add_argument('-s', '--seed', type=int, default=42) + + parser.add_argument('-c', '--channels', type=int, default=64) + parser.add_argument('-d', '--dataset_root', default='/workspace/data/MSD_Spleen') + parser.add_argument('-j', '--dataset_json', default='/workspace/data/MSD_Spleen/dataset.json') + parser.add_argument('-i', '--input', default='/workspace/data/deepgrow/spleen/2D') + parser.add_argument('-o', '--output', default='output') + + parser.add_argument('-g', '--use_gpu', type=strtobool, default='true') + parser.add_argument('-a', '--amp', type=strtobool, default='false') + + parser.add_argument('-e', '--epochs', type=int, default=100) + parser.add_argument('-b', '--batch', type=int, default=16) + parser.add_argument('-x', '--split', type=float, default=0.8) + parser.add_argument('-t', '--limit', type=int, default=0) + + parser.add_argument('-r', '--resume', type=strtobool, default='false') + parser.add_argument('-m', '--model_path', default="model/model.pt") + parser.add_argument('--roi_size', default="[128, 128]") + + parser.add_argument('-f', '--val_freq', type=int, default=1) + parser.add_argument('-lr', '--learning_rate', type=float, default=0.0001) + parser.add_argument('-it', '--max_train_interactions', type=int, default=15) + parser.add_argument('-iv', '--max_val_interactions', type=int, default=5) + + parser.add_argument('--save_interval', type=int, default=10) + parser.add_argument('--multi_gpu', type=strtobool, default='false') + parser.add_argument('--local_rank', type=int, default=0) + parser.add_argument('--export', type=strtobool, default='false') + + args = parser.parse_args() + if args.local_rank == 0: + for arg in vars(args): + logging.info('USING:: {} = {}'.format(arg, getattr(args, arg))) + print("") + + if args.export: + logging.info('{}:: Loading PT Model from: 
{}'.format(args.local_rank, args.input)) + device = torch.device("cuda" if args.use_gpu else "cpu") + network = get_network(args).to(device) + + map_location = {"cuda:0": "cuda:{}".format(args.local_rank)} + network.load_state_dict(torch.load(args.input, map_location=map_location)) + + logging.info('{}:: Saving TorchScript Model'.format(args.local_rank)) + model_ts = torch.jit.script(network) + torch.jit.save(model_ts, os.path.join(args.output)) + return + + if not os.path.exists(args.output): + logging.info('output path [{}] does not exist. creating it now.'.format(args.output)) + os.makedirs(args.output, exist_ok=True) + + trainer = create_trainer(args) + + start_time = time.time() + trainer.run() + end_time = time.time() + + logging.info('Total Training Time {}'.format(end_time - start_time)) + if args.local_rank == 0: + logging.info('{}:: Saving Final PT Model'.format(args.local_rank)) + torch.save(trainer.network.state_dict(), os.path.join(args.output, 'model-final.pt')) + + if not args.multi_gpu: + logging.info('{}:: Saving TorchScript Model'.format(args.local_rank)) + model_ts = torch.jit.script(trainer.network) + torch.jit.save(model_ts, os.path.join(args.output, 'model-final.ts')) + + if args.multi_gpu: + dist.destroy_process_group() + + +if __name__ == "__main__": + logging.basicConfig( + stream=sys.stdout, + level=logging.INFO, + format='[%(asctime)s.%(msecs)03d][%(levelname)5s](%(name)s) - %(message)s', + datefmt='%Y-%m-%d %H:%M:%S') + main() + +''' +# Single GPU (it will also export) +python deepgrow_training_2d.py + +# Multi GPU (run export separate) +python -m torch.distributed.launch \ + --nproc_per_node=`nvidia-smi -L | wc -l` \ + --nnodes=1 --node_rank=0 --master_addr="localhost" --master_port=1234 \ + -m deepgrow_training_2d --multi_gpu true -e 100 + +python deepgrow_training_2d.py --export +''' From 6ef4787b7048fdcc57d474e5c7e8e6235cdbb6d6 Mon Sep 17 00:00:00 2001 From: Sachidanand Alle Date: Wed, 23 Dec 2020 10:23:42 -0800 Subject: [PATCH 02/10] Deepgrow train/validate/inference example --- deepgrow/ignite/inference.ipynb | 212 ++++++++++++++++ deepgrow/ignite/inference_3d.ipynb | 229 ++++++++++++++++++ .../{deepgrow_training_2d.py => train.py} | 178 ++++++++------ deepgrow/ignite/train_3d.py | 70 ++++++ ...{deepgrow_evaluation_2d.py => validate.py} | 91 +++---- deepgrow/ignite/validate_3d.py | 49 ++++ 6 files changed, 706 insertions(+), 123 deletions(-) create mode 100644 deepgrow/ignite/inference.ipynb create mode 100644 deepgrow/ignite/inference_3d.ipynb rename deepgrow/ignite/{deepgrow_training_2d.py => train.py} (78%) create mode 100644 deepgrow/ignite/train_3d.py rename deepgrow/ignite/{deepgrow_evaluation_2d.py => validate.py} (75%) create mode 100644 deepgrow/ignite/validate_3d.py diff --git a/deepgrow/ignite/inference.ipynb b/deepgrow/ignite/inference.ipynb new file mode 100644 index 0000000000..4e3deda5c8 --- /dev/null +++ b/deepgrow/ignite/inference.ipynb @@ -0,0 +1,212 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "import torch\n", + "from torch import jit\n", + "\n", + "from monai.apps.deepgrow.transforms import (\n", + " AddGuidanceFromPointsd,\n", + " AddGuidanceSignald,\n", + " Fetch2DSliced,\n", + " ResizeGuidanced,\n", + " RestoreCroppedLabeld,\n", + " SpatialCropGuidanced,\n", + ")\n", + "from monai.transforms import (\n", + " AsChannelFirstd,\n", + " Spacingd,\n", + " LoadNiftid,\n", + " AddChanneld,\n", + " 
NormalizeIntensityd,\n", + " ToTensord,\n", + " ToNumpyd,\n", + " Activationsd,\n", + " AsDiscreted,\n", + " Resized\n", + ")\n", + "\n", + "\n", + "def draw_points(guidance):\n", + " if guidance is None:\n", + " return\n", + " colors = ['r+', 'b+']\n", + " for color, points in zip(colors, guidance):\n", + " for p in points:\n", + " p1 = p[-1]\n", + " p2 = p[-2]\n", + " plt.plot(p1, p2, color, 'MarkerSize', 30)\n", + "\n", + "\n", + "def show_image(image, label, guidance=None):\n", + " plt.figure(\"check\", (12, 6))\n", + " plt.subplot(1, 2, 1)\n", + " plt.title(\"image\")\n", + " plt.imshow(image, cmap=\"gray\")\n", + "\n", + " if label is not None:\n", + " masked = np.ma.masked_where(label == 0, label)\n", + " plt.imshow(masked, 'jet', interpolation='none', alpha=0.7)\n", + "\n", + " draw_points(guidance)\n", + " plt.colorbar()\n", + "\n", + " if label is not None:\n", + " plt.subplot(1, 2, 2)\n", + " plt.title(\"label\")\n", + " plt.imshow(label)\n", + " plt.colorbar()\n", + " # draw_points(guidance)\n", + " plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Pre Processing\n", + "roi_size = [256, 256]\n", + "pixdim = (1.0, 1.0)\n", + "dimensions = 2\n", + "\n", + "data = {\n", + " 'image': '/salle/Downloads/spleen_19.nii.gz',\n", + " 'foreground': [[354, 336, 40]], # ,[259,381,40]],\n", + " 'background': [],\n", + " 'spatial_size': [384, 384]\n", + "}\n", + "slice_idx = original_slice_idx = data['foreground'][0][2]\n", + "\n", + "pre_transforms = [\n", + " LoadNiftid(keys='image'),\n", + " AsChannelFirstd(keys='image'),\n", + " Spacingd(keys='image', pixdim=pixdim, mode='bilinear'),\n", + "\n", + " AddGuidanceFromPointsd(ref_image='image', guidance='guidance', foreground='foreground', background='background',\n", + " dimensions=dimensions),\n", + " Fetch2DSliced(keys='image', guidance='guidance'),\n", + " AddChanneld(keys='image'),\n", + "\n", + " SpatialCropGuidanced(keys='image', guidance='guidance', spatial_size=roi_size),\n", + " Resized(keys='image', spatial_size=roi_size, mode='area'),\n", + " ResizeGuidanced(guidance='guidance', ref_image='image'),\n", + " NormalizeIntensityd(keys='image', subtrahend=208.0, divisor=388.0),\n", + " AddGuidanceSignald(image='image', guidance='guidance'),\n", + " ToTensord(keys='image')\n", + "]\n", + "\n", + "original_image = None\n", + "original_image_slice = None\n", + "for t in pre_transforms:\n", + " tname = type(t).__name__\n", + "\n", + " data = t(data)\n", + " image = data['image']\n", + " label = data.get('label')\n", + " guidance = data.get('guidance')\n", + "\n", + " print(\"{} => image shape: {}, label shape: {}\".format(\n", + " tname, image.shape, label.shape if label is not None else None))\n", + "\n", + " image = image if tname == 'Fetch2DSliced' else image[:, :, slice_idx] if tname in (\n", + " 'LoadNiftid') else image[slice_idx, :, :]\n", + " label = label if tname == 'Fetch2DSliced' else label[:, :, slice_idx] if tname in (\n", + " 'xyz') else label[slice_idx, :, :] if label is not None else None\n", + "\n", + " guidance = guidance if guidance else [np.roll(data['foreground'], 1).tolist(), []]\n", + " print('Guidance: {}'.format(guidance))\n", + "\n", + " show_image(image, label, guidance)\n", + " if tname == 'Fetch2DSliced':\n", + " slice_idx = 0\n", + " if tname == 'LoadNiftid':\n", + " original_image = data['image']\n", + " if tname == 'AddChanneld':\n", + " original_image_slice = data['image']\n" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Evaluation\n", + "model_path = '/workspace/Downloads/models/roi_b8_256x256_c32.ts'\n", + "model = jit.load(model_path)\n", + "model.cuda()\n", + "model.eval()\n", + "\n", + "inputs = data['image'][None].cuda()\n", + "with torch.no_grad():\n", + " outputs = model(inputs)\n", + "outputs = outputs[0]\n", + "data['pred'] = outputs\n", + "\n", + "post_transforms = [\n", + " Activationsd(keys='pred', sigmoid=True),\n", + " AsDiscreted(keys='pred', threshold_values=True, logit_thresh=0.5),\n", + " ToNumpyd(keys='pred'),\n", + " RestoreCroppedLabeld(keys='pred', ref_image='image', mode='nearest'),\n", + "]\n", + "\n", + "for t in post_transforms:\n", + " tname = type(t).__name__\n", + "\n", + " data = t(data)\n", + " image = data['image']\n", + " label = data['pred']\n", + " print(\"{} => image shape: {}, pred shape: {}\".format(tname, image.shape, label.shape))\n", + "\n", + " if tname in 'RestoreCroppedLabeld':\n", + " image = original_image[:, :, original_slice_idx]\n", + " label = label[0, :, :].detach().cpu().numpy() if torch.is_tensor(label) else label[original_slice_idx]\n", + " print(\"PLOT:: {} => image shape: {}, pred shape: {}; min: {}, max: {}, sum: {}\".format(\n", + " tname, image.shape, label.shape, np.min(label), np.max(label), np.sum(label)))\n", + " show_image(image, label)\n", + " else:\n", + " image = image[0, :, :].detach().cpu().numpy() if torch.is_tensor(image) else image[0]\n", + " label = label[0, :, :].detach().cpu().numpy() if torch.is_tensor(label) else label[0]\n", + " print(\"PLOT:: {} => image shape: {}, pred shape: {}; min: {}, max: {}, sum: {}\".format(\n", + " tname, image.shape, label.shape, np.min(label), np.max(label), np.sum(label)))\n", + " show_image(image, label)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.10" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} \ No newline at end of file diff --git a/deepgrow/ignite/inference_3d.ipynb b/deepgrow/ignite/inference_3d.ipynb new file mode 100644 index 0000000000..873398407c --- /dev/null +++ b/deepgrow/ignite/inference_3d.ipynb @@ -0,0 +1,229 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "import torch\n", + "from torch import jit\n", + "\n", + "from monai.apps.deepgrow.transforms import (\n", + " AddGuidanceFromPointsd,\n", + " AddGuidanceSignald,\n", + " ResizeGuidanced,\n", + " RestoreCroppedLabeld,\n", + " SpatialCropGuidanced,\n", + ")\n", + "from monai.transforms import (\n", + " AsChannelFirstd,\n", + " Spacingd,\n", + " LoadNiftid,\n", + " AddChanneld,\n", + " NormalizeIntensityd,\n", + " ToTensord,\n", + " ToNumpyd,\n", + " Activationsd,\n", + " AsDiscreted,\n", + " Resized\n", + ")\n", + "\n", + "\n", + "def draw_points(guidance):\n", + " if guidance is None:\n", + " return\n", + " colors = ['r+', 'b+']\n", + " for color, points in zip(colors, guidance):\n", + " for p in points:\n", + " p1 = p[-1]\n", + " p2 = p[-2]\n", + " plt.plot(p1, p2, color, 
'MarkerSize', 30)\n", + "\n", + "\n", + "def show_image(image, label, guidance=None):\n", + " plt.figure(\"check\", (12, 6))\n", + " plt.subplot(1, 2, 1)\n", + " plt.title(\"image\")\n", + " plt.imshow(image, cmap=\"gray\")\n", + "\n", + " if label is not None:\n", + " masked = np.ma.masked_where(label == 0, label)\n", + " plt.imshow(masked, 'jet', interpolation='none', alpha=0.7)\n", + "\n", + " draw_points(guidance)\n", + " plt.colorbar()\n", + "\n", + " if label is not None:\n", + " plt.subplot(1, 2, 2)\n", + " plt.title(\"label\")\n", + " plt.imshow(label)\n", + " plt.colorbar()\n", + " # draw_points(guidance)\n", + " plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Pre Processing\n", + "roi_size = [192, 192, 192]\n", + "model_size = [96, 96, 96]\n", + "pixdim = (1.0, 1.0, 1.0)\n", + "dimensions = 3\n", + "\n", + "data = {\n", + " # 'image': '/salle/Downloads/spleen_19.nii.gz',\n", + " # 'foreground': [[354, 336, 40],[259,381,40]],\n", + " 'image': '/salle/Downloads/_image.nii.gz',\n", + " 'foreground': [[73, 177, 90]],\n", + " 'background': [],\n", + "}\n", + "slice_idx = original_slice_idx = data['foreground'][0][2]\n", + "\n", + "pre_transforms = [\n", + " LoadNiftid(keys='image'),\n", + " AsChannelFirstd(keys='image'),\n", + " Spacingd(keys='image', pixdim=pixdim, mode='bilinear'),\n", + "\n", + " AddGuidanceFromPointsd(ref_image='image', guidance='guidance', foreground='foreground', background='background',\n", + " dimensions=dimensions),\n", + " AddChanneld(keys='image'),\n", + "\n", + " SpatialCropGuidanced(keys='image', guidance='guidance', spatial_size=roi_size),\n", + " Resized(keys='image', spatial_size=model_size, mode='area'),\n", + " ResizeGuidanced(guidance='guidance', ref_image='image'),\n", + " NormalizeIntensityd(keys='image', subtrahend=208.0, divisor=388.0),\n", + " AddGuidanceSignald(image='image', guidance='guidance', dimensions=dimensions),\n", + " ToTensord(keys='image')\n", + "]\n", + "\n", + "original_image = None\n", + "for t in pre_transforms:\n", + " tname = type(t).__name__\n", + " data = t(data)\n", + " image = data['image']\n", + " label = data.get('label')\n", + " guidance = data.get('guidance')\n", + "\n", + " print(\"{} => image shape: {}\".format(tname, image.shape))\n", + "\n", + " guidance = guidance if guidance else [np.roll(data['foreground'], 1).tolist(), []]\n", + " slice_idx = guidance[0][0][0] if guidance else slice_idx\n", + " print('Guidance: {}; Slice Idx: {}'.format(guidance, slice_idx))\n", + " if tname == 'Resized':\n", + " continue\n", + "\n", + " image = image[:, :, slice_idx] if tname in ('LoadNiftid') else image[slice_idx] if tname in (\n", + " 'AsChannelFirstd', 'Spacingd', 'AddGuidanceFromPointsd') else image[0][slice_idx]\n", + " label = None\n", + "\n", + " show_image(image, label, guidance)\n", + " if tname == 'LoadNiftid':\n", + " original_image = data['image']\n", + " if tname == 'AddChanneld':\n", + " original_image_slice = data['image']\n", + " if tname == 'SpatialCropGuidanced':\n", + " spatial_image = data['image']\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "# Evaluation\n", + "model_path = '/workspace/Downloads/models/3d_roi_192_m96_b1_c64.ts'\n", + "model = jit.load(model_path)\n", + "model.cuda()\n", + "model.eval()\n", + "\n", + "inputs = data['image'][None].cuda()\n", + "with torch.no_grad():\n", + " outputs = 
model(inputs)\n", + "outputs = outputs[0]\n", + "data['pred'] = outputs\n", + "\n", + "post_transforms = [\n", + " Activationsd(keys='pred', sigmoid=True),\n", + " AsDiscreted(keys='pred', threshold_values=True, logit_thresh=0.5),\n", + " ToNumpyd(keys='pred'),\n", + " RestoreCroppedLabeld(keys='pred', ref_image='image', mode='nearest'),\n", + "]\n", + "\n", + "pred = None\n", + "for t in post_transforms:\n", + " tname = type(t).__name__\n", + "\n", + " data = t(data)\n", + " image = data['image']\n", + " label = data['pred']\n", + " print(\"{} => image shape: {}, pred shape: {}; slice_idx: {}\".format(tname, image.shape, label.shape, slice_idx))\n", + "\n", + " if tname in 'RestoreCroppedLabeld':\n", + " pred = label\n", + "\n", + " image = original_image[:, :, original_slice_idx]\n", + " label = label[original_slice_idx]\n", + " print(\"PLOT:: {} => image shape: {}, pred shape: {}; min: {}, max: {}, sum: {}\".format(\n", + " tname, image.shape, label.shape, np.min(label), np.max(label), np.sum(label)))\n", + " show_image(image, label)\n", + " elif tname == 'xToNumpyd':\n", + " for i in range(label.shape[-1]):\n", + " img = image[0, i, :, :].detach().cpu().numpy() if torch.is_tensor(image) else image[0][i]\n", + " lab = label[0, i, :, :].detach().cpu().numpy() if torch.is_tensor(label) else label[0][i]\n", + " if np.sum(lab) > 0:\n", + " print(\"PLOT:: {} => image shape: {}, pred shape: {}; min: {}, max: {}, sum: {}\".format(\n", + " i, img.shape, lab.shape, np.min(lab), np.max(lab), np.sum(lab)))\n", + " show_image(img, lab)\n", + " else:\n", + " image = image[0, slice_idx, :, :].detach().cpu().numpy() if torch.is_tensor(image) else image[0][slice_idx]\n", + " label = label[0, slice_idx, :, :].detach().cpu().numpy() if torch.is_tensor(label) else label[0][slice_idx]\n", + " print(\"PLOT:: {} => image shape: {}, pred shape: {}; min: {}, max: {}, sum: {}\".format(\n", + " tname, image.shape, label.shape, np.min(label), np.max(label), np.sum(label)))\n", + " show_image(image, label)\n", + "\n", + "for i in range(pred.shape[0]):\n", + " image = original_image[:, :, i]\n", + " label = pred[i, :, :]\n", + " if np.sum(label) == 0:\n", + " continue\n", + "\n", + " print(\"PLOT:: {} => image shape: {}, pred shape: {}; min: {}, max: {}, sum: {}\".format(\n", + " i, image.shape, label.shape, np.min(label), np.max(label), np.sum(label)))\n", + " show_image(image, label)\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.10" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} \ No newline at end of file diff --git a/deepgrow/ignite/deepgrow_training_2d.py b/deepgrow/ignite/train.py similarity index 78% rename from deepgrow/ignite/deepgrow_training_2d.py rename to deepgrow/ignite/train.py index 271250bfe7..7c2b4651d1 100644 --- a/deepgrow/ignite/deepgrow_training_2d.py +++ b/deepgrow/ignite/train.py @@ -10,6 +10,7 @@ import torch.distributed as dist from monai.apps.deepgrow import ( + SpatialCropForegroundd, AddInitialSeedPointd, FindDiscrepancyRegionsd, AddRandomGuidanced, @@ -31,71 +32,80 @@ MeanDice) from monai.inferers import SimpleInferer from monai.losses import DiceLoss -from monai.networks.nets import BasicUNet +from monai.networks.nets import BasicUNet, UNet, Norm from 
monai.transforms import ( Compose, LoadNumpyd, AddChanneld, - ScaleIntensityRanged, - Resized, + NormalizeIntensityd, ToTensord, ToNumpyd, Activationsd, AsDiscreted, - CropForegroundd, + Resized, ) from monai.utils import set_determinism -def get_network(args): - features = (64, 128, 256, 512, 1024, 64) if args.channels == 64 else (32, 32, 64, 128, 256, 32) - logging.info('Using BasicUnet with features: {}'.format(features)) - return BasicUNet(dimensions=2, in_channels=3, out_channels=1, features=features) - - -def get_pre_transforms(roi_size): +def get_network(network, channels, dimensions): + if network == 'unet': + if channels == 16: + features = (16, 32, 64, 128, 256) + elif channels == 32: + features = (32, 64, 128, 256, 512) + else: + features = (64, 128, 256, 512, 1024) + logging.info('Using Unet with features: {}'.format(features)) + network = UNet(dimensions=dimensions, in_channels=3, out_channels=1, channels=features, strides=[2, 2, 2, 2], + norm=Norm.BATCH) + else: + if channels == 16: + features = (16, 32, 64, 128, 256, 16) + elif channels == 32: + features = (32, 64, 128, 256, 512, 32) + else: + features = (64, 128, 256, 512, 1024, 64) + logging.info('Using BasicUnet with features: {}'.format(features)) + network = BasicUNet(dimensions=dimensions, in_channels=3, out_channels=1, features=features) + return network + + +def get_pre_transforms(roi_size, model_size, dimensions): return Compose([ LoadNumpyd(keys=('image', 'label')), AddChanneld(keys=('image', 'label')), - ScaleIntensityRanged(keys='image', a_min=-1024, a_max=1024, b_min=-1.0, b_max=1.0, clip=True), - CropForegroundd(keys=('image', 'label'), source_key='label', margin=20), - Resized(keys=('image', 'label'), spatial_size=roi_size, mode=('area', 'nearest')), - - AddInitialSeedPointd(label='label', guidance='guidance'), - AddGuidanceSignald(image='image', guidance='guidance'), + SpatialCropForegroundd(keys=('image', 'label'), source_key='label', spatial_size=roi_size), + Resized(keys=('image', 'label'), spatial_size=model_size, mode=('area', 'nearest')), + NormalizeIntensityd(keys='image', subtrahend=208.0, divisor=388.0), + AddInitialSeedPointd(label='label', guidance='guidance', dimensions=dimensions), + AddGuidanceSignald(image='image', guidance='guidance', dimensions=dimensions), ToTensord(keys=('image', 'label')) ]) -def get_click_transforms(sigmoid=True): - transforms = [ +def get_click_transforms(dimensions): + return Compose([ Activationsd(keys='pred', sigmoid=True), ToNumpyd(keys=('image', 'label', 'pred', 'probability', 'guidance')), FindDiscrepancyRegionsd(label='label', pred='pred', discrepancy='discrepancy', batched=True), - AddRandomGuidanced(guidance='guidance', discrepancy='discrepancy', probability='probability', batched=True), - AddGuidanceSignald(image='image', guidance='guidance', batched=True), + AddRandomGuidanced(guidance='guidance', discrepancy='discrepancy', probability='probability', + dimensions=dimensions, batched=True), + AddGuidanceSignald(image='image', guidance='guidance', dimensions=dimensions, batched=True), ToTensord(keys=('image', 'label')) - ] - - if not sigmoid: - transforms.pop(0) - return Compose(transforms) + ]) -def get_post_transforms(sigmoid=True): - transforms = [ +def get_post_transforms(): + return Compose([ Activationsd(keys='pred', sigmoid=True), AsDiscreted(keys='pred', threshold_values=True, logit_thresh=0.5) - ] - - if not sigmoid: - transforms.pop(0) - return Compose(transforms) + ]) def get_loaders(args, pre_transforms, train=True): multi_gpu = args.multi_gpu 
local_rank = args.local_rank + dimensions = args.dimensions dataset_json = os.path.join(args.input, 'dataset.json') if not os.path.exists(dataset_json): @@ -106,8 +116,8 @@ def get_loaders(args, pre_transforms, train=True): datalist=datalist['training'], base_dir=args.dataset_root, output_dir=os.path.join(args.input), - dimension=2, - pixdim=(1.0, 1.0) + dimension=dimensions, + pixdim=[1.0] * dimensions ) with open(dataset_json, 'w') as fp: @@ -165,14 +175,14 @@ def create_trainer(args): else: device = torch.device("cuda" if args.use_gpu else "cpu") - pre_transforms = get_pre_transforms(json.loads(args.roi_size)) - click_transforms = get_click_transforms() + pre_transforms = get_pre_transforms(json.loads(args.roi_size), json.loads(args.model_size), args.dimensions) + click_transforms = get_click_transforms(args.dimensions) post_transform = get_post_transforms() train_loader, val_loader = get_loaders(args, pre_transforms) # define training components - network = get_network(args).to(device) + network = get_network(args.network, args.channels, args.dimensions).to(device) if multi_gpu: network = torch.nn.parallel.DistributedDataParallel(network, device_ids=[local_rank], output_device=local_rank) @@ -249,44 +259,7 @@ def create_trainer(args): return trainer -def strtobool(val): - return bool(distutils.util.strtobool(val)) - - -def main(): - parser = argparse.ArgumentParser() - - parser.add_argument('-s', '--seed', type=int, default=42) - - parser.add_argument('-c', '--channels', type=int, default=64) - parser.add_argument('-d', '--dataset_root', default='/workspace/data/MSD_Spleen') - parser.add_argument('-j', '--dataset_json', default='/workspace/data/MSD_Spleen/dataset.json') - parser.add_argument('-i', '--input', default='/workspace/data/deepgrow/spleen/2D') - parser.add_argument('-o', '--output', default='output') - - parser.add_argument('-g', '--use_gpu', type=strtobool, default='true') - parser.add_argument('-a', '--amp', type=strtobool, default='false') - - parser.add_argument('-e', '--epochs', type=int, default=100) - parser.add_argument('-b', '--batch', type=int, default=16) - parser.add_argument('-x', '--split', type=float, default=0.8) - parser.add_argument('-t', '--limit', type=int, default=0) - - parser.add_argument('-r', '--resume', type=strtobool, default='false') - parser.add_argument('-m', '--model_path', default="model/model.pt") - parser.add_argument('--roi_size', default="[128, 128]") - - parser.add_argument('-f', '--val_freq', type=int, default=1) - parser.add_argument('-lr', '--learning_rate', type=float, default=0.0001) - parser.add_argument('-it', '--max_train_interactions', type=int, default=15) - parser.add_argument('-iv', '--max_val_interactions', type=int, default=5) - - parser.add_argument('--save_interval', type=int, default=10) - parser.add_argument('--multi_gpu', type=strtobool, default='false') - parser.add_argument('--local_rank', type=int, default=0) - parser.add_argument('--export', type=strtobool, default='false') - - args = parser.parse_args() +def run(args): if args.local_rank == 0: for arg in vars(args): logging.info('USING:: {} = {}'.format(arg, getattr(args, arg))) @@ -295,7 +268,7 @@ def main(): if args.export: logging.info('{}:: Loading PT Model from: {}'.format(args.local_rank, args.input)) device = torch.device("cuda" if args.use_gpu else "cpu") - network = get_network(args).to(device) + network = get_network(args.network, args.channels, args.dimensions).to(device) map_location = {"cuda:0": "cuda:{}".format(args.local_rank)} 
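+        # note: checkpoints are saved from rank 0 (cuda:0); map_location remaps those
+        # tensors onto this process's device before the TorchScript export below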
network.load_state_dict(torch.load(args.input, map_location=map_location)) @@ -329,6 +302,51 @@ def main(): dist.destroy_process_group() +def strtobool(val): + return bool(distutils.util.strtobool(val)) + + +def main(): + parser = argparse.ArgumentParser() + + parser.add_argument('-s', '--seed', type=int, default=42) + parser.add_argument('--dimensions', type=int, default=2) + + parser.add_argument('-n', '--network', default='bunet', choices=['unet', 'bunet']) + parser.add_argument('-c', '--channels', type=int, default=32) + parser.add_argument('-d', '--dataset_root', default='/workspace/data/52432') + parser.add_argument('-j', '--dataset_json', default='/workspace/data/52432/dataset.json') + parser.add_argument('-i', '--input', default='/workspace/data/52432/2D') + parser.add_argument('-o', '--output', default='output') + + parser.add_argument('-g', '--use_gpu', type=strtobool, default='true') + parser.add_argument('-a', '--amp', type=strtobool, default='false') + + parser.add_argument('-e', '--epochs', type=int, default=100) + parser.add_argument('-b', '--batch', type=int, default=8) + parser.add_argument('-x', '--split', type=float, default=0.8) + parser.add_argument('-t', '--limit', type=int, default=0) + + parser.add_argument('-r', '--resume', type=strtobool, default='false') + parser.add_argument('-m', '--model_path', default="output/model.pt") + parser.add_argument('--roi_size', default="[256, 256]") + parser.add_argument('--model_size', default="[256, 256]") + + parser.add_argument('-f', '--val_freq', type=int, default=1) + parser.add_argument('-lr', '--learning_rate', type=float, default=0.0001) + parser.add_argument('-it', '--max_train_interactions', type=int, default=15) + parser.add_argument('-iv', '--max_val_interactions', type=int, default=5) + + parser.add_argument('--save_interval', type=int, default=10) + parser.add_argument('--image_interval', type=int, default=1) + parser.add_argument('--multi_gpu', type=strtobool, default='false') + parser.add_argument('--local_rank', type=int, default=0) + parser.add_argument('--export', type=strtobool, default='false') + + args = parser.parse_args() + run(args) + + if __name__ == "__main__": logging.basicConfig( stream=sys.stdout, @@ -339,13 +357,13 @@ def main(): ''' # Single GPU (it will also export) -python deepgrow_training_2d.py +python train.py # Multi GPU (run export separate) python -m torch.distributed.launch \ --nproc_per_node=`nvidia-smi -L | wc -l` \ --nnodes=1 --node_rank=0 --master_addr="localhost" --master_port=1234 \ - -m deepgrow_training_2d --multi_gpu true -e 100 + -m train --multi_gpu true -e 100 -python deepgrow_training_2d.py --export +python train.py --export ''' diff --git a/deepgrow/ignite/train_3d.py b/deepgrow/ignite/train_3d.py new file mode 100644 index 0000000000..54df1de8c5 --- /dev/null +++ b/deepgrow/ignite/train_3d.py @@ -0,0 +1,70 @@ +import argparse +import distutils.util +import logging +import sys + +import train + + +def strtobool(val): + return bool(distutils.util.strtobool(val)) + + +if __name__ == "__main__": + logging.basicConfig( + stream=sys.stdout, + level=logging.INFO, + format='[%(asctime)s.%(msecs)03d][%(levelname)5s](%(name)s) - %(message)s', + datefmt='%Y-%m-%d %H:%M:%S') + + parser = argparse.ArgumentParser() + + parser.add_argument('-s', '--seed', type=int, default=42) + parser.add_argument('--dimensions', type=int, default=3) + + parser.add_argument('-n', '--network', default='bunet', choices=['unet', 'bunet']) + parser.add_argument('-c', '--channels', type=int, default=32) + 
parser.add_argument('-d', '--dataset_root', default='/workspace/data/52432')
+    parser.add_argument('-j', '--dataset_json', default='/workspace/data/52432/dataset.json')
+    parser.add_argument('-i', '--input', default='/workspace/data/52432/3D')
+    parser.add_argument('-o', '--output', default='output3D')
+
+    parser.add_argument('-g', '--use_gpu', type=strtobool, default='true')
+    parser.add_argument('-a', '--amp', type=strtobool, default='false')
+
+    parser.add_argument('-e', '--epochs', type=int, default=200)
+    parser.add_argument('-b', '--batch', type=int, default=1)
+    parser.add_argument('-x', '--split', type=float, default=0.9)
+    parser.add_argument('-t', '--limit', type=int, default=0)
+
+    parser.add_argument('-r', '--resume', type=strtobool, default='false')
+    parser.add_argument('-m', '--model_path', default="output3D/model.pt")
+    parser.add_argument('--roi_size', default="[256, 256, 256]")
+    parser.add_argument('--model_size', default="[128, 128, 128]")
+
+    parser.add_argument('-f', '--val_freq', type=int, default=1)
+    parser.add_argument('-lr', '--learning_rate', type=float, default=0.0001)
+    parser.add_argument('-it', '--max_train_interactions', type=int, default=15)
+    parser.add_argument('-iv', '--max_val_interactions', type=int, default=20)
+
+    parser.add_argument('--save_interval', type=int, default=10)
+    parser.add_argument('--image_interval', type=int, default=5)
+    parser.add_argument('--multi_gpu', type=strtobool, default='false')
+    parser.add_argument('--local_rank', type=int, default=0)
+    parser.add_argument('--export', type=strtobool, default='false')
+
+    args = parser.parse_args()
+    train.run(args)
+
+'''
+# Single GPU (it will also export)
+python train_3d.py
+
+# Multi GPU (run export separate)
+python -m torch.distributed.launch \
+    --nproc_per_node=`nvidia-smi -L | wc -l` \
+    --nnodes=1 --node_rank=0 --master_addr="localhost" --master_port=1234 \
+    -m train_3d --multi_gpu true -e 100
+
+python train_3d.py --export
+'''
diff --git a/deepgrow/ignite/deepgrow_evaluation_2d.py b/deepgrow/ignite/validate.py
similarity index 75%
rename from deepgrow/ignite/deepgrow_evaluation_2d.py
rename to deepgrow/ignite/validate.py
index 566b5ac7b3..f2b643f759 100644
--- a/deepgrow/ignite/deepgrow_evaluation_2d.py
+++ b/deepgrow/ignite/validate.py
@@ -8,6 +8,8 @@
 
 import torch
 
+import train
+from monai.apps.deepgrow.handler import DeepgrowStatsHandler, SegmentationSaver
 from monai.apps.deepgrow.interaction import Interaction
 from monai.engines import SupervisedEvaluator
 from monai.handlers import (
@@ -16,13 +18,6 @@
     MeanDice)
 from monai.inferers import SimpleInferer
 from monai.utils import set_determinism
-from .deepgrow_training_2d import (
-    get_network,
-    get_loaders,
-    get_pre_transforms,
-    get_click_transforms,
-    get_post_transforms
-)
 
 
 def create_validator(args, click):
@@ -30,33 +25,37 @@ def create_validator(args, click):
 
     device = torch.device("cuda" if args.use_gpu else "cpu")
 
-    pre_transforms = get_pre_transforms(json.loads(args.roi_size))
-    click_transforms = get_click_transforms(sigmoid=False)
-    post_transform = get_post_transforms(sigmoid=False)
+    pre_transforms = train.get_pre_transforms(json.loads(args.roi_size), json.loads(args.model_size), args.dimensions)
+    click_transforms = train.get_click_transforms(args.dimensions)
+    post_transform = train.get_post_transforms()
 
     # define training components
-    network = get_network(args).to(device)
+    network = train.get_network(args.network, args.channels, args.dimensions).to(device)
 
     logging.info('Loading Network...')
     map_location = 
{"cuda:0": "cuda:{}".format(args.local_rank)} checkpoint = torch.load(args.model_path, map_location=map_location) - for key in list(checkpoint.keys()): - if 'module.' in key: - checkpoint[key.replace('module.', '')] = checkpoint[key] - del checkpoint[key] - network.load_state_dict(checkpoint) + network.eval() # define event-handlers for engine - _, val_loader = get_loaders(args, pre_transforms, train=False) + _, val_loader = train.get_loaders(args, pre_transforms, train=False) fold_size = int(len(val_loader.dataset) / args.batch / args.folds) if args.folds else 0 logging.info('Using Fold-Size: {}'.format(fold_size)) val_handlers = [ StatsHandler(output_transform=lambda x: None), TensorBoardStatsHandler(log_dir=args.output, output_transform=lambda x: None), + DeepgrowStatsHandler( + log_dir=args.output, + tag_name=f'clicks_{click}_val_dice', + fold_size=int(len(val_loader.dataset) / args.batch / args.folds) if args.folds else 0, + add_stdev=False, + ), ] + if args.save_seg: + val_handlers.append(SegmentationSaver(output_dir=os.path.join(args.output, f'clicks_{click}_images'))) evaluator = SupervisedEvaluator( device=device, @@ -79,6 +78,30 @@ def create_validator(args, click): return evaluator +def run(args): + if args.local_rank == 0: + for arg in vars(args): + logging.info('USING:: {} = {}'.format(arg, getattr(args, arg))) + print("") + + if not os.path.exists(args.output): + logging.info('output path [{}] does not exist. creating it now.'.format(args.output)) + os.makedirs(args.output, exist_ok=True) + + clicks = json.loads(args.max_val_interactions) + for click in clicks: + logging.info('+++++++++++++++++++++++++++++++++++++++++++++++++++++') + logging.info(' CLICKS = {}'.format(click)) + logging.info('+++++++++++++++++++++++++++++++++++++++++++++++++++++') + trainer = create_validator(args, click) + + start_time = time.time() + trainer.run() + end_time = time.time() + + logging.info('Total Run Time {}'.format(end_time - start_time)) + + def strtobool(val): return bool(distutils.util.strtobool(val)) @@ -87,48 +110,30 @@ def main(): parser = argparse.ArgumentParser() parser.add_argument('-s', '--seed', type=int, default=42) + parser.add_argument('--dimensions', type=int, default=3) - parser.add_argument('-n', '--network', default='bunet', choices=['native', 'bunet', 'foo']) - parser.add_argument('-z', '--net_size', type=int, default=64) + parser.add_argument('-n', '--network', default='bunet', choices=['unet', 'bunet']) + parser.add_argument('-c', '--channels', type=int, default=32) parser.add_argument('-f', '--folds', type=int, default=10) parser.add_argument('-d', '--dataset_root', default='/workspace/data/52432') parser.add_argument('-j', '--dataset_json', default='/workspace/data/52432/dataset.json') parser.add_argument('-i', '--input', default='/workspace/data/52432/2D') - parser.add_argument('-o', '--output', default='output') + parser.add_argument('-o', '--output', default='eval') + parser.add_argument('--save_seg', type=strtobool, default='false') parser.add_argument('-g', '--use_gpu', type=strtobool, default='true') parser.add_argument('-b', '--batch', type=int, default=1) parser.add_argument('-t', '--limit', type=int, default=20) parser.add_argument('-m', '--model_path', default="output/model.pt") - parser.add_argument('--roi_size', default="[128, 128]") + parser.add_argument('--roi_size', default="[256, 256]") - parser.add_argument('-iv', '--max_val_interactions', default="[1,2,5,10,15]") + parser.add_argument('-iv', '--max_val_interactions', default="[0,1,2,5,10,15]") 
parser.add_argument('--multi_gpu', type=strtobool, default='false')
     parser.add_argument("--local_rank", type=int, default=0)
 
     args = parser.parse_args()
-    if args.local_rank == 0:
-        for arg in vars(args):
-            logging.info('USING:: {} = {}'.format(arg, getattr(args, arg)))
-        print("")
-
-    if not os.path.exists(args.output):
-        logging.info('output path [{}] does not exist. creating it now.'.format(args.output))
-        os.makedirs(args.output, exist_ok=True)
-
-    clicks = json.loads(args.max_val_interactions)
-    for click in clicks:
-        logging.info('+++++++++++++++++++++++++++++++++++++++++++++++++++++')
-        logging.info('   CLICKS = {}'.format(click))
-        logging.info('+++++++++++++++++++++++++++++++++++++++++++++++++++++')
-        trainer = create_validator(args, click)
-
-        start_time = time.time()
-        trainer.run()
-        end_time = time.time()
-
-        logging.info('Total Run Time {}'.format(end_time - start_time))
+    run(args)
 
 
 if __name__ == "__main__":
diff --git a/deepgrow/ignite/validate_3d.py b/deepgrow/ignite/validate_3d.py
new file mode 100644
index 0000000000..0170493a5c
--- /dev/null
+++ b/deepgrow/ignite/validate_3d.py
@@ -0,0 +1,49 @@
+import argparse
+import distutils.util
+import logging
+import sys
+
+import validate
+
+
+def strtobool(val):
+    return bool(distutils.util.strtobool(val))
+
+
+def main():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument('-s', '--seed', type=int, default=42)
+    parser.add_argument('--dimensions', type=int, default=3)
+
+    parser.add_argument('-n', '--network', default='bunet', choices=['unet', 'bunet'])
+    parser.add_argument('-c', '--channels', type=int, default=32)
+    parser.add_argument('-f', '--folds', type=int, default=10)
+
+    parser.add_argument('-d', '--dataset_root', default='/workspace/data/52432')
+    parser.add_argument('-j', '--dataset_json', default='/workspace/data/52432/dataset.json')
+    parser.add_argument('-i', '--input', default='/workspace/data/52432/3D')
+    parser.add_argument('-o', '--output', default='eval3D')
+    parser.add_argument('--save_seg', type=strtobool, default='false')
+
+    parser.add_argument('-g', '--use_gpu', type=strtobool, default='true')
+    parser.add_argument('-b', '--batch', type=int, default=1)
+    parser.add_argument('-t', '--limit', type=int, default=20)
+    parser.add_argument('-m', '--model_path', default="output3D/model.pt")
+    parser.add_argument('--roi_size', default="[128, 128, 128]")
+    parser.add_argument('--model_size', default="[128, 128, 128]")
+
+    parser.add_argument('-iv', '--max_val_interactions', default="[0,1,2,5,10,15]")
+    parser.add_argument('--multi_gpu', type=strtobool, default='false')
+    parser.add_argument("--local_rank", type=int, default=0)
+
+    args = parser.parse_args()
+    validate.run(args)
+
+
+if __name__ == "__main__":
+    logging.basicConfig(
+        stream=sys.stdout,
+        level=logging.INFO,
+        format='[%(asctime)s.%(msecs)03d][%(levelname)5s] - %(message)s',
+        datefmt='%Y-%m-%d %H:%M:%S')
+    main()

From d1da5faf8cfaba6940b39e41655d1b28ca245c53 Mon Sep 17 00:00:00 2001
From: Sachidanand Alle
Date: Mon, 4 Jan 2021 11:06:01 -0800
Subject: [PATCH 03/10] update train/val examples + add dataset create example

Signed-off-by: Sachidanand Alle
---
 deepgrow/ignite/create_dataset.ipynb    | 141 ++++++++++++++
 deepgrow/ignite/create_dataset_3d.ipynb | 237 ++++++++++++++++++++++
 deepgrow/ignite/inference.ipynb         |  23 ++-
 deepgrow/ignite/inference_3d.ipynb      |  89 ++++++---
 deepgrow/ignite/prepare_dataset.py      |  92 +++++++++
 deepgrow/ignite/train.py                |  76 ++++----
 deepgrow/ignite/train_3d.py             |  17 +-
 deepgrow/ignite/validate.py             |  24 ++-
 deepgrow/ignite/validate_3d.py          |   9 +-
 9 files changed, 615 insertions(+), 93 
deletions(-) create mode 100755 deepgrow/ignite/create_dataset.ipynb create mode 100755 deepgrow/ignite/create_dataset_3d.ipynb create mode 100644 deepgrow/ignite/prepare_dataset.py diff --git a/deepgrow/ignite/create_dataset.ipynb b/deepgrow/ignite/create_dataset.ipynb new file mode 100755 index 0000000000..bed88a63f9 --- /dev/null +++ b/deepgrow/ignite/create_dataset.ipynb @@ -0,0 +1,141 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import argparse\n", + "import glob\n", + "import json\n", + "import os\n", + "import shutil\n", + "import copy\n", + "\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "\n", + "from monai.config import print_config\n", + "from monai.transforms import (\n", + " LoadNiftid,\n", + " AsChannelFirstd,\n", + " Spacingd,\n", + " Orientationd,\n", + "\n", + " AddChanneld,\n", + " Resized,\n", + " NormalizeIntensityd,\n", + " ToTensord\n", + ")\n", + "\n", + "from byoc import (\n", + " SpatialCropForegroundd\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "roi_size = [256, 256]\n", + "data = {'image': '/workspace/data/52432/Training/img/img0001.nii.gz', 'label': '/workspace/data/52432/Training/label/label0001.nii.gz'}\n", + "slice_idx = 111\n", + "\n", + "transforms = [\n", + " LoadNiftid(keys=('image', 'label')),\n", + " AsChannelFirstd(keys=('image', 'label')),\n", + " Spacingd(keys=('image', 'label'), pixdim=(1.0, 1.0), mode=('bilinear', 'nearest')),\n", + " Orientationd(keys=('image', 'label'), axcodes=\"RAS\"),\n", + "]\n", + "\n", + "pre_transforms = [\n", + " AddChanneld(keys=('image', 'label')),\n", + " SpatialCropForegroundd(keys=('image', 'label'), source_key='label', spatial_size=roi_size),\n", + " Resized(keys=('image', 'label'), spatial_size=roi_size, mode=('area', 'nearest')),\n", + " NormalizeIntensityd(keys='image', subtrahend=208.0, divisor=388.0),\n", + " #ToTensord(keys=('image', 'label'))\n", + "]\n", + "\n", + "def show_image(image, label):\n", + " plt.figure(\"check\", (12, 6))\n", + " plt.subplot(1, 2, 1)\n", + " plt.title(\"image\")\n", + " plt.imshow(image, cmap=\"gray\")\n", + " plt.colorbar()\n", + "\n", + " plt.subplot(1, 2, 2)\n", + " plt.title(\"label\")\n", + " plt.imshow(label)\n", + " plt.colorbar()\n", + " plt.show()\n", + "\n", + "\n", + "for t in transforms:\n", + " tname = type(t).__name__ \n", + "\n", + " data = t(data)\n", + " image = data['image']\n", + " label = data['label']\n", + "\n", + " print(f\"{tname} => image shape: {image.shape}, label shape: {label.shape}\")\n", + "\n", + " image = image[:, :, slice_idx] if tname in ('LoadNiftid') else image[slice_idx, :, :]\n", + " label = label[:, :, slice_idx] if tname in ('LoadNiftid') else label[slice_idx, :, :]\n", + " show_image(image, label)\n", + " \n", + "for i in range(2, 3): # 6 is liver\n", + " pdata = copy.deepcopy(data)\n", + " image = pdata['image']\n", + " label = pdata['label']\n", + "\n", + " # Get slice and matching label\n", + " label = (label == i).astype(np.float32)\n", + " image = image[slice_idx, :, :]\n", + " label = label[slice_idx, :, :]\n", + " \n", + " if np.sum(label) == 0:\n", + " continue\n", + "\n", + " pdata['image'] = image\n", + " pdata['label'] = label\n", + "\n", + " for t in pre_transforms:\n", + " tname = type(t).__name__ \n", + " pdata = t(pdata) if tname != 'CropForegroundd' else pdata\n", + " \n", + " if tname == 'SpatialCropForegroundd':\n", + " print(\"Cropped size: 
{}\".format(pdata['image_meta_dict']['foreground_cropped_shape']))\n", + "\n", + " image = pdata['image']\n", + " label = pdata['label']\n", + " print(f\"region-{i}:: {tname} => image shape: {image.shape}, label shape: {label.shape}; sum: {np.sum(label)}; min: {np.min(label)}; max: {np.max(label)}\")\n", + "\n", + " show_image(image[0], label[0])\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.10" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/deepgrow/ignite/create_dataset_3d.ipynb b/deepgrow/ignite/create_dataset_3d.ipynb new file mode 100755 index 0000000000..48eb987ec8 --- /dev/null +++ b/deepgrow/ignite/create_dataset_3d.ipynb @@ -0,0 +1,237 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "import copy\n", + "\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "from IPython.display import HTML\n", + "from celluloid import Camera # getting the camera\n", + "\n", + "from byoc.transforms import (\n", + " SpatialCropForegroundd,\n", + " AddInitialSeedPointd,\n", + " AddGuidanceSignald,\n", + " FindAllValidSlicesd,\n", + ")\n", + "from monai.transforms import (\n", + " LoadNiftid,\n", + " AsChannelFirstd,\n", + " Spacingd,\n", + " Orientationd,\n", + " AddChanneld,\n", + " NormalizeIntensityd,\n", + " Resized,\n", + ")\n", + "\n", + "def draw_points(guidance, slice_idx):\n", + " if guidance is None:\n", + " return\n", + " colors = ['r+', 'b+']\n", + " for color, points in zip(colors, guidance):\n", + " for p in points:\n", + " if p[-3] != slice_idx:\n", + " continue\n", + " p1 = p[-1]\n", + " p2 = p[-2]\n", + " plt.plot(p1, p2, color, 'MarkerSize', 30)\n", + "\n", + "\n", + "def show_image(image, label, slice_idx=None, guidance=None):\n", + " plt.figure(\"check\", (12, 6))\n", + " plt.subplot(1, 2, 1)\n", + " plt.title(\"image\")\n", + " plt.imshow(image, cmap=\"gray\")\n", + "\n", + " #if label is not None:\n", + " # masked = np.ma.masked_where(label == 0, label)\n", + " # plt.imshow(masked, 'jet', interpolation='none', alpha=0.7)\n", + "\n", + " draw_points(guidance, slice_idx)\n", + " plt.colorbar()\n", + "\n", + " if label is not None:\n", + " plt.subplot(1, 2, 2)\n", + " plt.title(\"label\")\n", + " plt.imshow(label)\n", + " plt.colorbar()\n", + " # draw_points(guidance, slice_idx)\n", + " plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "roi_size = [128, 256, 256]\n", + "model_size = [128, 192, 192]\n", + "data = {'image': '/workspace/data/52432/Training/img/img0001.nii.gz', 'label': '/workspace/data/52432/Training/label/label0001.nii.gz'}\n", + "slice_idx = 111\n", + "region = 6 # liver = 6\n", + "\n", + "transforms = [\n", + " LoadNiftid(keys=('image', 'label')),\n", + " AsChannelFirstd(keys=('image', 'label')),\n", + " Spacingd(keys=('image', 'label'), pixdim=(1.0, 1.0, 1.0), mode=('bilinear', 'nearest')),\n", + " Orientationd(keys=('image', 'label'), axcodes=\"RAS\"),\n", + "]\n", + "\n", + "original_label = None\n", + "for t in transforms:\n", + " tname = type(t).__name__ \n", + "\n", + " data = t(data)\n", + " image = data['image']\n", + " label = 
data['label']\n", + "\n", + " print(f\"{tname} => image shape: {image.shape}, label shape: {label.shape}\")\n", + "\n", + " image = image[:, :, slice_idx] if tname in ('LoadNiftid') else image[slice_idx, :, :]\n", + " label = label[:, :, slice_idx] if tname in ('LoadNiftid') else label[slice_idx, :, :]\n", + " show_image(image, label)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pre_transforms = [\n", + " AddChanneld(keys=('image', 'label')),\n", + " SpatialCropForegroundd(keys=('image', 'label'), source_key='label', spatial_size=roi_size),\n", + " Resized(keys=('image', 'label'), spatial_size=model_size, mode=('area', 'nearest')),\n", + " NormalizeIntensityd(keys='image', subtrahend=208.0, divisor=388.0),\n", + " FindAllValidSlicesd(label='label', sids='sids'),\n", + "]\n", + "\n", + "pdata = copy.deepcopy(data)\n", + "pdata['label'] = pdata['label'] == region\n", + "original_label = None\n", + "\n", + "for t in pre_transforms:\n", + " tname = type(t).__name__ \n", + " pdata = t(pdata)\n", + "\n", + " image = pdata['image']\n", + " label = pdata['label']\n", + " guidance = pdata.get('guidance')\n", + "\n", + " if tname == 'AddChanneld':\n", + " original_label = label\n", + "\n", + " factor = 1 if original_label is None else label.shape[1] / original_label.shape[1]\n", + " sid = guidance[0][0][1] if guidance is not None else int(slice_idx * factor)\n", + " #print('Guidance: {}'.format(guidance.tolist() if guidance is not None else None))\n", + " print(f\"{tname} => {sid} => image shape: {image.shape}, label shape: {label.shape}\")\n", + "\n", + " image = image[0][sid]\n", + " label = label[0][sid]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print('Total {} valid slices: {}'.format(len(pdata['sids'].tolist()), pdata['sids'].tolist()))\n", + "\n", + "rand_transforms = [\n", + " AddInitialSeedPointd(label='label', guidance='guidance'),\n", + " #AddGuidanceSignald(image='image', guidance='guidance'),\n", + " #ToTensord(keys=('image', 'label'))\n", + "]\n", + "\n", + "sid_counts = {}\n", + "for i in range(200):\n", + " rdata = copy.deepcopy(pdata)\n", + " #rdata['sids'] = None\n", + " for t in rand_transforms:\n", + " tname = type(t).__name__ \n", + " rdata = t(rdata)\n", + "\n", + " image = rdata['image']\n", + " label = rdata['label']\n", + " guidance = rdata.get('guidance')\n", + "\n", + " sid = guidance[0][0][1]\n", + " if sid_counts.get(sid) is None:\n", + " sid_counts[sid] = 0\n", + " sid_counts[sid] = sid_counts[sid] + 1\n", + " #print(f\"{tname} => {sid} => image shape: {image.shape}, label shape: {label.shape}\")\n", + "\n", + "print('Used sid count: {} of {}'.format(len(sid_counts), len(pdata['sids'])))\n", + "image = image[0][sid]\n", + "label = label[0][sid]\n", + "if tname == 'AddInitialSeedPointd':\n", + " show_image(image, label, sid, guidance)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fig, ax = plt.subplots(figsize=(18, 12)) # make it bigger\n", + "camera = Camera(fig) # the camera gets our figure\n", + "\n", + "rdata = rdata\n", + "image = rdata['image']\n", + "label = rdata['label']\n", + "for i in range(0, image.shape[1]):\n", + " # Get slice and matching label\n", + " if np.sum(label[0][i]) == 0:\n", + " continue\n", + "\n", + " j = int(i * original_label.shape[1] / label.shape[1])\n", + " #show_image(image[0][i], label[0][i], i)\n", + "\n", + " 
ax.imshow(image[0][i], cmap=\"gray\") # plotting\n", + " masked = np.ma.masked_where(label[0][i] == 0, label[0][i])\n", + " ax.imshow(masked, 'hsv', interpolation='none', alpha=0.7)\n", + " camera.snap()\n", + "\n", + "animation = camera.animate()\n", + "HTML(animation.to_html5_video())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.10" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/deepgrow/ignite/inference.ipynb b/deepgrow/ignite/inference.ipynb index 4e3deda5c8..7fd67b3952 100644 --- a/deepgrow/ignite/inference.ipynb +++ b/deepgrow/ignite/inference.ipynb @@ -11,7 +11,7 @@ "import torch\n", "from torch import jit\n", "\n", - "from monai.apps.deepgrow.transforms import (\n", + "from byoc.transforms import (\n", " AddGuidanceFromPointsd,\n", " AddGuidanceSignald,\n", " Fetch2DSliced,\n", @@ -63,7 +63,24 @@ " plt.imshow(label)\n", " plt.colorbar()\n", " # draw_points(guidance)\n", - " plt.show()" + " plt.show()\n", + "\n", + "\n", + "def print_data(data):\n", + " for k in data:\n", + " v = data[k]\n", + "\n", + " d = type(v)\n", + " if type(v) in (int, float, bool, str, dict, tuple):\n", + " d = v\n", + " elif hasattr(v, 'shape'):\n", + " d = v.shape\n", + "\n", + " if k in ('image_meta_dict', 'label_meta_dict'):\n", + " for m in data[k]:\n", + " print('{} Meta:: {} => {}'.format(k, m, data[k][m]))\n", + " else:\n", + " print('Data key: {} = {}'.format(k, d))\n" ] }, { @@ -209,4 +226,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/deepgrow/ignite/inference_3d.ipynb b/deepgrow/ignite/inference_3d.ipynb index 873398407c..450e1cc791 100644 --- a/deepgrow/ignite/inference_3d.ipynb +++ b/deepgrow/ignite/inference_3d.ipynb @@ -9,9 +9,11 @@ "import matplotlib.pyplot as plt\n", "import numpy as np\n", "import torch\n", + "from IPython.display import HTML\n", + "from celluloid import Camera # getting the camera\n", "from torch import jit\n", "\n", - "from monai.apps.deepgrow.transforms import (\n", + "from byoc.transforms import (\n", " AddGuidanceFromPointsd,\n", " AddGuidanceSignald,\n", " ResizeGuidanced,\n", @@ -32,18 +34,20 @@ ")\n", "\n", "\n", - "def draw_points(guidance):\n", + "def draw_points(guidance, slice_idx):\n", " if guidance is None:\n", " return\n", " colors = ['r+', 'b+']\n", " for color, points in zip(colors, guidance):\n", " for p in points:\n", + " if p[0] != slice_idx:\n", + " continue\n", " p1 = p[-1]\n", " p2 = p[-2]\n", " plt.plot(p1, p2, color, 'MarkerSize', 30)\n", "\n", "\n", - "def show_image(image, label, guidance=None):\n", + "def show_image(image, label, guidance=None, slice_idx=None):\n", " plt.figure(\"check\", (12, 6))\n", " plt.subplot(1, 2, 1)\n", " plt.title(\"image\")\n", @@ -53,7 +57,7 @@ " masked = np.ma.masked_where(label == 0, label)\n", " plt.imshow(masked, 'jet', interpolation='none', alpha=0.7)\n", "\n", - " draw_points(guidance)\n", + " draw_points(guidance, slice_idx)\n", " plt.colorbar()\n", "\n", " if label is not None:\n", @@ -61,8 +65,25 @@ " plt.title(\"label\")\n", " plt.imshow(label)\n", " plt.colorbar()\n", - " # draw_points(guidance)\n", - " plt.show()" + " 
# draw_points(guidance, slice_idx)\n", + " plt.show()\n", + "\n", + "\n", + "def print_data(data):\n", + " for k in data:\n", + " v = data[k]\n", + "\n", + " d = type(v)\n", + " if type(v) in (int, float, bool, str, dict, tuple):\n", + " d = v\n", + " elif hasattr(v, 'shape'):\n", + " d = v.shape\n", + "\n", + " if k in ('image_meta_dict', 'label_meta_dict'):\n", + " for m in data[k]:\n", + " print('{} Meta:: {} => {}'.format(k, m, data[k][m]))\n", + " else:\n", + " print('Data key: {} = {}'.format(k, d))\n" ] }, { @@ -72,8 +93,8 @@ "outputs": [], "source": [ "# Pre Processing\n", - "roi_size = [192, 192, 192]\n", - "model_size = [96, 96, 96]\n", + "roi_size = [128, 256, 256]\n", + "model_size = [128, 128, 128]\n", "pixdim = (1.0, 1.0, 1.0)\n", "dimensions = 3\n", "\n", @@ -81,7 +102,9 @@ " # 'image': '/salle/Downloads/spleen_19.nii.gz',\n", " # 'foreground': [[354, 336, 40],[259,381,40]],\n", " 'image': '/salle/Downloads/_image.nii.gz',\n", - " 'foreground': [[73, 177, 90]],\n", + " 'foreground': [[303, 154, 124], [202, 264, 124], [298, 215, 164], [298, 222, 35], [298, 216, 77], [286, 211, 176]],\n", + " #'foreground': [[67, 174, 105], [70, 174, 134], [70, 177, 151], [104, 141, 161], [100, 160, 177], [73, 176, 89], [99, 161, 183], [68, 171, 86], [105, 171, 186]],\n", + " #'foreground': [[278, 202, 142], [158, 269, 142], [306, 113, 142], [293, 220, 35], [326, 155, 67], [138, 268, 142], [330, 155, 97]], #[[73, 177, 90]],\n", " 'background': [],\n", "}\n", "slice_idx = original_slice_idx = data['foreground'][0][2]\n", @@ -123,27 +146,30 @@ " 'AsChannelFirstd', 'Spacingd', 'AddGuidanceFromPointsd') else image[0][slice_idx]\n", " label = None\n", "\n", - " show_image(image, label, guidance)\n", + " show_image(image, label, guidance, slice_idx)\n", " if tname == 'LoadNiftid':\n", " original_image = data['image']\n", " if tname == 'AddChanneld':\n", " original_image_slice = data['image']\n", " if tname == 'SpatialCropGuidanced':\n", - " spatial_image = data['image']\n" + " spatial_image = data['image']\n", + "\n", + "image = data['image']\n", + "label = data.get('label')\n", + "guidance = data.get('guidance')\n", + "for i in range(image.shape[1]):\n", + " print('Slice Idx: {}'.format(i))\n", + " show_image(image[0][i], None, guidance, i)\n" ] }, { "cell_type": "code", "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, + "metadata": {}, "outputs": [], "source": [ "# Evaluation\n", - "model_path = '/workspace/Downloads/models/3d_roi_192_m96_b1_c64.ts'\n", + "model_path = '/workspace/Downloads/models/3d_r128x256x256_m128x128x128_c32.ts' #3d_roi_128x256x256_m96x192x192_b1_c32.ts' #3d_roi_192_m96_b1_c64.ts'\n", "model = jit.load(model_path)\n", "model.cuda()\n", "model.eval()\n", @@ -178,7 +204,7 @@ " print(\"PLOT:: {} => image shape: {}, pred shape: {}; min: {}, max: {}, sum: {}\".format(\n", " tname, image.shape, label.shape, np.min(label), np.max(label), np.sum(label)))\n", " show_image(image, label)\n", - " elif tname == 'xToNumpyd':\n", + " elif tname == 'ToNumpyd':\n", " for i in range(label.shape[-1]):\n", " img = image[0, i, :, :].detach().cpu().numpy() if torch.is_tensor(image) else image[0][i]\n", " lab = label[0, i, :, :].detach().cpu().numpy() if torch.is_tensor(label) else label[0][i]\n", @@ -193,16 +219,35 @@ " tname, image.shape, label.shape, np.min(label), np.max(label), np.sum(label)))\n", " show_image(image, label)\n", "\n", + "fig, ax = plt.subplots() # make it bigger\n", + "camera = Camera(fig) # the camera gets our figure\n", + "\n", "for i in 
range(pred.shape[0]):\n", " image = original_image[:, :, i]\n", " label = pred[i, :, :]\n", " if np.sum(label) == 0:\n", " continue\n", "\n", - " print(\"PLOT:: {} => image shape: {}, pred shape: {}; min: {}, max: {}, sum: {}\".format(\n", - " i, image.shape, label.shape, np.min(label), np.max(label), np.sum(label)))\n", - " show_image(image, label)\n" + " # print(\"PLOT:: {} => image shape: {}, pred shape: {}; min: {}, max: {}, sum: {}\".format(\n", + " # i, image.shape, label.shape, np.min(label), np.max(label), np.sum(label)))\n", + " # show_image(label, None)\n", + "\n", + " ax.imshow(image, cmap=\"gray\") # plotting\n", + " if np.sum(label) > 0:\n", + " masked = np.ma.masked_where(label == 0, label)\n", + " ax.imshow(masked, 'hsv', interpolation='none', alpha=0.7)\n", + " camera.snap()\n", + "\n", + "animation = camera.animate()\n", + "HTML(animation.to_html5_video())" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { @@ -226,4 +271,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/deepgrow/ignite/prepare_dataset.py b/deepgrow/ignite/prepare_dataset.py new file mode 100644 index 0000000000..1e9d6b927f --- /dev/null +++ b/deepgrow/ignite/prepare_dataset.py @@ -0,0 +1,92 @@ +import argparse +import distutils.util +import json +import logging +import os +import sys + +from byoc.dataset import create_dataset +from monai.data import partition_dataset + + +def prepare_datalist(args): + dimensions = args.dimensions + dataset_json = os.path.join(args.output, 'dataset.json') + if not os.path.exists(dataset_json): + logging.info('Processing dataset...') + with open(os.path.join(args.dataset_json)) as f: + datalist = json.load(f) + + datalist = create_dataset( + datalist=datalist[args.datalist_key], + base_dir=args.dataset_root, + output_dir=args.output, + dimension=dimensions, + pixdim=[1.0] * dimensions, + limit=args.limit, + relative_path=args.relative_path + ) + + with open(dataset_json, 'w') as fp: + json.dump(datalist, fp, indent=2) + else: + logging.info('Pre-load existing dataset.json') + + dataset_json = os.path.join(args.output, 'dataset.json') + with open(dataset_json) as f: + datalist = json.load(f) + logging.info('+++ Dataset File: {}'.format(dataset_json)) + logging.info('+++ Total Records: {}'.format(len(datalist))) + logging.info('') + + train_ds, val_ds = partition_dataset(datalist, ratios=[args.split, (1 - args.split)], shuffle=True, seed=args.seed) + dataset_json = os.path.join(args.output, 'dataset-0.json') + with open(dataset_json, 'w') as fp: + json.dump({'training': train_ds, 'validation': val_ds}, fp, indent=2) + + logging.info('*** Dataset File: {}'.format(dataset_json)) + logging.info('*** Total Records for Training: {}'.format(len(train_ds))) + logging.info('*** Total Records for Validation: {}'.format(len(val_ds))) + + +def run(args): + for arg in vars(args): + logging.info('USING:: {} = {}'.format(arg, getattr(args, arg))) + logging.info("") + + if not os.path.exists(args.output): + logging.info('output path [{}] does not exist. 
creating it now.'.format(args.output)) + os.makedirs(args.output, exist_ok=True) + prepare_datalist(args) + + +def strtobool(val): + return bool(distutils.util.strtobool(val)) + + +def main(): + parser = argparse.ArgumentParser() + + parser.add_argument('-s', '--seed', type=int, default=42) + parser.add_argument('-dims', '--dimensions', type=int, default=2) + + parser.add_argument('-d', '--dataset_root', default='/workspace/data/52432') + parser.add_argument('-j', '--dataset_json', default='/workspace/data/52432/dataset.json') + parser.add_argument('-k', '--datalist_key', default='training') + + parser.add_argument('-o', '--output', default='/workspace/data/52432/2D') + parser.add_argument('-x', '--split', type=float, default=0.9) + parser.add_argument('-t', '--limit', type=int, default=0) + parser.add_argument('-r', '--relative_path', type=strtobool, default='false') + + args = parser.parse_args() + run(args) + + +if __name__ == "__main__": + logging.basicConfig( + stream=sys.stdout, + level=logging.INFO, + format='[%(asctime)s.%(msecs)03d][%(levelname)5s] - %(message)s', + datefmt='%Y-%m-%d %H:%M:%S') + main() diff --git a/deepgrow/ignite/train.py b/deepgrow/ignite/train.py index 7c2b4651d1..5414bb0e40 100644 --- a/deepgrow/ignite/train.py +++ b/deepgrow/ignite/train.py @@ -9,14 +9,15 @@ import torch import torch.distributed as dist -from monai.apps.deepgrow import ( +from byoc.handler import DeepgrowStatsHandler +from byoc.interaction import Interaction +from byoc.transforms import ( SpatialCropForegroundd, AddInitialSeedPointd, FindDiscrepancyRegionsd, AddRandomGuidanced, AddGuidanceSignald, - create_dataset, - Interaction + FindAllValidSlicesd, ) from monai.data import partition_dataset from monai.data.dataloader import DataLoader @@ -70,27 +71,27 @@ def get_network(network, channels, dimensions): return network -def get_pre_transforms(roi_size, model_size, dimensions): +def get_pre_transforms(roi_size, model_size): return Compose([ LoadNumpyd(keys=('image', 'label')), AddChanneld(keys=('image', 'label')), SpatialCropForegroundd(keys=('image', 'label'), source_key='label', spatial_size=roi_size), Resized(keys=('image', 'label'), spatial_size=model_size, mode=('area', 'nearest')), NormalizeIntensityd(keys='image', subtrahend=208.0, divisor=388.0), - AddInitialSeedPointd(label='label', guidance='guidance', dimensions=dimensions), - AddGuidanceSignald(image='image', guidance='guidance', dimensions=dimensions), + FindAllValidSlicesd(label='label', sids='sids'), + AddInitialSeedPointd(label='label', guidance='guidance', sids='sids'), + AddGuidanceSignald(image='image', guidance='guidance'), ToTensord(keys=('image', 'label')) ]) -def get_click_transforms(dimensions): +def get_click_transforms(): return Compose([ Activationsd(keys='pred', sigmoid=True), ToNumpyd(keys=('image', 'label', 'pred', 'probability', 'guidance')), FindDiscrepancyRegionsd(label='label', pred='pred', discrepancy='discrepancy', batched=True), - AddRandomGuidanced(guidance='guidance', discrepancy='discrepancy', probability='probability', - dimensions=dimensions, batched=True), - AddGuidanceSignald(image='image', guidance='guidance', dimensions=dimensions, batched=True), + AddRandomGuidanced(guidance='guidance', discrepancy='discrepancy', probability='probability', batched=True), + AddGuidanceSignald(image='image', guidance='guidance', batched=True), ToTensord(keys=('image', 'label')) ]) @@ -105,30 +106,13 @@ def get_post_transforms(): def get_loaders(args, pre_transforms, train=True): multi_gpu = args.multi_gpu 
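# [Editor's note - not part of the patch] This hunk removes the inline
# create_dataset() call: get_loaders() now expects a dataset.json that was
# already produced by the new prepare_dataset.py. A minimal sketch of that
# contract, reusing only calls shown in this patch (the path is the default
# from these scripts, used here purely for illustration):
#
#     import json
#     from monai.data import partition_dataset
#
#     with open('/workspace/data/52432/2D/dataset.json') as f:
#         datalist = json.load(f)  # flat list of {'image': ..., 'label': ...} records
#     train_list, val_list = partition_dataset(
#         datalist, ratios=[0.9, 0.1], shuffle=True, seed=42)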
local_rank = args.local_rank - dimensions = args.dimensions - dataset_json = os.path.join(args.input, 'dataset.json') - if not os.path.exists(dataset_json): - with open(os.path.join(args.dataset_json)) as f: - datalist = json.load(f) - - datalist = create_dataset( - datalist=datalist['training'], - base_dir=args.dataset_root, - output_dir=os.path.join(args.input), - dimension=dimensions, - pixdim=[1.0] * dimensions - ) - - with open(dataset_json, 'w') as fp: - json.dump(datalist, fp, indent=2) - - dataset_json = os.path.join(args.input, 'dataset.json') + dataset_json = os.path.join(args.input) with open(dataset_json) as f: datalist = json.load(f) total_d = len(datalist) - datalist = datalist[-args.limit:] + datalist = datalist[0:args.limit] if args.limit else datalist total_l = len(datalist) if multi_gpu: @@ -141,9 +125,13 @@ def get_loaders(args, pre_transforms, train=True): )[local_rank] if train: - train_datalist, val_datalist = partition_dataset(datalist, ratios=[args.split, (1 - args.split)]) + train_datalist, val_datalist = partition_dataset( + datalist, + ratios=[args.split, (1 - args.split)], + shuffle=True, + seed=args.seed) - train_ds = PersistentDataset(train_datalist, pre_transforms) + train_ds = PersistentDataset(train_datalist, pre_transforms, cache_dir=args.cache_dir) train_loader = DataLoader( train_ds, batch_size=args.batch, @@ -155,7 +143,7 @@ def get_loaders(args, pre_transforms, train=True): train_loader = None val_datalist = datalist - val_ds = PersistentDataset(val_datalist, pre_transforms) + val_ds = PersistentDataset(val_datalist, pre_transforms, cache_dir=args.cache_dir) val_loader = DataLoader(val_ds, batch_size=args.batch, num_workers=8) logging.info('{}:: Total Records used for Validation is: {}/{}/{}'.format( local_rank, len(val_ds), total_l, total_d)) @@ -175,8 +163,8 @@ def create_trainer(args): else: device = torch.device("cuda" if args.use_gpu else "cpu") - pre_transforms = get_pre_transforms(json.loads(args.roi_size), json.loads(args.model_size), args.dimensions) - click_transforms = get_click_transforms(args.dimensions) + pre_transforms = get_pre_transforms(args.roi_size, args.model_size) + click_transforms = get_click_transforms() post_transform = get_post_transforms() train_loader, val_loader = get_loaders(args, pre_transforms) @@ -195,10 +183,14 @@ def create_trainer(args): val_handlers = [ StatsHandler(output_transform=lambda x: None), TensorBoardStatsHandler(log_dir=args.output, output_transform=lambda x: None), + DeepgrowStatsHandler(log_dir=args.output, tag_name='val_dice', image_interval=args.image_interval), CheckpointSaver(save_dir=args.output, save_dict={"net": network}, save_key_metric=True, save_final=True, save_interval=args.save_interval, final_filename='model.pt') ] - val_handlers = val_handlers if local_rank == 0 else None + val_handlers = val_handlers if local_rank == 0 else None if args.dimensions == 2 else [ + DeepgrowStatsHandler(log_dir=args.output, tag_name='val_dice', image_interval=args.image_interval) + ] + evaluator = SupervisedEvaluator( device=device, val_data_loader=val_loader, @@ -260,6 +252,9 @@ def create_trainer(args): def run(args): + args.roi_size = json.loads(args.roi_size) + args.model_size = json.loads(args.model_size) + if args.local_rank == 0: for arg in vars(args): logging.info('USING:: {} = {}'.format(arg, getattr(args, arg))) @@ -309,14 +304,12 @@ def strtobool(val): def main(): parser = argparse.ArgumentParser() - parser.add_argument('-s', '--seed', type=int, default=42) + parser.add_argument('-s', '--seed', 
type=int, default=23) parser.add_argument('--dimensions', type=int, default=2) parser.add_argument('-n', '--network', default='bunet', choices=['unet', 'bunet']) parser.add_argument('-c', '--channels', type=int, default=32) - parser.add_argument('-d', '--dataset_root', default='/workspace/data/52432') - parser.add_argument('-j', '--dataset_json', default='/workspace/data/52432/dataset.json') - parser.add_argument('-i', '--input', default='/workspace/data/52432/2D') + parser.add_argument('-i', '--input', default='/workspace/data/52432/2D/dataset.json') parser.add_argument('-o', '--output', default='output') parser.add_argument('-g', '--use_gpu', type=strtobool, default='true') @@ -324,8 +317,9 @@ def main(): parser.add_argument('-e', '--epochs', type=int, default=100) parser.add_argument('-b', '--batch', type=int, default=8) - parser.add_argument('-x', '--split', type=float, default=0.8) + parser.add_argument('-x', '--split', type=float, default=0.9) parser.add_argument('-t', '--limit', type=int, default=0) + parser.add_argument('--cache_dir', type=str, default=None) parser.add_argument('-r', '--resume', type=strtobool, default='false') parser.add_argument('-m', '--model_path', default="output/model.pt") @@ -337,7 +331,7 @@ def main(): parser.add_argument('-it', '--max_train_interactions', type=int, default=15) parser.add_argument('-iv', '--max_val_interactions', type=int, default=5) - parser.add_argument('--save_interval', type=int, default=10) + parser.add_argument('--save_interval', type=int, default=3) parser.add_argument('--image_interval', type=int, default=1) parser.add_argument('--multi_gpu', type=strtobool, default='false') parser.add_argument('--local_rank', type=int, default=0) diff --git a/deepgrow/ignite/train_3d.py b/deepgrow/ignite/train_3d.py index 54df1de8c5..9ef77d4ec8 100644 --- a/deepgrow/ignite/train_3d.py +++ b/deepgrow/ignite/train_3d.py @@ -19,14 +19,12 @@ def strtobool(val): parser = argparse.ArgumentParser() - parser.add_argument('-s', '--seed', type=int, default=42) + parser.add_argument('-s', '--seed', type=int, default=23) parser.add_argument('--dimensions', type=int, default=3) parser.add_argument('-n', '--network', default='bunet', choices=['unet', 'bunet']) parser.add_argument('-c', '--channels', type=int, default=32) - parser.add_argument('-d', '--dataset_root', default='/workspace/data/52432') - parser.add_argument('-j', '--dataset_json', default='/workspace/data/52432/dataset.json') - parser.add_argument('-i', '--input', default='/workspace/data/52432/3D') + parser.add_argument('-i', '--input', default='/workspace/data/52432/3D/flatten/dataset.json') parser.add_argument('-o', '--output', default='output3D') parser.add_argument('-g', '--use_gpu', type=strtobool, default='true') @@ -36,25 +34,26 @@ def strtobool(val): parser.add_argument('-b', '--batch', type=int, default=1) parser.add_argument('-x', '--split', type=float, default=0.9) parser.add_argument('-t', '--limit', type=int, default=0) + parser.add_argument('--cache_dir', type=str, default=None) parser.add_argument('-r', '--resume', type=strtobool, default='false') parser.add_argument('-m', '--model_path', default="output3D/model.pt") - parser.add_argument('--roi_size', default="[256, 256, 256]") - parser.add_argument('--model_size', default="[128, 128, 128]") + parser.add_argument('--roi_size', default="[128, 256, 256]") + parser.add_argument('--model_size', default="[128, 192, 192]") parser.add_argument('-f', '--val_freq', type=int, default=1) parser.add_argument('-lr', '--learning_rate', 
type=float, default=0.0001) parser.add_argument('-it', '--max_train_interactions', type=int, default=15) - parser.add_argument('-iv', '--max_val_interactions', type=int, default=20) + parser.add_argument('-iv', '--max_val_interactions', type=int, default=10) - parser.add_argument('--save_interval', type=int, default=10) + parser.add_argument('--save_interval', type=int, default=20) parser.add_argument('--image_interval', type=int, default=5) parser.add_argument('--multi_gpu', type=strtobool, default='false') parser.add_argument('--local_rank', type=int, default=0) parser.add_argument('--export', type=strtobool, default='false') args = parser.parse_args() - train.main(args) + train.run(args) ''' # Single GPU (it will also export) diff --git a/deepgrow/ignite/validate.py b/deepgrow/ignite/validate.py index f2b643f759..aa08346f71 100644 --- a/deepgrow/ignite/validate.py +++ b/deepgrow/ignite/validate.py @@ -9,8 +9,8 @@ import torch import train -from monai.apps.deepgrow.handler import DeepgrowStatsHandler, SegmentationSaver -from monai.apps.deepgrow.interaction import Interaction +from byoc.handler import DeepgrowStatsHandler, SegmentationSaver +from byoc.interaction import Interaction from monai.engines import SupervisedEvaluator from monai.handlers import ( StatsHandler, @@ -25,8 +25,8 @@ def create_validator(args, click): device = torch.device("cuda" if args.use_gpu else "cpu") - pre_transforms = train.get_pre_transforms(json.loads(args.roi_size), json.loads(args.model_size), args.dimensions) - click_transforms = train.get_click_transforms(args.dimensions) + pre_transforms = train.get_pre_transforms(json.loads(args.roi_size), json.loads(args.model_size)) + click_transforms = train.get_click_transforms() post_transform = train.get_post_transforms() # define training components @@ -50,8 +50,7 @@ def create_validator(args, click): DeepgrowStatsHandler( log_dir=args.output, tag_name=f'clicks_{click}_val_dice', - fold_size=int(len(val_loader.dataset) / args.batch / args.folds) if args.folds else 0, - add_stdev=False, + fold_size=int(len(val_loader.dataset) / args.batch / args.folds) if args.folds else 0 ), ] if args.save_seg: @@ -93,10 +92,10 @@ def run(args): logging.info('+++++++++++++++++++++++++++++++++++++++++++++++++++++') logging.info(' CLICKS = {}'.format(click)) logging.info('+++++++++++++++++++++++++++++++++++++++++++++++++++++') - trainer = create_validator(args, click) + evaluator = create_validator(args, click) start_time = time.time() - trainer.run() + evaluator.run() end_time = time.time() logging.info('Total Run Time {}'.format(end_time - start_time)) @@ -109,16 +108,14 @@ def strtobool(val): def main(): parser = argparse.ArgumentParser() - parser.add_argument('-s', '--seed', type=int, default=42) - parser.add_argument('--dimensions', type=int, default=3) + parser.add_argument('-s', '--seed', type=int, default=23) + parser.add_argument('--dimensions', type=int, default=2) parser.add_argument('-n', '--network', default='bunet', choices=['unet', 'bunet']) parser.add_argument('-c', '--channels', type=int, default=32) parser.add_argument('-f', '--folds', type=int, default=10) - parser.add_argument('-d', '--dataset_root', default='/workspace/data/52432') - parser.add_argument('-j', '--dataset_json', default='/workspace/data/52432/dataset.json') - parser.add_argument('-i', '--input', default='/workspace/data/52432/2D') + parser.add_argument('-i', '--input', default='/workspace/data/52432/2D/dataset.json') parser.add_argument('-o', '--output', default='eval') 
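# [Editor's note - not part of the patch] validate.py evaluates the model at
# several click budgets: --max_val_interactions takes a JSON list (default
# "[0,1,2,5,10,15]") and run() builds one evaluator per budget, each logging
# a clicks_<n>_val_dice metric. A minimal sketch of that driver loop,
# mirroring the renaming (trainer -> evaluator) done in this hunk:
#
#     import json
#     for click in json.loads(args.max_val_interactions):
#         evaluator = create_validator(args, click)
#         evaluator.run()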
parser.add_argument('--save_seg', type=strtobool, default='false') @@ -127,6 +124,7 @@ def main(): parser.add_argument('-t', '--limit', type=int, default=20) parser.add_argument('-m', '--model_path', default="output/model.pt") parser.add_argument('--roi_size', default="[256, 256]") + parser.add_argument('--model_size', default="[256, 256]") parser.add_argument('-iv', '--max_val_interactions', default="[0,1,2,5,10,15]") parser.add_argument('--multi_gpu', type=strtobool, default='false') diff --git a/deepgrow/ignite/validate_3d.py b/deepgrow/ignite/validate_3d.py index 0170493a5c..17a214257f 100644 --- a/deepgrow/ignite/validate_3d.py +++ b/deepgrow/ignite/validate_3d.py @@ -13,16 +13,14 @@ def strtobool(val): def main(): parser = argparse.ArgumentParser() - parser.add_argument('-s', '--seed', type=int, default=42) + parser.add_argument('-s', '--seed', type=int, default=23) parser.add_argument('--dimensions', type=int, default=3) parser.add_argument('-n', '--network', default='bunet', choices=['unet', 'bunet']) parser.add_argument('-c', '--channels', type=int, default=32) parser.add_argument('-f', '--folds', type=int, default=10) - parser.add_argument('-d', '--dataset_root', default='/workspace/data/52432') - parser.add_argument('-j', '--dataset_json', default='/workspace/data/52432/dataset.json') - parser.add_argument('-i', '--input', default='/workspace/data/52432/3D') + parser.add_argument('-i', '--input', default='/workspace/data/52432/3D/dataset.json') parser.add_argument('-o', '--output', default='eval3D') parser.add_argument('--save_seg', type=strtobool, default='false') @@ -30,7 +28,8 @@ def main(): parser.add_argument('-b', '--batch', type=int, default=1) parser.add_argument('-t', '--limit', type=int, default=20) parser.add_argument('-m', '--model_path', default="output3D/model.pt") - parser.add_argument('--roi_size', default="[128, 128, 128]") + parser.add_argument('--roi_size', default="[256, 256, 256]") + parser.add_argument('--model_size', default="[128, 128, 128]") parser.add_argument('-iv', '--max_val_interactions', default="[0,1,2,5,10,15]") parser.add_argument('--multi_gpu', type=strtobool, default='false') From ee58dddb64ee8cdcf5acdb93c375f1731059c7d4 Mon Sep 17 00:00:00 2001 From: Sachidanand Alle Date: Fri, 15 Jan 2021 16:54:05 -0800 Subject: [PATCH 04/10] fix monai changes Signed-off-by: Sachidanand Alle --- deepgrow/ignite/create_dataset.ipynb | 141 -------------- deepgrow/ignite/create_dataset_3d.ipynb | 237 ------------------------ deepgrow/ignite/inference.ipynb | 8 +- deepgrow/ignite/inference_3d.ipynb | 10 +- deepgrow/ignite/prepare_dataset.py | 2 +- deepgrow/ignite/train.py | 17 +- deepgrow/ignite/train_3d.py | 6 +- deepgrow/ignite/validate.py | 10 +- deepgrow/ignite/validate_3d.py | 1 + 9 files changed, 29 insertions(+), 403 deletions(-) delete mode 100755 deepgrow/ignite/create_dataset.ipynb delete mode 100755 deepgrow/ignite/create_dataset_3d.ipynb diff --git a/deepgrow/ignite/create_dataset.ipynb b/deepgrow/ignite/create_dataset.ipynb deleted file mode 100755 index bed88a63f9..0000000000 --- a/deepgrow/ignite/create_dataset.ipynb +++ /dev/null @@ -1,141 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import argparse\n", - "import glob\n", - "import json\n", - "import os\n", - "import shutil\n", - "import copy\n", - "\n", - "import numpy as np\n", - "import matplotlib.pyplot as plt\n", - "\n", - "from monai.config import print_config\n", - "from monai.transforms import (\n", - " 
LoadNiftid,\n", - " AsChannelFirstd,\n", - " Spacingd,\n", - " Orientationd,\n", - "\n", - " AddChanneld,\n", - " Resized,\n", - " NormalizeIntensityd,\n", - " ToTensord\n", - ")\n", - "\n", - "from byoc import (\n", - " SpatialCropForegroundd\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "roi_size = [256, 256]\n", - "data = {'image': '/workspace/data/52432/Training/img/img0001.nii.gz', 'label': '/workspace/data/52432/Training/label/label0001.nii.gz'}\n", - "slice_idx = 111\n", - "\n", - "transforms = [\n", - " LoadNiftid(keys=('image', 'label')),\n", - " AsChannelFirstd(keys=('image', 'label')),\n", - " Spacingd(keys=('image', 'label'), pixdim=(1.0, 1.0), mode=('bilinear', 'nearest')),\n", - " Orientationd(keys=('image', 'label'), axcodes=\"RAS\"),\n", - "]\n", - "\n", - "pre_transforms = [\n", - " AddChanneld(keys=('image', 'label')),\n", - " SpatialCropForegroundd(keys=('image', 'label'), source_key='label', spatial_size=roi_size),\n", - " Resized(keys=('image', 'label'), spatial_size=roi_size, mode=('area', 'nearest')),\n", - " NormalizeIntensityd(keys='image', subtrahend=208.0, divisor=388.0),\n", - " #ToTensord(keys=('image', 'label'))\n", - "]\n", - "\n", - "def show_image(image, label):\n", - " plt.figure(\"check\", (12, 6))\n", - " plt.subplot(1, 2, 1)\n", - " plt.title(\"image\")\n", - " plt.imshow(image, cmap=\"gray\")\n", - " plt.colorbar()\n", - "\n", - " plt.subplot(1, 2, 2)\n", - " plt.title(\"label\")\n", - " plt.imshow(label)\n", - " plt.colorbar()\n", - " plt.show()\n", - "\n", - "\n", - "for t in transforms:\n", - " tname = type(t).__name__ \n", - "\n", - " data = t(data)\n", - " image = data['image']\n", - " label = data['label']\n", - "\n", - " print(f\"{tname} => image shape: {image.shape}, label shape: {label.shape}\")\n", - "\n", - " image = image[:, :, slice_idx] if tname in ('LoadNiftid') else image[slice_idx, :, :]\n", - " label = label[:, :, slice_idx] if tname in ('LoadNiftid') else label[slice_idx, :, :]\n", - " show_image(image, label)\n", - " \n", - "for i in range(2, 3): # 6 is liver\n", - " pdata = copy.deepcopy(data)\n", - " image = pdata['image']\n", - " label = pdata['label']\n", - "\n", - " # Get slice and matching label\n", - " label = (label == i).astype(np.float32)\n", - " image = image[slice_idx, :, :]\n", - " label = label[slice_idx, :, :]\n", - " \n", - " if np.sum(label) == 0:\n", - " continue\n", - "\n", - " pdata['image'] = image\n", - " pdata['label'] = label\n", - "\n", - " for t in pre_transforms:\n", - " tname = type(t).__name__ \n", - " pdata = t(pdata) if tname != 'CropForegroundd' else pdata\n", - " \n", - " if tname == 'SpatialCropForegroundd':\n", - " print(\"Cropped size: {}\".format(pdata['image_meta_dict']['foreground_cropped_shape']))\n", - "\n", - " image = pdata['image']\n", - " label = pdata['label']\n", - " print(f\"region-{i}:: {tname} => image shape: {image.shape}, label shape: {label.shape}; sum: {np.sum(label)}; min: {np.min(label)}; max: {np.max(label)}\")\n", - "\n", - " show_image(image[0], label[0])\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.10" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git 
a/deepgrow/ignite/create_dataset_3d.ipynb b/deepgrow/ignite/create_dataset_3d.ipynb deleted file mode 100755 index 48eb987ec8..0000000000 --- a/deepgrow/ignite/create_dataset_3d.ipynb +++ /dev/null @@ -1,237 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "import copy\n", - "\n", - "import matplotlib.pyplot as plt\n", - "import numpy as np\n", - "from IPython.display import HTML\n", - "from celluloid import Camera # getting the camera\n", - "\n", - "from byoc.transforms import (\n", - " SpatialCropForegroundd,\n", - " AddInitialSeedPointd,\n", - " AddGuidanceSignald,\n", - " FindAllValidSlicesd,\n", - ")\n", - "from monai.transforms import (\n", - " LoadNiftid,\n", - " AsChannelFirstd,\n", - " Spacingd,\n", - " Orientationd,\n", - " AddChanneld,\n", - " NormalizeIntensityd,\n", - " Resized,\n", - ")\n", - "\n", - "def draw_points(guidance, slice_idx):\n", - " if guidance is None:\n", - " return\n", - " colors = ['r+', 'b+']\n", - " for color, points in zip(colors, guidance):\n", - " for p in points:\n", - " if p[-3] != slice_idx:\n", - " continue\n", - " p1 = p[-1]\n", - " p2 = p[-2]\n", - " plt.plot(p1, p2, color, 'MarkerSize', 30)\n", - "\n", - "\n", - "def show_image(image, label, slice_idx=None, guidance=None):\n", - " plt.figure(\"check\", (12, 6))\n", - " plt.subplot(1, 2, 1)\n", - " plt.title(\"image\")\n", - " plt.imshow(image, cmap=\"gray\")\n", - "\n", - " #if label is not None:\n", - " # masked = np.ma.masked_where(label == 0, label)\n", - " # plt.imshow(masked, 'jet', interpolation='none', alpha=0.7)\n", - "\n", - " draw_points(guidance, slice_idx)\n", - " plt.colorbar()\n", - "\n", - " if label is not None:\n", - " plt.subplot(1, 2, 2)\n", - " plt.title(\"label\")\n", - " plt.imshow(label)\n", - " plt.colorbar()\n", - " # draw_points(guidance, slice_idx)\n", - " plt.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "roi_size = [128, 256, 256]\n", - "model_size = [128, 192, 192]\n", - "data = {'image': '/workspace/data/52432/Training/img/img0001.nii.gz', 'label': '/workspace/data/52432/Training/label/label0001.nii.gz'}\n", - "slice_idx = 111\n", - "region = 6 # liver = 6\n", - "\n", - "transforms = [\n", - " LoadNiftid(keys=('image', 'label')),\n", - " AsChannelFirstd(keys=('image', 'label')),\n", - " Spacingd(keys=('image', 'label'), pixdim=(1.0, 1.0, 1.0), mode=('bilinear', 'nearest')),\n", - " Orientationd(keys=('image', 'label'), axcodes=\"RAS\"),\n", - "]\n", - "\n", - "original_label = None\n", - "for t in transforms:\n", - " tname = type(t).__name__ \n", - "\n", - " data = t(data)\n", - " image = data['image']\n", - " label = data['label']\n", - "\n", - " print(f\"{tname} => image shape: {image.shape}, label shape: {label.shape}\")\n", - "\n", - " image = image[:, :, slice_idx] if tname in ('LoadNiftid') else image[slice_idx, :, :]\n", - " label = label[:, :, slice_idx] if tname in ('LoadNiftid') else label[slice_idx, :, :]\n", - " show_image(image, label)\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "pre_transforms = [\n", - " AddChanneld(keys=('image', 'label')),\n", - " SpatialCropForegroundd(keys=('image', 'label'), source_key='label', spatial_size=roi_size),\n", - " Resized(keys=('image', 'label'), spatial_size=model_size, mode=('area', 'nearest')),\n", - " NormalizeIntensityd(keys='image', subtrahend=208.0, divisor=388.0),\n", - " 
FindAllValidSlicesd(label='label', sids='sids'),\n", - "]\n", - "\n", - "pdata = copy.deepcopy(data)\n", - "pdata['label'] = pdata['label'] == region\n", - "original_label = None\n", - "\n", - "for t in pre_transforms:\n", - " tname = type(t).__name__ \n", - " pdata = t(pdata)\n", - "\n", - " image = pdata['image']\n", - " label = pdata['label']\n", - " guidance = pdata.get('guidance')\n", - "\n", - " if tname == 'AddChanneld':\n", - " original_label = label\n", - "\n", - " factor = 1 if original_label is None else label.shape[1] / original_label.shape[1]\n", - " sid = guidance[0][0][1] if guidance is not None else int(slice_idx * factor)\n", - " #print('Guidance: {}'.format(guidance.tolist() if guidance is not None else None))\n", - " print(f\"{tname} => {sid} => image shape: {image.shape}, label shape: {label.shape}\")\n", - "\n", - " image = image[0][sid]\n", - " label = label[0][sid]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print('Total {} valid slices: {}'.format(len(pdata['sids'].tolist()), pdata['sids'].tolist()))\n", - "\n", - "rand_transforms = [\n", - " AddInitialSeedPointd(label='label', guidance='guidance'),\n", - " #AddGuidanceSignald(image='image', guidance='guidance'),\n", - " #ToTensord(keys=('image', 'label'))\n", - "]\n", - "\n", - "sid_counts = {}\n", - "for i in range(200):\n", - " rdata = copy.deepcopy(pdata)\n", - " #rdata['sids'] = None\n", - " for t in rand_transforms:\n", - " tname = type(t).__name__ \n", - " rdata = t(rdata)\n", - "\n", - " image = rdata['image']\n", - " label = rdata['label']\n", - " guidance = rdata.get('guidance')\n", - "\n", - " sid = guidance[0][0][1]\n", - " if sid_counts.get(sid) is None:\n", - " sid_counts[sid] = 0\n", - " sid_counts[sid] = sid_counts[sid] + 1\n", - " #print(f\"{tname} => {sid} => image shape: {image.shape}, label shape: {label.shape}\")\n", - "\n", - "print('Used sid count: {} of {}'.format(len(sid_counts), len(pdata['sids'])))\n", - "image = image[0][sid]\n", - "label = label[0][sid]\n", - "if tname == 'AddInitialSeedPointd':\n", - " show_image(image, label, sid, guidance)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "fig, ax = plt.subplots(figsize=(18, 12)) # make it bigger\n", - "camera = Camera(fig) # the camera gets our figure\n", - "\n", - "rdata = rdata\n", - "image = rdata['image']\n", - "label = rdata['label']\n", - "for i in range(0, image.shape[1]):\n", - " # Get slice and matching label\n", - " if np.sum(label[0][i]) == 0:\n", - " continue\n", - "\n", - " j = int(i * original_label.shape[1] / label.shape[1])\n", - " #show_image(image[0][i], label[0][i], i)\n", - "\n", - " ax.imshow(image[0][i], cmap=\"gray\") # plotting\n", - " masked = np.ma.masked_where(label[0][i] == 0, label[0][i])\n", - " ax.imshow(masked, 'hsv', interpolation='none', alpha=0.7)\n", - " camera.snap()\n", - "\n", - "animation = camera.animate()\n", - "HTML(animation.to_html5_video())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.10" - } - }, - "nbformat": 4, - 
"nbformat_minor": 4 -} diff --git a/deepgrow/ignite/inference.ipynb b/deepgrow/ignite/inference.ipynb index 7fd67b3952..482616d2f7 100644 --- a/deepgrow/ignite/inference.ipynb +++ b/deepgrow/ignite/inference.ipynb @@ -11,7 +11,7 @@ "import torch\n", "from torch import jit\n", "\n", - "from byoc.transforms import (\n", + "from monai.apps.deepgrow.transforms import (\n", " AddGuidanceFromPointsd,\n", " AddGuidanceSignald,\n", " Fetch2DSliced,\n", @@ -22,7 +22,7 @@ "from monai.transforms import (\n", " AsChannelFirstd,\n", " Spacingd,\n", - " LoadNiftid,\n", + " LoadImaged,\n", " AddChanneld,\n", " NormalizeIntensityd,\n", " ToTensord,\n", @@ -103,7 +103,7 @@ "slice_idx = original_slice_idx = data['foreground'][0][2]\n", "\n", "pre_transforms = [\n", - " LoadNiftid(keys='image'),\n", + " LoadImaged(keys='image'),\n", " AsChannelFirstd(keys='image'),\n", " Spacingd(keys='image', pixdim=pixdim, mode='bilinear'),\n", "\n", @@ -226,4 +226,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} +} \ No newline at end of file diff --git a/deepgrow/ignite/inference_3d.ipynb b/deepgrow/ignite/inference_3d.ipynb index 450e1cc791..9d24250de8 100644 --- a/deepgrow/ignite/inference_3d.ipynb +++ b/deepgrow/ignite/inference_3d.ipynb @@ -13,7 +13,7 @@ "from celluloid import Camera # getting the camera\n", "from torch import jit\n", "\n", - "from byoc.transforms import (\n", + "from monai.apps.deepgrow.transforms import (\n", " AddGuidanceFromPointsd,\n", " AddGuidanceSignald,\n", " ResizeGuidanced,\n", @@ -23,7 +23,7 @@ "from monai.transforms import (\n", " AsChannelFirstd,\n", " Spacingd,\n", - " LoadNiftid,\n", + " LoadImaged,\n", " AddChanneld,\n", " NormalizeIntensityd,\n", " ToTensord,\n", @@ -110,7 +110,7 @@ "slice_idx = original_slice_idx = data['foreground'][0][2]\n", "\n", "pre_transforms = [\n", - " LoadNiftid(keys='image'),\n", + " LoadImaged(keys='image'),\n", " AsChannelFirstd(keys='image'),\n", " Spacingd(keys='image', pixdim=pixdim, mode='bilinear'),\n", "\n", @@ -122,7 +122,7 @@ " Resized(keys='image', spatial_size=model_size, mode='area'),\n", " ResizeGuidanced(guidance='guidance', ref_image='image'),\n", " NormalizeIntensityd(keys='image', subtrahend=208.0, divisor=388.0),\n", - " AddGuidanceSignald(image='image', guidance='guidance', dimensions=dimensions),\n", + " AddGuidanceSignald(image='image', guidance='guidance'),\n", " ToTensord(keys='image')\n", "]\n", "\n", @@ -271,4 +271,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} +} \ No newline at end of file diff --git a/deepgrow/ignite/prepare_dataset.py b/deepgrow/ignite/prepare_dataset.py index 1e9d6b927f..dbfa4a8f7c 100644 --- a/deepgrow/ignite/prepare_dataset.py +++ b/deepgrow/ignite/prepare_dataset.py @@ -5,7 +5,7 @@ import os import sys -from byoc.dataset import create_dataset +from monai.apps.deepgrow.dataset import create_dataset from monai.data import partition_dataset diff --git a/deepgrow/ignite/train.py b/deepgrow/ignite/train.py index 5414bb0e40..0bd9050f1f 100644 --- a/deepgrow/ignite/train.py +++ b/deepgrow/ignite/train.py @@ -9,9 +9,9 @@ import torch import torch.distributed as dist -from byoc.handler import DeepgrowStatsHandler -from byoc.interaction import Interaction -from byoc.transforms import ( +from monai.apps.deepgrow.handler import DeepgrowStatsHandler +from monai.apps.deepgrow.interaction import Interaction +from monai.apps.deepgrow.transforms import ( SpatialCropForegroundd, AddInitialSeedPointd, FindDiscrepancyRegionsd, @@ -33,10 +33,11 @@ MeanDice) from monai.inferers import SimpleInferer from monai.losses import 
DiceLoss -from monai.networks.nets import BasicUNet, UNet, Norm +from monai.networks.layers import Norm +from monai.networks.nets import BasicUNet, UNet from monai.transforms import ( Compose, - LoadNumpyd, + LoadImaged, AddChanneld, NormalizeIntensityd, ToTensord, @@ -73,7 +74,7 @@ def get_network(network, channels, dimensions): def get_pre_transforms(roi_size, model_size): return Compose([ - LoadNumpyd(keys=('image', 'label')), + LoadImaged(keys=('image', 'label')), AddChanneld(keys=('image', 'label')), SpatialCropForegroundd(keys=('image', 'label'), source_key='label', spatial_size=roi_size), Resized(keys=('image', 'label'), spatial_size=model_size, mode=('area', 'nearest')), @@ -187,9 +188,7 @@ def create_trainer(args): CheckpointSaver(save_dir=args.output, save_dict={"net": network}, save_key_metric=True, save_final=True, save_interval=args.save_interval, final_filename='model.pt') ] - val_handlers = val_handlers if local_rank == 0 else None if args.dimensions == 2 else [ - DeepgrowStatsHandler(log_dir=args.output, tag_name='val_dice', image_interval=args.image_interval) - ] + val_handlers = val_handlers if local_rank == 0 else None evaluator = SupervisedEvaluator( device=device, diff --git a/deepgrow/ignite/train_3d.py b/deepgrow/ignite/train_3d.py index 9ef77d4ec8..ef6b2e53b1 100644 --- a/deepgrow/ignite/train_3d.py +++ b/deepgrow/ignite/train_3d.py @@ -24,7 +24,7 @@ def strtobool(val): parser.add_argument('-n', '--network', default='bunet', choices=['unet', 'bunet']) parser.add_argument('-c', '--channels', type=int, default=32) - parser.add_argument('-i', '--input', default='/workspace/data/52432/3D/flatten/dataset.json') + parser.add_argument('-i', '--input', default='/workspace/data/52432/3D/dataset.json') parser.add_argument('-o', '--output', default='output3D') parser.add_argument('-g', '--use_gpu', type=strtobool, default='true') @@ -38,13 +38,13 @@ def strtobool(val): parser.add_argument('-r', '--resume', type=strtobool, default='false') parser.add_argument('-m', '--model_path', default="output3D/model.pt") - parser.add_argument('--roi_size', default="[128, 256, 256]") + parser.add_argument('--roi_size', default="[128, 192, 192]") parser.add_argument('--model_size', default="[128, 192, 192]") parser.add_argument('-f', '--val_freq', type=int, default=1) parser.add_argument('-lr', '--learning_rate', type=float, default=0.0001) parser.add_argument('-it', '--max_train_interactions', type=int, default=15) - parser.add_argument('-iv', '--max_val_interactions', type=int, default=10) + parser.add_argument('-iv', '--max_val_interactions', type=int, default=20) parser.add_argument('--save_interval', type=int, default=20) parser.add_argument('--image_interval', type=int, default=5) diff --git a/deepgrow/ignite/validate.py b/deepgrow/ignite/validate.py index aa08346f71..d16810c549 100644 --- a/deepgrow/ignite/validate.py +++ b/deepgrow/ignite/validate.py @@ -9,8 +9,8 @@ import torch import train -from byoc.handler import DeepgrowStatsHandler, SegmentationSaver -from byoc.interaction import Interaction +from monai.apps.deepgrow.handler import DeepgrowStatsHandler, SegmentationSaver +from monai.apps.deepgrow.interaction import Interaction from monai.engines import SupervisedEvaluator from monai.handlers import ( StatsHandler, @@ -25,7 +25,7 @@ def create_validator(args, click): device = torch.device("cuda" if args.use_gpu else "cpu") - pre_transforms = train.get_pre_transforms(json.loads(args.roi_size), json.loads(args.model_size)) + pre_transforms = train.get_pre_transforms(args.roi_size, 
args.model_size) click_transforms = train.get_click_transforms() post_transform = train.get_post_transforms() @@ -78,6 +78,9 @@ def create_validator(args, click): def run(args): + args.roi_size = json.loads(args.roi_size) + args.model_size = json.loads(args.model_size) + if args.local_rank == 0: for arg in vars(args): logging.info('USING:: {} = {}'.format(arg, getattr(args, arg))) @@ -118,6 +121,7 @@ def main(): parser.add_argument('-i', '--input', default='/workspace/data/52432/2D/dataset.json') parser.add_argument('-o', '--output', default='eval') parser.add_argument('--save_seg', type=strtobool, default='false') + parser.add_argument('--cache_dir', type=str, default=None) parser.add_argument('-g', '--use_gpu', type=strtobool, default='true') parser.add_argument('-b', '--batch', type=int, default=1) diff --git a/deepgrow/ignite/validate_3d.py b/deepgrow/ignite/validate_3d.py index 17a214257f..efaf6367e8 100644 --- a/deepgrow/ignite/validate_3d.py +++ b/deepgrow/ignite/validate_3d.py @@ -23,6 +23,7 @@ def main(): parser.add_argument('-i', '--input', default='/workspace/data/52432/3D/dataset.json') parser.add_argument('-o', '--output', default='eval3D') parser.add_argument('--save_seg', type=strtobool, default='false') + parser.add_argument('--cache_dir', type=str, default=None) parser.add_argument('-g', '--use_gpu', type=strtobool, default='true') parser.add_argument('-b', '--batch', type=int, default=1) From 9d2f0d5ba98b4a3f30bac47651dd7d11d1cdf804 Mon Sep 17 00:00:00 2001 From: Sachidanand Alle Date: Fri, 22 Jan 2021 05:03:37 -0800 Subject: [PATCH 05/10] clean up + fix loadnifitd Signed-off-by: Sachidanand Alle --- deepgrow/ignite/inference.ipynb | 21 +++++----- deepgrow/ignite/inference_3d.ipynb | 64 ++++++++++++++++-------------- 2 files changed, 44 insertions(+), 41 deletions(-) diff --git a/deepgrow/ignite/inference.ipynb b/deepgrow/ignite/inference.ipynb index 482616d2f7..9705a15700 100644 --- a/deepgrow/ignite/inference.ipynb +++ b/deepgrow/ignite/inference.ipynb @@ -91,14 +91,14 @@ "source": [ "# Pre Processing\n", "roi_size = [256, 256]\n", + "model_size = [128, 192, 192]\n", "pixdim = (1.0, 1.0)\n", "dimensions = 2\n", "\n", "data = {\n", - " 'image': '/salle/Downloads/spleen_19.nii.gz',\n", - " 'foreground': [[354, 336, 40]], # ,[259,381,40]],\n", - " 'background': [],\n", - " 'spatial_size': [384, 384]\n", + " 'image': '_image.nii.gz',\n", + " 'foreground': [[66, 180, 105]],\n", + " 'background': []\n", "}\n", "slice_idx = original_slice_idx = data['foreground'][0][2]\n", "\n", @@ -134,9 +134,8 @@ " tname, image.shape, label.shape if label is not None else None))\n", "\n", " image = image if tname == 'Fetch2DSliced' else image[:, :, slice_idx] if tname in (\n", - " 'LoadNiftid') else image[slice_idx, :, :]\n", - " label = label if tname == 'Fetch2DSliced' else label[:, :, slice_idx] if tname in (\n", - " 'xyz') else label[slice_idx, :, :] if label is not None else None\n", + " 'LoadImaged') else image[slice_idx, :, :]\n", + " label = None\n", "\n", " guidance = guidance if guidance else [np.roll(data['foreground'], 1).tolist(), []]\n", " print('Guidance: {}'.format(guidance))\n", @@ -144,7 +143,7 @@ " show_image(image, label, guidance)\n", " if tname == 'Fetch2DSliced':\n", " slice_idx = 0\n", - " if tname == 'LoadNiftid':\n", + " if tname == 'LoadImaged':\n", " original_image = data['image']\n", " if tname == 'AddChanneld':\n", " original_image_slice = data['image']\n" @@ -157,7 +156,7 @@ "outputs": [], "source": [ "# Evaluation\n", - "model_path = 
'/workspace/Downloads/models/roi_b8_256x256_c32.ts'\n", + "model_path = '/workspace/Data/models/deepgrow_2d.ts'\n", "model = jit.load(model_path)\n", "model.cuda()\n", "model.eval()\n", @@ -179,12 +178,12 @@ " tname = type(t).__name__\n", "\n", " data = t(data)\n", - " image = data['image']\n", + " image = original_image if tname == 'RestoreCroppedLabeld' else data['image']\n", " label = data['pred']\n", " print(\"{} => image shape: {}, pred shape: {}\".format(tname, image.shape, label.shape))\n", "\n", " if tname in 'RestoreCroppedLabeld':\n", - " image = original_image[:, :, original_slice_idx]\n", + " image = image[:, :, original_slice_idx]\n", " label = label[0, :, :].detach().cpu().numpy() if torch.is_tensor(label) else label[original_slice_idx]\n", " print(\"PLOT:: {} => image shape: {}, pred shape: {}; min: {}, max: {}, sum: {}\".format(\n", " tname, image.shape, label.shape, np.min(label), np.max(label), np.sum(label)))\n", diff --git a/deepgrow/ignite/inference_3d.ipynb b/deepgrow/ignite/inference_3d.ipynb index 9d24250de8..466330c174 100644 --- a/deepgrow/ignite/inference_3d.ipynb +++ b/deepgrow/ignite/inference_3d.ipynb @@ -9,8 +9,6 @@ "import matplotlib.pyplot as plt\n", "import numpy as np\n", "import torch\n", - "from IPython.display import HTML\n", - "from celluloid import Camera # getting the camera\n", "from torch import jit\n", "\n", "from monai.apps.deepgrow.transforms import (\n", @@ -93,18 +91,15 @@ "outputs": [], "source": [ "# Pre Processing\n", - "roi_size = [128, 256, 256]\n", - "model_size = [128, 128, 128]\n", + "roi_size = [256, 256]\n", + "model_size = [128, 192, 192]\n", "pixdim = (1.0, 1.0, 1.0)\n", "dimensions = 3\n", "\n", "data = {\n", - " # 'image': '/salle/Downloads/spleen_19.nii.gz',\n", - " # 'foreground': [[354, 336, 40],[259,381,40]],\n", - " 'image': '/salle/Downloads/_image.nii.gz',\n", - " 'foreground': [[303, 154, 124], [202, 264, 124], [298, 215, 164], [298, 222, 35], [298, 216, 77], [286, 211, 176]],\n", - " #'foreground': [[67, 174, 105], [70, 174, 134], [70, 177, 151], [104, 141, 161], [100, 160, 177], [73, 176, 89], [99, 161, 183], [68, 171, 86], [105, 171, 186]],\n", - " #'foreground': [[278, 202, 142], [158, 269, 142], [306, 113, 142], [293, 220, 35], [326, 155, 67], [138, 268, 142], [330, 155, 97]], #[[73, 177, 90]],\n", + " 'image': '_image.nii.gz',\n", + " #'foreground': [[303, 154, 124], [202, 264, 124], [298, 215, 164], [298, 222, 35], [298, 216, 77], [286, 211, 176]], # Liver\n", + " 'foreground': [[66, 180, 105], [66, 180, 145]], # Spleen\n", " 'background': [],\n", "}\n", "slice_idx = original_slice_idx = data['foreground'][0][2]\n", @@ -142,12 +137,12 @@ " if tname == 'Resized':\n", " continue\n", "\n", - " image = image[:, :, slice_idx] if tname in ('LoadNiftid') else image[slice_idx] if tname in (\n", + " image = image[:, :, slice_idx] if tname in ('LoadImaged') else image[slice_idx] if tname in (\n", " 'AsChannelFirstd', 'Spacingd', 'AddGuidanceFromPointsd') else image[0][slice_idx]\n", " label = None\n", "\n", " show_image(image, label, guidance, slice_idx)\n", - " if tname == 'LoadNiftid':\n", + " if tname == 'LoadImaged':\n", " original_image = data['image']\n", " if tname == 'AddChanneld':\n", " original_image_slice = data['image']\n", @@ -158,8 +153,9 @@ "label = data.get('label')\n", "guidance = data.get('guidance')\n", "for i in range(image.shape[1]):\n", - " print('Slice Idx: {}'.format(i))\n", - " show_image(image[0][i], None, guidance, i)\n" + " #print('Slice Idx: {}'.format(i))\n", + " #show_image(image[0][i], None, 
guidance, i)\n", + " pass\n" ] }, { @@ -169,7 +165,7 @@ "outputs": [], "source": [ "# Evaluation\n", - "model_path = '/workspace/Downloads/models/3d_r128x256x256_m128x128x128_c32.ts' #3d_roi_128x256x256_m96x192x192_b1_c32.ts' #3d_roi_192_m96_b1_c64.ts'\n", + "model_path = '/workspace/Data/models/deepgrow_3d.ts'\n", "model = jit.load(model_path)\n", "model.cuda()\n", "model.eval()\n", @@ -204,8 +200,8 @@ " print(\"PLOT:: {} => image shape: {}, pred shape: {}; min: {}, max: {}, sum: {}\".format(\n", " tname, image.shape, label.shape, np.min(label), np.max(label), np.sum(label)))\n", " show_image(image, label)\n", - " elif tname == 'ToNumpyd':\n", - " for i in range(label.shape[-1]):\n", + " elif tname == 'xToNumpyd': # Rename to Plot model output without post-transform\n", + " for i in range(label.shape[1]):\n", " img = image[0, i, :, :].detach().cpu().numpy() if torch.is_tensor(image) else image[0][i]\n", " lab = label[0, i, :, :].detach().cpu().numpy() if torch.is_tensor(label) else label[0][i]\n", " if np.sum(lab) > 0:\n", @@ -219,27 +215,35 @@ " tname, image.shape, label.shape, np.min(label), np.max(label), np.sum(label)))\n", " show_image(image, label)\n", "\n", - "fig, ax = plt.subplots() # make it bigger\n", - "camera = Camera(fig) # the camera gets our figure\n", - "\n", "for i in range(pred.shape[0]):\n", " image = original_image[:, :, i]\n", " label = pred[i, :, :]\n", " if np.sum(label) == 0:\n", " continue\n", "\n", - " # print(\"PLOT:: {} => image shape: {}, pred shape: {}; min: {}, max: {}, sum: {}\".format(\n", - " # i, image.shape, label.shape, np.min(label), np.max(label), np.sum(label)))\n", - " # show_image(label, None)\n", + " print(\"Final PLOT:: {} => image shape: {}, pred shape: {}; min: {}, max: {}, sum: {}\".format(\n", + " i, image.shape, label.shape, np.min(label), np.max(label), np.sum(label)))\n", + " show_image(image, label)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from monai.data import write_nifti\n", "\n", - " ax.imshow(image, cmap=\"gray\") # plotting\n", - " if np.sum(label) > 0:\n", - " masked = np.ma.masked_where(label == 0, label)\n", - " ax.imshow(masked, 'hsv', interpolation='none', alpha=0.7)\n", - " camera.snap()\n", + "pred = data['pred']\n", + "meta_data = data['pred_meta_dict']\n", + "affine = meta_data.get(\"affine\", None)\n", + "\n", + "pred = np.moveaxis(pred, 0, -1)\n", + "print('Prediction NII shape: {}'.format(pred.shape))\n", "\n", - "animation = camera.animate()\n", - "HTML(animation.to_html5_video())" + "file_name = 'result_label.nii.gz'\n", + "write_nifti(pred, file_name=file_name)\n", + "print('Prediction saved at: {}'.format(file_name))" ] }, { From a7d96817423823f5e4c87b64ac43ad3514cff13d Mon Sep 17 00:00:00 2001 From: Sachidanand Alle Date: Mon, 8 Feb 2021 13:19:10 -0800 Subject: [PATCH 06/10] fix comments Signed-off-by: Sachidanand Alle --- deepgrow/ignite/train.py | 2 -- deepgrow/ignite/validate.py | 11 +---------- 2 files changed, 1 insertion(+), 12 deletions(-) diff --git a/deepgrow/ignite/train.py b/deepgrow/ignite/train.py index 0bd9050f1f..1de5d1cea5 100644 --- a/deepgrow/ignite/train.py +++ b/deepgrow/ignite/train.py @@ -9,7 +9,6 @@ import torch import torch.distributed as dist -from monai.apps.deepgrow.handler import DeepgrowStatsHandler from monai.apps.deepgrow.interaction import Interaction from monai.apps.deepgrow.transforms import ( SpatialCropForegroundd, @@ -184,7 +183,6 @@ def create_trainer(args): val_handlers = [ 
StatsHandler(output_transform=lambda x: None), TensorBoardStatsHandler(log_dir=args.output, output_transform=lambda x: None), - DeepgrowStatsHandler(log_dir=args.output, tag_name='val_dice', image_interval=args.image_interval), CheckpointSaver(save_dir=args.output, save_dict={"net": network}, save_key_metric=True, save_final=True, save_interval=args.save_interval, final_filename='model.pt') ] diff --git a/deepgrow/ignite/validate.py b/deepgrow/ignite/validate.py index d16810c549..f45633b297 100644 --- a/deepgrow/ignite/validate.py +++ b/deepgrow/ignite/validate.py @@ -9,7 +9,6 @@ import torch import train -from monai.apps.deepgrow.handler import DeepgrowStatsHandler, SegmentationSaver from monai.apps.deepgrow.interaction import Interaction from monai.engines import SupervisedEvaluator from monai.handlers import ( @@ -46,15 +45,8 @@ def create_validator(args, click): val_handlers = [ StatsHandler(output_transform=lambda x: None), - TensorBoardStatsHandler(log_dir=args.output, output_transform=lambda x: None), - DeepgrowStatsHandler( - log_dir=args.output, - tag_name=f'clicks_{click}_val_dice', - fold_size=int(len(val_loader.dataset) / args.batch / args.folds) if args.folds else 0 - ), + TensorBoardStatsHandler(log_dir=args.output, output_transform=lambda x: None) ] - if args.save_seg: - val_handlers.append(SegmentationSaver(output_dir=os.path.join(args.output, f'clicks_{click}_images'))) evaluator = SupervisedEvaluator( device=device, @@ -120,7 +112,6 @@ def main(): parser.add_argument('-i', '--input', default='/workspace/data/52432/2D/dataset.json') parser.add_argument('-o', '--output', default='eval') - parser.add_argument('--save_seg', type=strtobool, default='false') parser.add_argument('--cache_dir', type=str, default=None) parser.add_argument('-g', '--use_gpu', type=strtobool, default='true') From 81919e4989faf3f19a7787b932d35dd4af9895f0 Mon Sep 17 00:00:00 2001 From: Sachidanand Alle Date: Tue, 9 Feb 2021 07:44:50 -0800 Subject: [PATCH 07/10] after verifying for both 2D train+validate on spleen dataset Signed-off-by: Sachidanand Alle --- deepgrow/ignite/__init__.py | 0 deepgrow/ignite/handler.py | 284 +++++++++++++++++++++++++++++ deepgrow/ignite/prepare_dataset.py | 17 +- deepgrow/ignite/train.py | 18 +- deepgrow/ignite/train_3d.py | 6 +- deepgrow/ignite/validate.py | 15 +- deepgrow/ignite/validate_3d.py | 2 +- 7 files changed, 315 insertions(+), 27 deletions(-) create mode 100644 deepgrow/ignite/__init__.py create mode 100644 deepgrow/ignite/handler.py diff --git a/deepgrow/ignite/__init__.py b/deepgrow/ignite/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/deepgrow/ignite/handler.py b/deepgrow/ignite/handler.py new file mode 100644 index 0000000000..60c6bc5966 --- /dev/null +++ b/deepgrow/ignite/handler.py @@ -0,0 +1,284 @@ +# Copyright 2020 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
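+
+# Ignite event handlers used by train.py / validate.py below:
+# - DeepgrowStatsHandler logs region-wise Dice statistics and sample images to
+#   TensorBoard at the end of each epoch (or every `fold_size` iterations).
+# - SegmentationSaver writes sample image/label/prediction snapshots to disk.
+# Both are passed to the engines via the val_handlers list, e.g.
+#     DeepgrowStatsHandler(log_dir=args.output, tag_name='val_dice')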
+ +import logging +import os +import statistics + +import numpy as np +import torch +import torch.distributed + +from monai.engines.workflow import Engine, Events +from monai.handlers.tensorboard_handlers import SummaryWriter +from monai.metrics import compute_meandice +from monai.transforms import rescale_array +from monai.utils import optional_import +from monai.visualize import plot_2d_or_3d_image + +nib, _ = optional_import("nibabel") +torchvision, _ = optional_import("torchvision") +make_grid, _ = optional_import("torchvision.utils", name="make_grid") +Image, _ = optional_import("PIL.Image") +ImageDraw, _ = optional_import("PIL.ImageDraw") + + +class RegionDice: + def __init__(self): + self.data = [] + + def reset(self): + self.data = [] + + def update(self, y_pred, y, batched=True): + if not batched: + y_pred = y_pred[None] + y = y[None] + score = compute_meandice(y_pred=y_pred, y=y, include_background=False).mean() + self.data.append(score.item()) + + def mean(self): + return statistics.mean(self.data) + + def stdev(self): + return statistics.stdev(self.data) if len(self.data) > 1 else 0 + + +class DeepgrowStatsHandler: + def __init__( + self, + summary_writer=None, + interval=1, + log_dir="./runs", + tag_name="val_dice", + compute_metric=True, + images=True, + image_interval=1, + max_channels=1, + max_frames=64, + add_scalar=True, + merge_scalar=False, + fold_size=0, + ): + self.writer = SummaryWriter(log_dir=log_dir) if summary_writer is None else summary_writer + self.interval = interval + self.tag_name = tag_name + self.compute_metric = compute_metric + self.images = images + self.image_interval = image_interval + self.max_channels = max_channels + self.max_frames = max_frames + self.add_scalar = add_scalar + self.merge_scalar = merge_scalar + self.fold_size = fold_size + self.logger = logging.getLogger(__name__) + + if torch.distributed.is_initialized(): + self.tag_name = "{}-r{}".format(self.tag_name, torch.distributed.get_rank()) + + self.plot_data = {} + self.metric_data = {} + + def attach(self, engine: Engine) -> None: + engine.add_event_handler(Events.ITERATION_COMPLETED(every=self.interval), self, "iteration") + engine.add_event_handler(Events.EPOCH_COMPLETED(every=1), self, "epoch") + + def write_images(self, epoch): + if not self.plot_data or not len(self.plot_data): + return + + all_imgs = [] + for region in sorted(self.plot_data.keys()): + metric = self.metric_data.get(region) + region_data = self.plot_data[region] + if len(region_data[0].shape) == 3: + ti = Image.new("RGB", region_data[0].shape[1:]) + d = ImageDraw.Draw(ti) + t = "region: {}".format(region) + if self.compute_metric: + t = t + "\ndice: {:.4f}".format(metric.mean()) + t = t + "\nstdev: {:.4f}".format(metric.stdev()) + d.multiline_text((10, 10), t, fill=(255, 255, 0)) + ti = rescale_array(np.rollaxis(np.array(ti), 2, 0)[0][np.newaxis]) + all_imgs.append(ti) + all_imgs.extend(region_data) + + if len(all_imgs[0].shape) == 3: + img_tensor = make_grid(tensor=torch.from_numpy(np.array(all_imgs)), nrow=4, normalize=True, pad_value=2) + self.writer.add_image(tag=f"Deepgrow Regions ({self.tag_name})", img_tensor=img_tensor, global_step=epoch) + + if len(all_imgs[0].shape) == 4: + for region in sorted(self.plot_data.keys()): + tags = [f"region_{region}_image", f"region_{region}_label", f"region_{region}_output"] + if torch.distributed.is_initialized(): + rank = "r{}-".format(torch.distributed.get_rank()) + tags = [rank + tags[0], rank + tags[1], rank + tags[2]] + for i in range(3): + img = 
self.plot_data[region][i] + img = np.moveaxis(img, -3, -1) + plot_2d_or_3d_image( + img[np.newaxis], epoch, self.writer, 0, self.max_channels, self.max_frames, tags[i] + ) + + self.logger.info( + "Saved {} Regions {} into Tensorboard at epoch: {}".format( + len(self.plot_data), sorted([*self.plot_data]), epoch + ) + ) + self.writer.flush() + + def write_region_metrics(self, epoch): + metric_sum = 0 + means = {} + for region in self.metric_data: + metric = self.metric_data[region].mean() + self.logger.info( + "Epoch[{}] Metrics -- Region: {:0>2d}, {}: {:.4f}".format(epoch, region, self.tag_name, metric) + ) + + if self.merge_scalar: + means["{:0>2d}".format(region)] = metric + else: + self.writer.add_scalar("{}_{:0>2d}".format(self.tag_name, region), metric, epoch) + metric_sum += metric + + if self.merge_scalar: + means["avg"] = metric_sum / len(self.metric_data) + self.writer.add_scalars("{}_region".format(self.tag_name), means, epoch) + elif len(self.metric_data) > 1: + metric_avg = metric_sum / len(self.metric_data) + self.writer.add_scalar("{}_regions_avg".format(self.tag_name), metric_avg, epoch) + self.writer.flush() + + def __call__(self, engine: Engine, action) -> None: + total_steps = engine.state.iteration + if total_steps < engine.state.epoch_length: + total_steps = engine.state.epoch_length * (engine.state.epoch - 1) + total_steps + + if action == "epoch" and not self.fold_size: + epoch = engine.state.epoch + elif self.fold_size and total_steps % self.fold_size == 0: + epoch = int(total_steps / self.fold_size) + else: + epoch = None + + if epoch: + if self.images and epoch % self.image_interval == 0: + self.write_images(epoch) + if self.add_scalar: + self.write_region_metrics(epoch) + + if action == "epoch" or epoch: + self.plot_data = {} + self.metric_data = {} + return + + device = engine.state.device + batch_data = engine.state.batch + output_data = engine.state.output + + for bidx in range(len(batch_data.get("region", []))): + region = batch_data.get("region")[bidx] + region = region.item() if torch.is_tensor(region) else region + + if self.images and self.plot_data.get(region) is None: + self.plot_data[region] = [ + rescale_array(batch_data["image"][bidx][0].detach().cpu().numpy()[np.newaxis], 0, 1), + rescale_array(batch_data["label"][bidx].detach().cpu().numpy(), 0, 1), + rescale_array(output_data["pred"][bidx].detach().cpu().numpy(), 0, 1), + ] + + if self.compute_metric: + if self.metric_data.get(region) is None: + self.metric_data[region] = RegionDice() + self.metric_data[region].update( + y_pred=output_data["pred"][bidx].to(device), y=batch_data["label"][bidx].to(device), batched=False + ) + + +class SegmentationSaver: + def __init__( + self, + output_dir: str = "./runs", + save_np=False, + images=True, + ): + self.output_dir = output_dir + self.save_np = save_np + self.images = images + os.makedirs(self.output_dir, exist_ok=True) + + def attach(self, engine: Engine) -> None: + if not engine.has_event_handler(self, Events.ITERATION_COMPLETED): + engine.add_event_handler(Events.ITERATION_COMPLETED, self) + + def __call__(self, engine: Engine): + batch_data = engine.state.batch + output_data = engine.state.output + device = engine.state.device + tag = "" + if torch.distributed.is_initialized(): + tag = "r{}-".format(torch.distributed.get_rank()) + + for bidx in range(len(batch_data.get("image"))): + step = engine.state.iteration + region = batch_data.get("region")[bidx] + region = region.item() if torch.is_tensor(region) else region + + image = 
batch_data["image"][bidx][0].detach().cpu().numpy()[np.newaxis] + label = batch_data["label"][bidx].detach().cpu().numpy() + pred = output_data["pred"][bidx].detach().cpu().numpy() + dice = compute_meandice( + y_pred=output_data["pred"][bidx][None].to(device), + y=batch_data["label"][bidx][None].to(device), + include_background=False, + ).mean() + + if self.save_np: + np.savez( + os.path.join( + self.output_dir, + "{}img_label_pred_{}_{:0>4d}_{:0>2d}_{:.4f}".format(tag, region, step, bidx, dice), + ), + image, + label, + pred, + ) + + if self.images and len(image.shape) == 3: + img = make_grid(torch.from_numpy(rescale_array(image, 0, 1)[0])) + lab = make_grid(torch.from_numpy(rescale_array(label, 0, 1)[0])) + + pos = rescale_array(output_data["image"][bidx][1].detach().cpu().numpy()[np.newaxis], 0, 1)[0] + neg = rescale_array(output_data["image"][bidx][2].detach().cpu().numpy()[np.newaxis], 0, 1)[0] + pre = make_grid(torch.from_numpy(np.array([rescale_array(pred, 0, 1)[0], pos, neg]))) + + torchvision.utils.save_image( + tensor=[img, lab, pre], + nrow=3, + pad_value=2, + fp=os.path.join( + self.output_dir, + "{}img_label_pred_{}_{:0>4d}_{:0>2d}_{:.4f}.png".format(tag, region, step, bidx, dice), + ), + ) + + if self.images and len(image.shape) == 4: + samples = {"image": image[0], "label": label[0], "pred": pred[0]} + for sample in samples: + img = np.moveaxis(samples[sample], -3, -1) + img = nib.Nifti1Image(img, np.eye(4)) + nib.save( + img, + os.path.join( + self.output_dir, "{}{}_{:0>4d}_{:0>2d}_{:.4f}.nii.gz".format(tag, sample, step, bidx, dice) + ), + ) diff --git a/deepgrow/ignite/prepare_dataset.py b/deepgrow/ignite/prepare_dataset.py index dbfa4a8f7c..0cdca209f0 100644 --- a/deepgrow/ignite/prepare_dataset.py +++ b/deepgrow/ignite/prepare_dataset.py @@ -6,7 +6,6 @@ import sys from monai.apps.deepgrow.dataset import create_dataset -from monai.data import partition_dataset def prepare_datalist(args): @@ -39,15 +38,6 @@ def prepare_datalist(args): logging.info('+++ Total Records: {}'.format(len(datalist))) logging.info('') - train_ds, val_ds = partition_dataset(datalist, ratios=[args.split, (1 - args.split)], shuffle=True, seed=args.seed) - dataset_json = os.path.join(args.output, 'dataset-0.json') - with open(dataset_json, 'w') as fp: - json.dump({'training': train_ds, 'validation': val_ds}, fp, indent=2) - - logging.info('*** Dataset File: {}'.format(dataset_json)) - logging.info('*** Total Records for Training: {}'.format(len(train_ds))) - logging.info('*** Total Records for Validation: {}'.format(len(val_ds))) - def run(args): for arg in vars(args): @@ -70,12 +60,11 @@ def main(): parser.add_argument('-s', '--seed', type=int, default=42) parser.add_argument('-dims', '--dimensions', type=int, default=2) - parser.add_argument('-d', '--dataset_root', default='/workspace/data/52432') - parser.add_argument('-j', '--dataset_json', default='/workspace/data/52432/dataset.json') + parser.add_argument('-d', '--dataset_root', default='/workspace/data/MSD_Task09_Spleen') + parser.add_argument('-j', '--dataset_json', default='/workspace/data/MSD_Task09_Spleen/dataset.json') parser.add_argument('-k', '--datalist_key', default='training') - parser.add_argument('-o', '--output', default='/workspace/data/52432/2D') - parser.add_argument('-x', '--split', type=float, default=0.9) + parser.add_argument('-o', '--output', default='/workspace/data/deepgrow/2D/MSD_Task09_Spleen') parser.add_argument('-t', '--limit', type=int, default=0) parser.add_argument('-r', '--relative_path', type=strtobool, 
default='false') diff --git a/deepgrow/ignite/train.py b/deepgrow/ignite/train.py index 1de5d1cea5..2483710240 100644 --- a/deepgrow/ignite/train.py +++ b/deepgrow/ignite/train.py @@ -46,6 +46,7 @@ Resized, ) from monai.utils import set_determinism +from handler import DeepgrowStatsHandler def get_network(network, channels, dimensions): @@ -71,18 +72,22 @@ def get_network(network, channels, dimensions): return network -def get_pre_transforms(roi_size, model_size): - return Compose([ +def get_pre_transforms(roi_size, model_size, dimensions): + t = [ LoadImaged(keys=('image', 'label')), AddChanneld(keys=('image', 'label')), SpatialCropForegroundd(keys=('image', 'label'), source_key='label', spatial_size=roi_size), Resized(keys=('image', 'label'), spatial_size=model_size, mode=('area', 'nearest')), - NormalizeIntensityd(keys='image', subtrahend=208.0, divisor=388.0), - FindAllValidSlicesd(label='label', sids='sids'), + NormalizeIntensityd(keys='image', subtrahend=208.0, divisor=388.0) + ] + if dimensions == 3: + t.append(FindAllValidSlicesd(label='label', sids='sids')) + t.extend([ AddInitialSeedPointd(label='label', guidance='guidance', sids='sids'), AddGuidanceSignald(image='image', guidance='guidance'), ToTensord(keys=('image', 'label')) ]) + return Compose(t) def get_click_transforms(): @@ -163,7 +168,7 @@ def create_trainer(args): else: device = torch.device("cuda" if args.use_gpu else "cpu") - pre_transforms = get_pre_transforms(args.roi_size, args.model_size) + pre_transforms = get_pre_transforms(args.roi_size, args.model_size, args.dimensions) click_transforms = get_click_transforms() post_transform = get_post_transforms() @@ -183,6 +188,7 @@ def create_trainer(args): val_handlers = [ StatsHandler(output_transform=lambda x: None), TensorBoardStatsHandler(log_dir=args.output, output_transform=lambda x: None), + DeepgrowStatsHandler(log_dir=args.output, tag_name='val_dice', image_interval=args.image_interval), CheckpointSaver(save_dir=args.output, save_dict={"net": network}, save_key_metric=True, save_final=True, save_interval=args.save_interval, final_filename='model.pt') ] @@ -306,7 +312,7 @@ def main(): parser.add_argument('-n', '--network', default='bunet', choices=['unet', 'bunet']) parser.add_argument('-c', '--channels', type=int, default=32) - parser.add_argument('-i', '--input', default='/workspace/data/52432/2D/dataset.json') + parser.add_argument('-i', '--input', default='/workspace/data/deepgrow/2D/MSD_Task09_Spleen/dataset.json') parser.add_argument('-o', '--output', default='output') parser.add_argument('-g', '--use_gpu', type=strtobool, default='true') diff --git a/deepgrow/ignite/train_3d.py b/deepgrow/ignite/train_3d.py index ef6b2e53b1..c1fbf89fc8 100644 --- a/deepgrow/ignite/train_3d.py +++ b/deepgrow/ignite/train_3d.py @@ -24,7 +24,7 @@ def strtobool(val): parser.add_argument('-n', '--network', default='bunet', choices=['unet', 'bunet']) parser.add_argument('-c', '--channels', type=int, default=32) - parser.add_argument('-i', '--input', default='/workspace/data/52432/3D/dataset.json') + parser.add_argument('-i', '--input', default='/workspace/data/deepgrow/3D/MSD_Task09_Spleen/dataset.json') parser.add_argument('-o', '--output', default='output3D') parser.add_argument('-g', '--use_gpu', type=strtobool, default='true') @@ -57,13 +57,13 @@ def strtobool(val): ''' # Single GPU (it will also export) -python train.py +python train_3d.py # Multi GPU (run export separate) python -m torch.distributed.launch \ --nproc_per_node=`nvidia-smi -L | wc -l` \ --nnodes=1 --node_rank=0 
--master_addr="localhost" --master_port=1234 \ - -m train --multi_gpu true -e 100 + -m train_3d --multi_gpu true -e 100 python train.py --export ''' diff --git a/deepgrow/ignite/validate.py b/deepgrow/ignite/validate.py index f45633b297..127a4c5041 100644 --- a/deepgrow/ignite/validate.py +++ b/deepgrow/ignite/validate.py @@ -9,6 +9,7 @@ import torch import train +from handler import DeepgrowStatsHandler, SegmentationSaver from monai.apps.deepgrow.interaction import Interaction from monai.engines import SupervisedEvaluator from monai.handlers import ( @@ -24,7 +25,7 @@ def create_validator(args, click): device = torch.device("cuda" if args.use_gpu else "cpu") - pre_transforms = train.get_pre_transforms(args.roi_size, args.model_size) + pre_transforms = train.get_pre_transforms(args.roi_size, args.model_size, args.dimensions) click_transforms = train.get_click_transforms() post_transform = train.get_post_transforms() @@ -45,8 +46,15 @@ def create_validator(args, click): val_handlers = [ StatsHandler(output_transform=lambda x: None), - TensorBoardStatsHandler(log_dir=args.output, output_transform=lambda x: None) + TensorBoardStatsHandler(log_dir=args.output, output_transform=lambda x: None), + DeepgrowStatsHandler( + log_dir=args.output, + tag_name=f'clicks_{click}_val_dice', + fold_size=int(len(val_loader.dataset) / args.batch / args.folds) if args.folds else 0 + ), ] + if args.save_seg: + val_handlers.append(SegmentationSaver(output_dir=os.path.join(args.output, f'clicks_{click}_images'))) evaluator = SupervisedEvaluator( device=device, @@ -110,8 +118,9 @@ def main(): parser.add_argument('-c', '--channels', type=int, default=32) parser.add_argument('-f', '--folds', type=int, default=10) - parser.add_argument('-i', '--input', default='/workspace/data/52432/2D/dataset.json') + parser.add_argument('-i', '--input', default='/workspace/data/deepgrow/2D/MSD_Task09_Spleen/dataset.json') parser.add_argument('-o', '--output', default='eval') + parser.add_argument('--save_seg', type=strtobool, default='false') parser.add_argument('--cache_dir', type=str, default=None) parser.add_argument('-g', '--use_gpu', type=strtobool, default='true') diff --git a/deepgrow/ignite/validate_3d.py b/deepgrow/ignite/validate_3d.py index efaf6367e8..c4121c8c0c 100644 --- a/deepgrow/ignite/validate_3d.py +++ b/deepgrow/ignite/validate_3d.py @@ -20,7 +20,7 @@ def main(): parser.add_argument('-c', '--channels', type=int, default=32) parser.add_argument('-f', '--folds', type=int, default=10) - parser.add_argument('-i', '--input', default='/workspace/data/52432/3D/dataset.json') + parser.add_argument('-i', '--input', default='/workspace/data/deepgrow/3D/MSD_Task09_Spleen/dataset.json') parser.add_argument('-o', '--output', default='eval3D') parser.add_argument('--save_seg', type=strtobool, default='false') parser.add_argument('--cache_dir', type=str, default=None) From 543c24e57bdc9499b32827a994825ee19e34ae38 Mon Sep 17 00:00:00 2001 From: Sachidanand Alle Date: Tue, 9 Feb 2021 08:08:19 -0800 Subject: [PATCH 08/10] Fix build Signed-off-by: Sachidanand Alle --- deepgrow/ignite/inference.ipynb | 21 ++++++++++---------- deepgrow/ignite/inference_3d.ipynb | 32 ++++++++++++------------------ 2 files changed, 23 insertions(+), 30 deletions(-) diff --git a/deepgrow/ignite/inference.ipynb b/deepgrow/ignite/inference.ipynb index 9705a15700..07c73dca3e 100644 --- a/deepgrow/ignite/inference.ipynb +++ b/deepgrow/ignite/inference.ipynb @@ -32,6 +32,8 @@ " Resized\n", ")\n", "\n", + "max_epochs = 1\n", + "\n", "\n", "def 
draw_points(guidance):\n", " if guidance is None:\n", @@ -80,7 +82,7 @@ " for m in data[k]:\n", " print('{} Meta:: {} => {}'.format(k, m, data[k][m]))\n", " else:\n", - " print('Data key: {} = {}'.format(k, d))\n" + " print('Data key: {} = {}'.format(k, d))" ] }, { @@ -146,13 +148,17 @@ " if tname == 'LoadImaged':\n", " original_image = data['image']\n", " if tname == 'AddChanneld':\n", - " original_image_slice = data['image']\n" + " original_image_slice = data['image']" ] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "# Evaluation\n", @@ -193,15 +199,8 @@ " label = label[0, :, :].detach().cpu().numpy() if torch.is_tensor(label) else label[0]\n", " print(\"PLOT:: {} => image shape: {}, pred shape: {}; min: {}, max: {}, sum: {}\".format(\n", " tname, image.shape, label.shape, np.min(label), np.max(label), np.sum(label)))\n", - " show_image(image, label)\n" + " show_image(image, label)" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/deepgrow/ignite/inference_3d.ipynb b/deepgrow/ignite/inference_3d.ipynb index 466330c174..dd8cc44298 100644 --- a/deepgrow/ignite/inference_3d.ipynb +++ b/deepgrow/ignite/inference_3d.ipynb @@ -18,6 +18,7 @@ " RestoreCroppedLabeld,\n", " SpatialCropGuidanced,\n", ")\n", + "from monai.data import write_nifti\n", "from monai.transforms import (\n", " AsChannelFirstd,\n", " Spacingd,\n", @@ -31,6 +32,8 @@ " Resized\n", ")\n", "\n", + "max_epochs = 1\n", + "\n", "\n", "def draw_points(guidance, slice_idx):\n", " if guidance is None:\n", @@ -81,7 +84,7 @@ " for m in data[k]:\n", " print('{} Meta:: {} => {}'.format(k, m, data[k][m]))\n", " else:\n", - " print('Data key: {} = {}'.format(k, d))\n" + " print('Data key: {} = {}'.format(k, d))" ] }, { @@ -98,8 +101,7 @@ "\n", "data = {\n", " 'image': '_image.nii.gz',\n", - " #'foreground': [[303, 154, 124], [202, 264, 124], [298, 215, 164], [298, 222, 35], [298, 216, 77], [286, 211, 176]], # Liver\n", - " 'foreground': [[66, 180, 105], [66, 180, 145]], # Spleen\n", + " 'foreground': [[66, 180, 105], [66, 180, 145]],\n", " 'background': [],\n", "}\n", "slice_idx = original_slice_idx = data['foreground'][0][2]\n", @@ -108,11 +110,9 @@ " LoadImaged(keys='image'),\n", " AsChannelFirstd(keys='image'),\n", " Spacingd(keys='image', pixdim=pixdim, mode='bilinear'),\n", - "\n", " AddGuidanceFromPointsd(ref_image='image', guidance='guidance', foreground='foreground', background='background',\n", " dimensions=dimensions),\n", " AddChanneld(keys='image'),\n", - "\n", " SpatialCropGuidanced(keys='image', guidance='guidance', spatial_size=roi_size),\n", " Resized(keys='image', spatial_size=model_size, mode='area'),\n", " ResizeGuidanced(guidance='guidance', ref_image='image'),\n", @@ -153,9 +153,8 @@ "label = data.get('label')\n", "guidance = data.get('guidance')\n", "for i in range(image.shape[1]):\n", - " #print('Slice Idx: {}'.format(i))\n", - " #show_image(image[0][i], None, guidance, i)\n", - " pass\n" + " print('Slice Idx: {}'.format(i))\n", + " # show_image(image[0][i], None, guidance, i)" ] }, { @@ -200,7 +199,7 @@ " print(\"PLOT:: {} => image shape: {}, pred shape: {}; min: {}, max: {}, sum: {}\".format(\n", " tname, image.shape, label.shape, np.min(label), np.max(label), np.sum(label)))\n", " show_image(image, label)\n", - " elif tname == 'xToNumpyd': # Rename to Plot model output without post-transform\n", + " elif tname == 
'xToNumpyd':\n",
     "            for i in range(label.shape[1]):\n",
     "                img = image[0, i, :, :].detach().cpu().numpy() if torch.is_tensor(image) else image[0][i]\n",
     "                lab = label[0, i, :, :].detach().cpu().numpy() if torch.is_tensor(label) else label[0][i]\n",
@@ -229,11 +228,13 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   },
    "outputs": [],
    "source": [
-    "from monai.data import write_nifti\n",
-    "\n",
     "pred = data['pred']\n",
     "meta_data = data['pred_meta_dict']\n",
     "affine = meta_data.get(\"affine\", None)\n",
@@ -245,13 +246,6 @@
     "write_nifti(pred, file_name=file_name)\n",
     "print('Prediction saved at: {}'.format(file_name))"
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
  ],
  "metadata": {

From bdb5eafd2e45b868fcfde3772aee71a2b6ff6e29 Mon Sep 17 00:00:00 2001
From: Sachidanand Alle
Date: Tue, 2 Mar 2021 04:47:48 -0800
Subject: [PATCH 09/10] Add/Update Readme + Fix based on MONAI master

Signed-off-by: Sachidanand Alle
---
 README.md                          |   4 +
 deepgrow/ignite/README.md          | 142 +++++++++++++++++++++++++++++
 deepgrow/ignite/inference.ipynb    |   4 +-
 deepgrow/ignite/inference_3d.ipynb |   4 +-
 deepgrow/ignite/stats.png          | Bin 0 -> 81270 bytes
 5 files changed, 150 insertions(+), 4 deletions(-)
 create mode 100644 deepgrow/ignite/README.md
 create mode 100644 deepgrow/ignite/stats.png

diff --git a/README.md b/README.md
index f28ce6605c..ae19955f0e 100644
--- a/README.md
+++ b/README.md
@@ -109,6 +109,10 @@ The example is a PyTorch Ignite program and shows several key features of MONAI,
 #### [COVID 19-20 challenge baseline](./3d_segmentation/challenge_baseline)
 This folder provides a simple baseline method for training, validation, and inference for [COVID-19 LUNG CT LESION SEGMENTATION CHALLENGE - 2020](https://covid-segmentation.grand-challenge.org/COVID-19-20/) (a MICCAI Endorsed Event).
 
+**deepgrow**
+#### [Deepgrow](./deepgrow)
+The example shows how to train/validate a 2D/3D deepgrow model. It also demonstrates running inference with trained deepgrow models.
+
 **federated learning**
 #### [Substra](./federated_learning/substra)
 The example show how to execute the 3d segmentation torch tutorial on a federated learning platform, Substra.
diff --git a/deepgrow/ignite/README.md b/deepgrow/ignite/README.md
new file mode 100644
index 0000000000..398f44f39d
--- /dev/null
+++ b/deepgrow/ignite/README.md
@@ -0,0 +1,142 @@
+# Deepgrow Examples
+This folder contains examples to train and validate a 2D/3D deepgrow model.
+It also has notebooks to run inference with a trained model.
+
+### 1. Data
+
+Training a deepgrow model requires data. Some publicly available datasets used in the examples can be downloaded from [Medical Segmentation Decathlon](https://drive.google.com/drive/folders/1HqEgzS8BV2c7xYNrZdEAnrHk7osJJ--2) or [Synapse](https://www.synapse.org/#!Synapse:syn3193805/wiki/217789).
+
+### 2. Questions and bugs
+
+- For questions relating to the use of MONAI, please use our [Discussions tab](https://github.com/Project-MONAI/MONAI/discussions) on the main repository of MONAI.
+- For bugs relating to MONAI functionality, please create an issue on the [main repository](https://github.com/Project-MONAI/MONAI/issues).
+- For bugs relating to the running of a tutorial, please create an issue in [this repository](https://github.com/Project-MONAI/Tutorials/issues).
+
+### 3. List of notebooks and examples
+#### [Prepare Your Data](./prepare_dataset.py)
+This example is a standard PyTorch program that helps the user prepare 2D or 3D training input.
+
+```bash
+# Run to know all possible options
+python ./prepare_dataset.py -h
+
+# Prepare dataset to train a 2D Deepgrow model
+python ./prepare_dataset.py \
+    --dimensions 2 \
+    --dataset_root MSD_Task09_Spleen \
+    --dataset_json MSD_Task09_Spleen/dataset.json \
+    --output deepgrow/2D/MSD_Task09_Spleen
+
+# Prepare dataset to train a 3D Deepgrow model
+python ./prepare_dataset.py \
+    --dimensions 3 \
+    --dataset_root MSD_Task09_Spleen \
+    --dataset_json MSD_Task09_Spleen/dataset.json \
+    --output deepgrow/3D/MSD_Task09_Spleen
+```
+
+#### [Deepgrow 2D Training](./train.py)
+This example is a standard PyTorch program that helps the user run training over a pre-processed 2D dataset.
+```bash
+# Run to know all possible options
+python ./train.py -h
+
+# Train a 2D Deepgrow model on single-gpu
+python ./train.py \
+    --input deepgrow/2D/MSD_Task09_Spleen/dataset.json \
+    --output models/2D \
+    --epochs 50
+
+# Train a 2D Deepgrow model on multi-gpu (NVIDIA)
+python -m torch.distributed.launch \
+    --nproc_per_node=`nvidia-smi -L | wc -l` \
+    --nnodes=1 \
+    --node_rank=0 \
+    --master_addr="localhost" \
+    --master_port=1234 \
+    -m train \
+    --multi_gpu true \
+    --input deepgrow/2D/MSD_Task09_Spleen/dataset.json \
+    --output models/2D \
+    --epochs 50
+
+# After training, export/save the trained model as TorchScript
+python ./train.py \
+    --input models/2D/model.pt \
+    --output models/2D/model.ts \
+    --export true
+```
+
+#### [Deepgrow 2D Validation](./validate.py)
+This example is a standard PyTorch program that helps the user run evaluation of a trained 2D model.
+```bash
+# Run to know all possible options
+python ./validate.py -h
+
+# Evaluate a 2D Deepgrow model
+python ./validate.py \
+    --input deepgrow/2D/MSD_Task09_Spleen/dataset.json \
+    --output eval/2D \
+    --model_path models/2D/model.pt
+```
+
+#### [Deepgrow 2D Inference](./inference.ipynb)
+This notebook runs the pre-transforms needed before inference with a Deepgrow 2D model,
+and the post-transforms that produce the final label mask. A minimal, illustrative
+sketch of that flow is shown below.
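+
+The sketch below assumes a trained TorchScript model at models/2D/model.ts (from the
+training step above); the image path and click coordinates are placeholders, and the
+exact transform chains live in the notebook itself:
+
+```python
+import torch
+from torch import jit
+
+from monai.apps.deepgrow.transforms import (
+    AddGuidanceFromPointsd,
+    AddGuidanceSignald,
+    Fetch2DSliced,
+    ResizeGuidanced,
+    RestoreLabeld,
+    SpatialCropGuidanced,
+)
+from monai.transforms import (
+    Activationsd,
+    AddChanneld,
+    AsChannelFirstd,
+    AsDiscreted,
+    LoadImaged,
+    NormalizeIntensityd,
+    Resized,
+    Spacingd,
+    ToNumpyd,
+)
+
+data = {
+    'image': 'image.nii.gz',         # placeholder input volume
+    'foreground': [[66, 180, 105]],  # placeholder user click(s)
+    'background': [],
+}
+
+# Pre-transforms: turn the clicks into guidance points, fetch the clicked 2D
+# slice, crop/resize around the guidance and add the guidance signal channels.
+pre_transforms = [
+    LoadImaged(keys='image'),
+    AsChannelFirstd(keys='image'),
+    Spacingd(keys='image', pixdim=(1.0, 1.0), mode='bilinear'),
+    AddGuidanceFromPointsd(ref_image='image', guidance='guidance',
+                           foreground='foreground', background='background',
+                           dimensions=2),
+    Fetch2DSliced(keys='image', guidance='guidance'),
+    AddChanneld(keys='image'),
+    SpatialCropGuidanced(keys='image', guidance='guidance', spatial_size=[256, 256]),
+    Resized(keys='image', spatial_size=[256, 256], mode='area'),
+    ResizeGuidanced(guidance='guidance', ref_image='image'),
+    NormalizeIntensityd(keys='image', subtrahend=208.0, divisor=388.0),
+    AddGuidanceSignald(image='image', guidance='guidance'),
+]
+for t in pre_transforms:
+    data = t(data)
+
+# Run the exported TorchScript model on the pre-processed slice.
+model = jit.load('models/2D/model.ts')
+model.cuda()
+model.eval()
+with torch.no_grad():
+    inputs = torch.as_tensor(data['image']).float()[None].cuda()
+    data['pred'] = model(inputs)[0]
+
+# Post-transforms: threshold the prediction and restore the label into the
+# original image space.
+post_transforms = [
+    Activationsd(keys='pred', sigmoid=True),
+    AsDiscreted(keys='pred', threshold_values=True, logit_thresh=0.5),
+    ToNumpyd(keys='pred'),
+    RestoreLabeld(keys='pred', ref_image='image', mode='nearest'),
+]
+for t in post_transforms:
+    data = t(data)
+```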
+
+#### [Deepgrow 3D Training](./train_3d.py)
+This is an extension of [train.py](./train.py) that redefines the basic default arguments to run 3D training.
+```bash
+# Run to know all possible options
+python ./train_3d.py -h
+
+# Train a 3D Deepgrow model on single-gpu
+python ./train_3d.py \
+    --input deepgrow/3D/MSD_Task09_Spleen/dataset.json \
+    --output models/3D \
+    --epochs 100
+
+# Train a 3D Deepgrow model on multi-gpu (NVIDIA)
+python -m torch.distributed.launch \
+    --nproc_per_node=`nvidia-smi -L | wc -l` \
+    --nnodes=1 \
+    --node_rank=0 \
+    --master_addr="localhost" \
+    --master_port=1234 \
+    -m train_3d \
+    --multi_gpu true \
+    --input deepgrow/3D/MSD_Task09_Spleen/dataset.json \
+    --output models/3D \
+    --epochs 100
+
+# After training, export/save the trained model as TorchScript
+python ./train_3d.py \
+    --input models/3D/model.pt \
+    --output models/3D/model.ts \
+    --export true
+```
+
+#### [Deepgrow 3D Validation](./validate_3d.py)
+This is an extension of [validate.py](./validate.py) that redefines the basic default arguments to run 3D validation.
+```bash
+# Run to know all possible options
+python ./validate_3d.py -h
+
+# Evaluate a 3D Deepgrow model
+python ./validate_3d.py \
+    --input deepgrow/3D/MSD_Task09_Spleen/dataset.json \
+    --output eval/3D \
+    --model_path models/3D/model.pt
+```
+
+#### [Deepgrow 3D Inference](./inference_3d.ipynb)
+This notebook runs the pre-transforms needed before inference with a Deepgrow 3D model,
+and the post-transforms that produce the final label mask.
+
+
+#### [Deepgrow Stats](./handler.py)
+It contains basic ignite handlers that capture region/organ-wise statistics and save snapshot
+outputs while running training/validation over a dataset that has a multi-label mask.
+By default, the handlers are added as part of the training/validation steps.
+
+![snapshot](./stats.png)
diff --git a/deepgrow/ignite/inference.ipynb b/deepgrow/ignite/inference.ipynb
index 07c73dca3e..f3d9282b8b 100644
--- a/deepgrow/ignite/inference.ipynb
+++ b/deepgrow/ignite/inference.ipynb
@@ -16,7 +16,7 @@
     "    AddGuidanceSignald,\n",
     "    Fetch2DSliced,\n",
     "    ResizeGuidanced,\n",
-    "    RestoreCroppedLabeld,\n",
+    "    RestoreLabeld,\n",
     "    SpatialCropGuidanced,\n",
     ")\n",
     "from monai.transforms import (\n",
@@ -177,7 +177,7 @@
     "    Activationsd(keys='pred', sigmoid=True),\n",
     "    AsDiscreted(keys='pred', threshold_values=True, logit_thresh=0.5),\n",
     "    ToNumpyd(keys='pred'),\n",
-    "    RestoreCroppedLabeld(keys='pred', ref_image='image', mode='nearest'),\n",
+    "    RestoreLabeld(keys='pred', ref_image='image', mode='nearest'),\n",
     "]\n",
     "\n",
     "for t in post_transforms:\n",
diff --git a/deepgrow/ignite/inference_3d.ipynb b/deepgrow/ignite/inference_3d.ipynb
index dd8cc44298..d7221592c8 100644
--- a/deepgrow/ignite/inference_3d.ipynb
+++ b/deepgrow/ignite/inference_3d.ipynb
@@ -15,7 +15,7 @@
     "    AddGuidanceFromPointsd,\n",
     "    AddGuidanceSignald,\n",
     "    ResizeGuidanced,\n",
-    "    RestoreCroppedLabeld,\n",
+    "    RestoreLabeld,\n",
     "    SpatialCropGuidanced,\n",
     ")\n",
     "from monai.data import write_nifti\n",
@@ -179,7 +179,7 @@
     "    Activationsd(keys='pred', sigmoid=True),\n",
     "    AsDiscreted(keys='pred', threshold_values=True, logit_thresh=0.5),\n",
     "    ToNumpyd(keys='pred'),\n",
-    "    RestoreCroppedLabeld(keys='pred', ref_image='image', mode='nearest'),\n",
+    "    RestoreLabeld(keys='pred', ref_image='image', mode='nearest'),\n",
     "]\n",
     "\n",
     "pred = None\n",
diff --git a/deepgrow/ignite/stats.png b/deepgrow/ignite/stats.png
new file mode 100644
index 0000000000000000000000000000000000000000..abae792137ab18df256efeeb4897a623e6cad0e8
GIT binary patch
literal 81270
[... base85-encoded binary image data for stats.png omitted ...]
zcF^Yp9}LQJECqRB(4ZK%2}(YBA^_9si5XX}(s=y+K!`WQ7xv*jaS9g)@4+47_g*o2 zeA!^ymPD1iac+$)`isH#z}J4DkBWhxpFkI= z1gt4hj}7H0^|_v^^y|^(d&~?>e5qsvR$zeR z)fTvvhn`cE3tF^WdUuX@m@l>`d}8-Q@tc^%*k9-Q%PtkYu=&r%nn!m*1UX}_svopV7oq1WKZZ)_Ee^Jfg%|M!$u18umpi+%=?BXnHS7!07>mY# zq%c7c8cAq{VN}i^b15zE*U)s`TI2x^_&Zfk#&E4$hpF zd{{O6o~l0kZxz3C`hLzgB8PgiG!x`$g&H{nSLLicQdgfONyhKLVb<3Y<0k3ds7N+E z74Np>vq)!^*3;LOhIbm21t&i3HxH~v6EakLaJy!0f-N-(`R^V5Tsqdn@+D_V67z?b zLrJT=yRIHn96&B6g^UvxNX0Ft0Mcu=+ySPNr zW}WI`AV#{g2JG7SWVsI_Q0KpWe)uI1yt5k!&01l%8v$!ss4SphK7{kd#&1MQLzuav z@ZvT#bJ@?Z2DeEbxp-Wa^>SeX$T*R-mEBnwJB}U!rMXUxp{7Q|4bl&A{e*-avjkQJ zx@$!x8=Z1b?@(lBIdHF3bTC)zWmyf=haJ4HfZO8c*E?n4iF5VERE>1?yyO(L<07ge z(7hKk7IEg&I!C;zbmY%ULm-BG_S z3^K~m&WXewEkI(b;X@* z6WQP|@)WTB{45_>2pjU#>?}QR@nBwNyg_q_jRmqnU(3N zGiDey2iu7LBeDHU$bk)_jvdP|`>-F~MkD)2N66Luv8?zm0B^evgnN&=zH6?M9k9MChgb6jIj zj23xhBBpTFcK?AZHE*B0AvI7(*|HAs<6c>A*ym3Uw*oUpYQ4I91J^Uya64<7F4Yuy zN%mb#G!SFoKB04%T~navMv-~iO(6^`TK6%bmrEf@{{>cn?Kvrtpc~h`gm;#aXG(me zPFD!Q<)9Zb^`6wd7B$vAMG^QxuD74R9iwoDq4&E@tbKdeSls`OVX~#vlX=b+$v2jk zowb3l1)Er)zh5VOi%jM)29e3gqPwzfdyqk}lv>}2wF3Ie-fndG_( z=j&^g8m7(pdLG2ro19h`-+S>&%E?c@k9|o@7JG$;fCP!2W+?-3a;|L`E2|rqYwE_b z<9ahILEs4}p8o9#Y+hU-bHIUHX~b?3I8rAubxk5j<5FfN0cMP5a0wdN?Mub))>@xQ zzE@uO1yg^OD01pnYVt`QZUwv@vlyx2&-Fek{rm56nLBERlN)7tks+!ANRu2^<|~@d z5&c`w&=nZcYK(B%t#^azgDt`-AC@oA3n6&_yX$>C0)TE|^x0tj9%#>LD4%*r*#TrC z(3;Sp)i#%#pH1_&JI_Cj3=^Hz>R9^_N^g}4c_+&~WyYCeT%Nz8Mt2&WGU;eB;aM&l z;;~+4=F`Ob_9lMobkCdyTI0TPi5t=2H3NVnO~1{s^Jz!fU)b}~ZT`Os;r~7Is^mCZ zHb4rN>-a_sI={+q2P8rV2Ao-H^U#l=IdkRA%^KJ!l?mLefDuJ-yyBwGh3*@&u@s ze^fP$UI&Y!|2c5Xq2j}pU&4u@fi(l{qa^9xbLI}ByEiAFw1WWykfA!f3$9~rOMHI5 zQod54oeJ&yIZ(sIdpaJm23hx80;KHV=4S5gH!W|nz6{8JvJc$6*Bg#_LdBYLV8+}5 zX46OcB98~0I$;mMd1~?cxs>6M_|FL!hEeITshp7Gt2MZ&gbx>m@pE!WPEL_r3ND~b z1qx!UNW!=3wVK`CrbOc2Ro3KKK{p&aWRXxcfkOO_lU{}PbFdRhxKNPz6JJAr=co{e z@*?y%rDPht^i90tN?o_sRy~@NG2tIHk`6BsZAa-ZS?u@Wl!iYFvWbAp85gS)|LZGA zBvc|Y4GOERetV#mFzgj&3ny7|qo=u_3nMxM)<0_1jV*;Lw%_z50;By&Hm;Wn<*|Ft zY?QDn^Y+)bRI>1^sNqp?f2lwIkQomhEWJk;g0c+i}&-KTVZSgCW6)@=< z+g{oV5lkLy#`&S@?MibGS^DJnHyeHq!;6AJ_Y%7x`HRRkzp04u|7~3B19~r?ye-)4 z7cu_vr3FK4o-jW0&!?jCSP+!KS`bMHng3RbqRt2$1>8HB5?m5rn!!w*KMsb(CCcc* z@GIXw5lmKwZ0CF2i<>N9aV+@!?Ni)Mx@8r5qQJ@j$JJZLMb)nF-^9?}DGW$=htiEo zgQC>XAYBSb4oG*GgdinIN_R+i2nvEj3rY(}J=fg(zwe*>dFu;VpEcJyuQQJGdr%m9 znELc@t)ubA6z)*I0a~YKK)D5I$UBfj8ha1~NCEF8<*IbE0LdtJ%o-9vUT0HF zUj@=K$mv427sagKqM9&r4eCH@8zdal2KZScD(AlW?p7A*AjxV-*}r|z<(d`B6)*$$ z{a>X3(A`%Dia^DZx{#is8SMc->K&QBXg_xYBo#^Ke93134b-?gnH0+Rv{#=+Y=t>g zSF+%XR1g(LV8QWadj<15NX>HbVxiz;H1pqlkXA7qRuq_HSlv@Od0_!%8yudm%npLL zAFpkFkg(`4_9at6OZC$shkgQoO>#YIdqx% zuWdecY=IzlUc`o~DPXtcsF2+&C&sESVN#=f|0&oV&E!1YyA(e;@eN`_F>Bh@Gj|EK z)AEWl0hKHmLA~@qqAr~Y&7O;+jvJHNM+b8=R)EK>IO!Plt}+) z{0VF~n84>5!!j8J?Lo@xQhb5K$#IQT=yeH*=cPSh43TSPwN;@Vcug)F_GV29za>dK zfCV)Q-YV8L7$IRC<3Z5fQ>IO(@iYNSuuhkX>qeSR9I+mgv?C$Ui8?6wYFaMG#c>|R z_fFC_qO2tMhx7Ui9e9Mi^Xgw&3)D!!c0312aCTE>izGv=<+0TY`Q61+eDNsAF@1tvR_nrSb zE^umRbpUqQz7lyeB8nS$YaS)I z=(v8y{e%S;)aEEj!e4+2OR7H>v0la~Z~Hd?N9_PqDS|p_o_XnKq6`K>dk|fA8ng2Y-pbgdKl)oxPJCK)A;mhJ0xqf0y=P7lT9}Zq=)YCDhnEm=a)J_ z*)jf=2`{ZtN^J_pcUx;>X51+RlJB2?cB?>+NRjrr?_Ayrx~lcIl55p}lo}B-$qefP z9x=$Q+5ATi`T?hfQ%ZsId$d@t?bjlbcK0goAmSL~FvmXJU7YbX-8gY<*Lq+_YJl4xHPTt}?ONu)se;mwMhMFdM zYO_#1la2Z)*E5_TOF`xxU0E!&84SvqHiqZ7#L1>(AU|rNYqnYyVHS9Z$u<2#ES)F| zoQJ=4#s|`ku<;y8yI;jXNDEf=K`SZ{jKY!AAs-0e5B}-+yl>!2Hvzd_Q+$i@s}MQbL9r0tT>pBA4dxvPs4|`bD-v>1_DV@D1p3;|IK7V?;__339JNWc~%LUyPmi{+46xJKzD^4qMew&wSVQIQgE(>h$zYo z6ye3$aen|)%yR65%7hAT0QTE2%NxYVG-7JbccUnq#-VV zNXjh%9n8f`=uvtE{F#S|0`oH4dQ`r;^jZw~L_2S<_w$w}DgV~Vbgb}Zfb~h+b?`NX 
z&3h=J^p)VaDIxAZf^~&a8AWWRboH=&c{@@-(*Y>_1Bn3nf$6VGK~MXDT47N>i#Ib6 zt4>jLO=PiB;hn9Zz)2zAm(K8KvyAzjB2Onr`Nce@4G16MC}NA0+L%se+W|H_ z@(2Nj1&!64C+a-VUu}fAsdewj=-U_>TmGOXxioL29a(eD1r$tP3BCo3tT%{get=HQ z{P}#?>kCIFanR5x*ut z58shmI$|cl|KYX0DH!wkY~@N6MaQDsiEB>xNtzIcNm<-auQ06XY5smR!awk3fj(Z9 z32oxk{CC@#E$XtO-@z(B6Z*!nI$8~rB(N&)Ssv@JKbkjy^KmVFM3tpYsQE?I_RFg( zd#UN^-`I!)Md$8`+pCE{fL|N*5J6MkD43Eke}Uf$hBuH9AQxJHzsZ@TfyPcESBude z$0;&I4rb{qZ9c~!U#@C98g{5^r)*vUlW$}R*BwC(^PBYKnS2En;edGA9|X&y{y&^) zhzmWkk4IUaV$%jVa}d1fwcOIH2H>p(cwg)50n226>&>pz8diaKl^pzG8c+9*iF8E# zb3v~r8L8~hb2m@*UmX$;1R8eb{4&cJ2hiNZf_@kzz{sUaRK0YWhO}kHo~sR!7I~X< z3EgL!>l@pp_vpc4mjZ^OSxbel2LApu80RRjPPA@Mg0UDP$jZRM3CKwA1X>5>Xi8D? zuL|1y?2i@T-w_Z=y@rJ1`fV&~LpQl$_%It2Jwet+bX}Gx2BypNG3GyIn~p`A^m-Ci ze!!e40MH@DnnhRe{{934>WvQav7{ropaUBt>01>+w(=f7zkJl2UDUSUg-j>dhZU8H z2M95Z$?0`OFinL(MC~U)2wQlejcy{e!EQVJBZ^$3l6RYpN(2B6@!cU8+p4mEmbPSG zy5Ijr_dl;rqG6SZ$Y&5R5kNSQ&{`_+cFbO}!1L17dG3d8T{5KgWbm1kSbNP75oruH zk5NMfIrAiI$)`_4TD>L3@Mb5CR|9*;$)+PYuH~2Ust>fUcir%fFGc` zmH&uMYY8|ki)13dc`TF%>=h)H$C!34uBu;5TH79OD}l`mVKMNrTh;2xUzi{FElN;u z>er=ly@kIg^XaAgCeXaM&-gztYRTweVL;|LfN?FuG4S|Ww%>xmJmVy7PoXnJ|=Mn8GdmB))|a^E00mbALX?kgY=Au%`pzZVC5?A$-f z693N|vTA?Q)1`*)bngPq`H$eIAog;v}49^=! znt5Q~6udMFq=`hdnOJ0KCV@wcK)ViM==f2qq~7VwsF0yOP*enT;9Z;#$lk=*N?E!K z$};~QEVdo+kiLcO=l%RLd)2+joeyxizIIYvDzG%Bul=Cbzvhv&!lo&-<|#a~Bi5Su zyx4}BBp$fe8+`@9-eX|+a0A#Bn?Oz~_4GQP{O_0Uy8;JU^Y{3`tF@Keo3q<5Ro7pJ z*z?*zINl1A>Y-9qq^BgcEodokWKMOBr%WQUCuONkap{!3-qGp=fF$XIF7%gQof?M} z0v-R3ECQvx-jg8=^#B(py+$)}-Ul2gLBLB9_!a9kDSUgga#_n(rQa#4zCEYCoKEjJp$hm;#m2o&^xp>p)`X1(FHb&VX!#850Xy9E zH;ag?-)5GZIle#2N=8=!Y$4Yh>}VhX=D?iClKrPx64H!2*Jcqv=FOe0C&w|ATR9$a z7#V+_faP0Y+|xDmCjkEU4|b5e1jmv+?*@J}`RlW^BI!>_Ly*`{wjNHL<j%9=fxxU@yg7-H=zqRu!$#Qc^_lFS?ZV>`Yo8W=+&DcA9(pV;Yy{vG!B3Ls zU4K6c$YFsVmMVk=eRI#+%e?9X-*wUA9kdI#xg6@>;C3E*eXaICU-Cc=Mk|1oh0B~U zVWGt@C{OB9&ehJv5VUlNXmkzgq(lv*C5y~#R3zAtA|5_0x?fF~HyjKhn-C`eB0x5G z*;74P(dJo3jQ>9A71XyHTr81i?MO5YI1DRU_h0@hl17*w6kfAPoi$IG)LIRJnz@FZ zkRUT`AmnI<4gluWsPv^=GK&#esl>84IfhqsJfBIjbvW8e)S9x}~%vK;??DjPfi zsF=oz`%oBn54X~T5V+m|J@I)x`xXNc0UW8*0gYkgJ4rD5(;Gwz(z_g#^;79qu!9XU z%Wy3nk3(TwOe_)NM$AsyV{bo; zaEGH$LrG4VOmK=Dzd9b-M5Vv(t1BljgQ|s`o3zB|@dZqiR;XRrb3MGHC3V=hC3DuC z{MBxQVe)Ev$+7YGlJhgwi1Rdr zSP0IC(b#eH>?yQ_BlsCuR1@jRrzzM?P&*TBnmV#HbfZCwz(-AiIjcAym+@*9YyYxw z?7v|PNpk*m`Hx>%?w>bv&)jnlZrU#{()$nYu0BXiu*r&4^lQgF`dUC%9H&88GO|Xa zYqG4i`0HT7eRN|l>D^~Hm zb=*tbj|9}JS3kbOqj*f)s9)a!00VIJZJFzm83&u<^B^KGzrUvXVAk1@LEfS%Pe!-N z4GB;aCX5zD56}*f6uL;wvxzFnR1+Fez*u9+)DX6}KQi#(T~^B}N69(g2>qpa4N?M_ zV~+5I)QrY??(N=}kakg*m zC~p8zFRvAdRHXLFo`@4C*zBc{F;4jAu=kp)5Xbzq;DYNeLVByOB%E&7xi|Pot3n~7 z@9{Y6B1K-?aE50|`pkBTIn*YBp)47X^eQB)M&o&QG0lL^PIWKgD}bLAY6hrKP@MP0 z)BTqV6B0o*F{_m>1NtpYm^ojaTM0(iv7)}5;CL3C|N1;0w&XT&XItIxTbmCqn<}Jr z8KAy!HFX&m1~odw+kQH^fNtwCRmTBF(F;(&HpOC-C)@>{&#Z(_{T7?CENv^eTlu0g ze4(xaTV(VLJP(1>@P@2MdZ#E^0*O?r&5HDNGO~onyb<|QTzhKZA+Y11GgeWVAd?A(dyD2k%#gh|B%sdnBnMBxr_pHBZyxhq6u$djsYF>KCq+9 zoJKd%>&!@7(aHt2rG)E|gEPI`Aa@zb*>834Jxj*uN#v3&(xU^6TEMAZt~N*i);ChH z$r%j^4lx9FZ4BEQR3*_$!d^KsJ{=H=xy4cHZ2{cIUF$`X-uUO*P>f3?P#x3og|e=!f~ zDst9@VI_LocLj7=UZ8az>H*ZNVY^eTutrXHmQtDr{r*SNxzTbhTqU~$VS;Or?|LK- z)fr$;%6tv_VkTf%h};abmAzakowlKRJ2LAy5U#3#hPdO8*Dmt zjy$q#H5gR%$4#r_-@yVjaf{fccK}A@R3JbFk0&w?S!DY8}4B5KImXnai?#Ieflf~6Zl&HJwW;pp?C^%BA zNmrGaDs`W!Rzc&e;4$`3U-mJHv^%n65?pc@8ZW-4LRWbGS1!M$fBhT!kl(P{JmJqr zD%*PflDiPgFQyG>wUuZ-^zCm~<)j_ZVNKH0jUCwShfcYg18XkgHN_TE&m<^%DAr{l z@Yfxf5?}VzGi+6pl0)bnCImQo*(-<_B)}xcf@qvvouT+h+1sg+xh6h6)rOEm_fwD0 ziF#xCS*)0or(PS-xn($szd1-O52a6FUeV{EqWCX-<{N-JvHi?M)_O+-9v$hw?k%q) 
z4Q4xkJ9IXH9&g<_0n^aAs{ne5$bE8g0<|L82z|q(36F$u+}{@C z=6!5UJV8zouV)7_S0P1ETR4q#BfC33TW?e;jN$J+V)>ZTdp~imq!!amYw^pr$C8is zy;qO44a8Ns3PmRKb`7xKVynCVP-JS{iSMDVR_E^a?>pTl2?L@QGf1fk1qHOP$(4*U z5#Is^exjniMCVSWt{&UTPlY%(xo-@EG};ubwq}4vt=MY1nkyxrF|fzGXBB=5d3soj zfaB_-O{`*5X*7oViIcn>qUAhprA#UuYQEUamL3mzS57Hm%YtoFAnwa;r}myCR;@@4 z_md5Qqu;!oD3}%;l`8NFf9?sFaFlc;H9Tv`0UE?TP+}aux{UYf)58Dv4k!tjDEA5P z-6BR=A}FPQd7o^1*o8=f{?Qwv-?0#mP05S9gI;Ty`0OIV)Y6Gq)rE9{S=H59u3{LCj}O(3!v`~qxHe4bD7MB%;*M&` z;MP64Yx6*mu9B9Srp0d?M2mr{1ZMu4M=nd}iLu*jEIgK9zOA1rN*r1>1d6f_7h$s@ z|LhZin^XY7j&I}&ZY5=$7T*LjMa96=+ES()8UhZm(-~d&;^(vB;L~$r_9gc44=@9z zn80Y;V7;~#4!LMgjNI+xb4G^n#nEqJ+32dGfcjg&2ij6M!-N+t5V89aSV1)R z#k$*n_##Sa%$b6IwG`gvpW#`5#*%pGKlSf$7Q6+9SB{aWctE#&?-w|<%K)OW=xW9g zMMu)aE*;?xyO$^@Ak_pC)Sq7)x3Iz$s>oX2NYZ$&e5>9eRqBYKi^MX#9|!%R^(1|< z#L$#KJKt|`gi*Vhbodhv%WQLZdF>{>o^JU0NAY|~^yW`#>vVlCRhP~tkDdT8Idgy~ z|93Dk1^jPB%tmey3dZb*$MByBBp*Gb__AxKuR((2oS2

nG59k69RrB9XpQgsisifaBxV}cZ2L{dXeR)Lk+(+%tSg&2%?{|IT{H-M**4^VR^M5~D!z!pP zolHr`R*{$4`m5iIwl4z-2qtNwe@S))_lY}w@zlJm`f>L@iRCfbyMl2)ZX>%mbYa^3 zA^8rbr3ClIMNQG(RXqeM!C0dbqm34^Rc$JLy)+rYYF{z8yqDqRBeo_qVcs&0VOjDI-n zcXQ`OTnpE%AR4KyIqCBf0hT0U{c3KUiVxt{|FRK&vdBluNq6w~DQ;M8T8rzwOoWgl z6*pkU1ZE%gH*6-}ohqGQnU=N#rG|5d@{_|ci z+q|YgdDpz6Z(@Mrt#ANB8^;8T2ZcjP6XmBAfqM+fOr$C==_3|GZs!W?dNNHLDxJDt zDv1G$i{pPy(Yq+u{fO0JHD7_#1^5l>VclpkD{~>w(+6MWSFe z?7r;@ikHQltAk(w^LOJbI$TO}RILqzKJqB5w?{KPdC8lO*^dS-1YXrM(Gt(1k*>?Z zpT=*_CK;!kgYEH2*6StuhO7-Rq54YO+CmOJVNDX;9pGok3Gh9BDSa4F+Uci8 zHV(Y!fI5u>qN;GS8+ctcSo02HFC9T9OCBR0ZS{~SLFlWiQ?#qdc>iYuc{iD_tO#5- z-T>c%PYM^QnKnmrBI&G8!wgVm(V(OUWa`hp-H{*T_?5AtvN5iNPwStdgH3Gp_b)Pq z{vaV;nZdCU%dU(j!#b!GdAuuA zJ=Fjl#2Q6Vo+)SK5P>TB-&o&}e`AWl+GP#6w*yDic_@$kUn5bWg~Ik=tvJ^&Cqs2D z0X8TiLd&QSr(DddBi{L~KTJ27Y&IcZP4;wzO_k&-ci0L?VaDvdhw9vfZY$dC%EI@l zz{h^7$dYquw7S+%o&|EuEI|*P$HGYoA0@{Ovy15{NO)OcqW{Iv{zoG_dx6sFvfSbY zbR}LA96;Z8bp=PO?F}*M*i>}0V74Q18HVhY(GV)e252^zfiZs(&)RZF*1VC(4XG}03pd(=x7^% z6wyXHiv;r^J5Jd8hn99PrN8yMN<>nd=shi<87TEB>`#Fe3NyN!>(c;R-u1ZCN$W(L zH~3L~f|d)6J*g_@(^75^)dF&2EZ)DrL0~)<70VgNG+~^!>K)06l3VE1XnO_w>hUuD zW9F=9jT^l53G>wlQn^?DV^6RTDLlUmdGI3mLRmcHvHw=;O^1Ms{jI{pC6j-bv33&p zROZz!fHp2{wN=|{>SdNmxFb|mtPN*dS4x)03TBN~!i?e8+xuCbTB=knC!cjoMXCeL z46+iFne5q`!-}vSu%#o4Fp?=oua0w)Rq)y+D}sW<2FlVc9X06yDmkedEDkLVf!ibG zU*)g_;NTuy0*MNV_02~h@@&|J)xZuB{Xlz!bNISd@eOe&GyM{6T4Z zd@{+F%!)vDGR)8v`wc<-z|Px^-#8=L<6V7BhqQ!|eK@0}f>8agA)#9R(cG3c9>K5Ko5D-l7r)#qudl?<%@mQxR1@@kBfO$_dGeI0U0de44S|RkJTVx|p>`ff zaVwpPAVKEsn^_JO=3oyOROj;`Wnw{!v*N?|{!;FN z?Yvi?C3d5@KXjc|LPR3)$$QM;nna+s;OscYj#=*63u2AtXccqOpJaA*wT`t2S(JGn z5Yg&@&SL8LEmkfoA#y>a)#R{unQ!T%p;aHggr^-4<~4uw7?&J4Z4sHXV4c+^d$a`i zh^@$qn&k}617a~?NlbeBcl>Tx-3I!-vn7wf{j&h%r}_@{beJQDx~LhLXoIA5ZDz54 z@Q4gYsU?0W0R~g!I^dRd%Pbz;$CZ0{N=9nSG$xTCqLyRn!}&TTx~wjV6h&lg7&S2> z2h#B>>HQ0(0}1}_BV980*qmn`#D~K7cS&d2B{S=r+cMYH`WJ2U{6SgwHzBVYBkyXR z9qEJsR^BhJjwguF8=?dfLlr`Umz&(~n)bRmPFZXV1xTVQ;0wSG?X8P(Hn?A_D;^fS zD%Wmf3_as6$Y!%^imYw;VDMCm%-pI8tSDCj7?vEq)_?dj9Swyu||E&4(_xzg@_JI8eXEqqCyBjRU_309c`S zv1nwLZ-@}W=B1QWU`Xo?8{*+6Whcq(KXU82x#FaWAd5B?tl|`Md64=Bo?j1Cb>s53 zU_VDCQzYfhTzMIk`nu^5>;$A+9@-(3#tCS5L+i_p@4Z(Nh=`+E1dC0S( zpCg}&Tkhzr%odB!F{l%3rW(3VZSnsjn1H_DSwDZk6iLBJJ`zP4=?c(C-eCC;(PSY4 zq2h`mpeV*WcomVF3_Ep)YDVST%q~9fUiX8&6$r@bBXMY_wE*_Cq7L84!;jYVXJRrn zMMq|=dcVAbK)Z**@Af>kRj){mYWYuUf}TQ(oM?yoOe&l!{x7-Ae>^?XJFhlCTvtW` zPto!Zx)LdY$W0eyQ^DKa2!0twfbK`dkg}pd7OzLCFqxzywR%m8f_*yRa!DtsJx^>& zv2OL@;Ii`PO8B5L*U0zJ#%{#zkF*C2yKYu9r4J!>?q0Y~@6+zEWZ$I=y@7}(j zg`VDE(Q)i#6Dt&7M9J-N<>u$g3p*4-YHKxj+{_P-n+o%C>mqt-dC<<33JUnp6#HZ2 zJDN}3)Pyyeo#D?nYiAemKYs+*;pIR{mjUGE7<3G@H$9RBVTN0MUj+qO(US=>7U6dk zGAe<(89Z(p_ym!#>9ccjjfX@C&;W7+-7)V5YT_P;Lol#_^S;IMPGd~Vc{{$SnY+7& z@&eOK+e5!Vn&lBO0SB--oK7*;wIXn|8Ooyg%>BVTqlF|ibLH+x3{7sTq6u2P!Tv9} z((wMXch4g2MN~HIaP~NSS9Ml%JjOGHZ3%ZfW3MtCT^G#5YsfJFXhtB7S%j=~txK!N zBgGw8uNTic1)6v2@^`_ADkFuOi^u(+hX&(*W|r5#?%+MZ%%2J!*P^T$#A_|6W9~zU z+8I{&1_inHmSanI%RBZRNGA7i@ z=_5vWMcW+_8VdF&IsrMg%sEkzt49UJ>U#6#VO3bAeYm!k3Szv=kJ?@-B;gDdEkVs+ zler{h_+AmQot<^acf=q3^Kbtwm?0|?B%5?w#+TUh6nX<y^myKC`Ym>bu9zb%^?J*?21_1YLGvU#|1$^^ll*8=psbnb%Bfg zaVJT84`qJIm5tD&J0xJ=9w=;D!4>A}kiOc&vhXoNcToy2+yLXyMx5XQk>UEnCt7O6 zVcWYuIG;UeYxCr@oB~6fi*|D5zbXmn?iRV_5I_g%fd9I4NclrhC_)7P(`v?G{)BdH){Ok!cIv7o;{X zcz)CUocI*F+7ddbE+v8$sd!FKbaVT2*{8|riNWS=UN>a=3`c`jj#iK~A!(CDkgGIb z+mnj}H=^L7TKIHv@FW#0xdmD<3yZkFmI9At1VZnJ}3E7Bs!>){3@-aNWk+leZ_Q{YM_b1Bx9R$B_Z7dQP+D_i_D|@tW zXAC-phwiS+f z!}W@lz=QjttFm!(tQY_*qH1p1OlT}`V1Cp0Um;B5bkYfdWStQL)eTgw-z&-dP;S#- z$h$At1qyFVZ)RAEu^F-(E>vK>|BoTU+Z*ezkxb#wkli*8{9WbeH7r;Znp#(Mv?zs< 
z=^Mv&UK*Tu2ZuYQm=U^TTn{6L*G>jax>&7;8|+wr-%;>g89)v4|8(x`YWlCmq#|K4AGI*0k%Pk8$d&c$_@JUOP96Z#?2 z!*R}x+i)SQQM1Wj+Hh&(o$1XHrK3_O&B^1y%4)+o-w%8DaT``Cs=P6#&pLI~FvGO) zO^=e&CFo(UqY?}O7a4@;gdd2=C<5DY+$ULWJ!{I8^;@sDdC{7LK4D4aVuH${`%@fq zRI~_}>3n}Ns$Q%z$>V@%@pQj9n(XwSbCYT?BcIcU=#o(8P8z-Z{&$*zR1+g!WPv4B=28}Ji0*d@V8A9!_GiJWfA_ zNi1%JZ%0ar$6=QSW{%aVqtT|?plToGKj~T?&Y}$?jbX-&+d(*3I?z(@JE-hNf7IM{ zR7nt^%&Xfoiic1+G_g52an&Kh%w8;7gAGLkd2v1X&irpZm^mG}`(3}UqJi&g5Dw@5 zn_ry4+n(LmsJdW_!`}6&i8iR*8ty0rhVIvUwrkf+nC8>)e$L=$$QH|>;87Ic)KkyM z%Sq>)!SIY4HJR4Y%HGZVXLFcbae}GOHV>^GR7M?m91JEGR5X@_?~?HN1Ii@QBv;_{Fi+pCd#sia+FkUy!|6|6H$@ zX?1E)<(o?M0}O@@I((2o!>Z$w)t224b!{M`t1R`^x@7gwAMhVl9Rs7{`#9n?djX9; zbbeoTxcmNlPT@#VnNbdQnicFEDQtUadPM3Oc7zx+bm@*>Ij@w{ES20QJ=z@>Yhas> zNjG3Do5EqOA3#L*ux+SDa0|Ci~BQ4hO}EZyyI6n_oLRHGRsF|0R0C^+bcDL+3%H+z^;J^@EvTy2~KP z{ebqTn_q4Mj)FFvCMU!d{NCKke7Pl^_rtq6)3ndVi^gkSG#=f24e(rwD3syw%5Cc@ zm+N_#0^u$T9qM1v#OQ$n_p`~;vU3vYkDe-(RUqaD28l~{-$Ek0C1^rlc3v(=WN>kC z&y)*P`4O6i!HW}$pAoyH`Du!&PI4&Dj>0J#2EAotviy!~Svho-X6j@~gc|P2FOe75 z;rEL7cc$CFraL)~B{usit8>;FD!KyCmU&nC{Y>zd&d{+t%cb`vnq#-vRtOX38=`MO z`uORkwAtGIF$rhH(Cd(U!JUqRr?oR2n%&>^Rj=g}N4lz~Il9I2u^cVSFdWPygP9Uv zY1H+?_d5^2!Yi!yM)|urM0rJH9Q3i)A117valmlxpW+yVz}LoIJ zS9!2RI{XvxSiN~vf6V$fm(jYBf%UGvuqC)bI|;;JEx?_|u@rF@kXI+D1iI}q2z`{x zvN(kBG6=7E?e^&+Wz1a0tbLWEF`inz>9v?%SmNnLJe7n5{Tv7-4XFsepg%Q;aP5{$ zer$Hf6(=IYGR>Xsw`q2!sYRxM~6(G9G^$ z@tj3XfGYzI^G9pwQj?$`_Bvu2n$2~5T^EZJaOZF^B(U2CYcHrJbR5h0osROKz z?sWyp2c0-V#26o1xj`C#}FciJT3qyb_mI%~FtahCVHTALIxP3mlA zqK>)7EO;Hmm29tgYH0QZrDK{?6HPBA{j0s2n?Sfa3FRu$Ka@&0<~T)jyASN!)^JN! ze6jeH5Sy+@!`2#Qm1pf3<}8qY0uh&cR+&D1+A=+# zw~8fLLuH(fDWJEE_$V(KHtQ%@Oj&P}7|We`ldC%flPGPzSz#wy8XdAr?pnSnZmphb zx36gm&F;2#e6fIrJXJ#|cyJ_E6iLdmbu-=haB z2vn22L151k>yI>2_uV^@gD+bCX2S7)tS5A+CR;&oVQx}5^xPfa+&~}^Uekc<*Bw^P zYHOtRxwfgOQr0cSbH||Iej#_ZFeQHWh9B`g*3fPx(PN826oLMJa&=V#4gk zjXS?*kW~EoFj!&x`89%mkYvZ)j4VDx2k%R6zPp7$lH{Jn6{OwzndzCl!h^2IV@c(U z1_wi7BFDAa4~cq4fWr2$ch6P${DSgH&37jT9>&iSGL>|9vdN-(7h=A;3WxA6s1SYn z4wXilu@yd%d+o-EPYQWNwYV|(nLCbAG7F>bUCo8)`^Rk}iBqAZT+v$Tug0cF=(+oP zFy0lc?U2`;mpAvPlcf@2ofG>j_HrBXzjxnm#7ZGeL7$&7Y_Dk(ofVz?N{mkFsZN*d ztRjugpmxoufDbIos$(aM``MP!&Q^wd{bVu@15p}a7ycx++w$XJ5jF0OAQf(Jyx_Bl zRg}eGwHL(F+jnTS_Yb@ZbTl;GO24RBDrC~dR^kHM-lzU;gBjd4i->$9-UHqaQ^LN@ zk;!+O7dA0xs#Nc@a0LS#)X>+novq-IQ`Iu}g%8nH`WMwcr>mQx8toViG-UHY&7Mji#(da2a-c?rhjTG8X0oL!97rcY9GGfWxLxl=+V&a7ab-x~`n_a$xIMo1rB>2cHww{NhR4s{^ zFQ=>K=zsrU0ah>yJ}Z<6DU6VH`Er-{M-1*$!|Kz}$X{am=1OmSB&H5NaTy5QV3=g> zgn!7%=QKA=wC_W^io-tK_g>gP)gejCb9DWb{A$QxgmHwdp4#IzSM`R7l!2u+twpy9 zti3mlQnwGx{}lL7NAwIujg;I<;;R=|6*_pU zRvOC_cico;TdXm$Dk=v{i=VnFHUy_eyqAJSJgG#2|WBxw(pQnY~)(Vx*Y3CZ_13 zfS;z7$sL*1MTU#Y8FKe?csJ|%U`_z(sfoMDdz2$1xxK1%lSc=!3KAJ_`1v$8#u*Fx zA6e7{jZ)%&c;c3v_V)vHO@?$xVXLD({0DW$6Sn+ThgyWdZ!rA(R&}Yz!W4!Cxv-5W z_aG;k2u3*LIXsde*U2ELDzSo1m23iW_`@SA#Cym-%+%yCKx8>)?}(#aqx}6G93#b% zCMh;mQt%25;LwlcZKyANEBl1RruNxaVg&zv^VIy&3j#Ymh<%ehASJDNE)sr<=v$Dq zU0h+S9o_Fmtsnf9nnO~RITp;sUSZQ=@O|c7+QFwVGbHhS7s*rY)A_qSO0#t9relr| zaZS7f@WxRyvnW4_htBouNY>b^P!ylU`q@QI^VsNJweMz2Z}J*S+DIGXITm6i^y22a z_TV7PQqX;RJ(ci;KAgEwEMn8p(B@~B@&f0}V3J5yWwu%qsrZmT86v)O{hFzuI`tdl zpm;XziXWacB{F=VJU!p^y%2i9J#vKg=>g@#t51B?mh$@u--a9g=>*86QQLZsTW-b* zRVDg=|MmhLxLOxi;SS2V#3qGaA;qpFsIR{_Ve?7LIXO~WiSsz~GN9M?U*vxd@gUKx z@jXOX`t(yP+8 zxn2I$3ZloU!Cg}h?a8WdHrBzpJ0pivsoHbZVfXLB?*g^Yka>i>2J?5n1F)|_DM|zf zTFhf|&KT5U6HU;h`{ zz+me}?ouOW0@q5{l>+xLH3q35SyQFuoCX>i(UAxvuJ_TEoT_y?7~M{sLWm1K+pDM1 z#ib8P;;^Od%#l0oSDc2Df4!l2=`;`#266m+50cW`AOqjc{Gy!PXmrVjJ_TJ!PMHN1 zF`m1<<&&Al$E`g-jKA5ANA7J-2F3gDSt9kCs%%$>p~z_k>dggkBYI!gtHS}afQtSkbyi2S5ydA&IXi6&8-JE`su-^ 
zFh9o#8rJ-WGN|W!KwWwmADFZD2&E5dkWYpavij=VF+(A~YLkjJZw$Jmj;yr69Tn%r zy9?z!y={VC#Ey$Yat>+54sNf>D%xN;IU81lvkuRN#q3W?PsmqSxRYgi_JNbWD{~Rvp=BA{dh#lZ+;`=^Vi`5n>~UepvXVClxaQ= zW;5-z#wJWJ&^ZaeL`)vWv^l2V!6}`T(=@|$t*(Q*%78T_zdtj1#aPHGS0w6m393-| zJAyZK97_Cg6{zpRn9%*^&gXsBBnGl7!$n;n1oO>fS*>pqFoXOIHAncwSaqBaajCf!bqVa`T_qQD zm`JG;LKt-3kJo(2Wx&$nJ4 zPnkI4{PZwPC`X?w>^?pCGaslLtVjIJ$E4ELSk0{=CkINSQnWa>@I7cowA>~!IsJ%R8kRSp2Lldx)zs+-lvBxH{;EI@w zP3OuGkk*37-(s&yE;RA+*&G#k_(J{V7 z4nl6y&-^JK@5v4H;7nIjqJQ01!0H4n15Nqwm>Df$+=q69OGJna^c zla)1nPeG~RwS`RzvOC->Bn|0D6kA&r?qK_Civ=1xIx;-_JxG!Wi_&CqjN_xsGC3IL zu4)4UmZ~NMF3JBdyavenV0R@z#zD979PHqfnR39So4l=30s{%AzR+WgLrPXR<) zCIqITbc>v&f<0BOv4{<_NFE{OGq6OoOpIPclmiq?IPId$nB!Q!q7FK3rOeQh)2fo~Rua}EeRi-n)Ic%l#V?LA>$;DYY z2op~T#vJ#p{^y$og=QN8cR26%rPUtek-eNyP^T%_Yb3#xrwlYVw+g>ZV1rJo$+E7l zJ$@{;<-h-mG3_>4_97YS&?G8va{f@ zwt>At(Z?9QLtV8D+W9B21ox}x6djLhZyes-JDOjJJ6m-#r_-7{bS1meE!8fvcxL;j z(dRv%-(xfe5iN^Vf@3rfWGDY-C<*vQkgwib!S;<81o z@AR>*o_qVnQFPFEZDPCL|GhN=7 zH?iMcam6~fK*Z~|x56J+u-CmGaS1RhCrIzEOaYjB=vQHHv2cd`^ZaKq6HGXY_-Us> zn%YpF7*m%DNYLb%bs}kz8|GbyPd6LKmGYBbZudB-O*t(J%N~kaN=6-Ek%kCHtC4M# zKq~am@Wvr70ms=RnSrwZ7a5__+jEm)iB7_y|%QfIYO?>x# z&~S~gf$L+Gq@a(2O^WEQu7CcqkX_8Xrg4%>+!lTGtQu99WJL8$D4d}hK{3k{98%`i z*biHexQ|!rM=WkyEdH1dL)=92gEGb}W6^id_AC8@oY{8+x7r))0Mn*lU|LX_?kF@}ox%VQF8x{(oGZWmr`0*2lr2yK|_a z5u_W0p+PBWkxmh51QCfr>Fy9gQ9z|ZK#&+ZL|Q^yKknh-$N;N{~qFpKLra11brdd|ZlSW83`sVb5y9Xdec;jWMD?Q3h2~ z85TN+T{wCBAs=Tct(ohIMSc%zja%h_T8zVp&Z3o^{^o|gB9SGSlhWeW26Wj%5IcEa zQkB5}eLatav0=cnv0Af2n9sZ1&v6JSsXhUB(GbE#`MG@N6CSllPvM!Rm{p}*Vcv(W zbM}kXc^8|Y9h5U!&m_|O?cq}q|3A1QA*T_19QfP8S7)jRT8kBle7=-w{k$1W&_0JWjSbZAR}TD^urZ&)MSgkE{Ax;56W)I5QN~Fk4UtD~KCr z9lY}Z3-L&lsTS~=&;Pw(|CKH;uEc<&!^{cDEnrN@I8JJwLwiaoJ~yQvhB?CJ z`kmBM#G~zcXj_1PD;P9Wf)0tv03(>n3FyqtxO)&MHsYt!dOVtBjj2`PpCpFVxf42& zGZBEpTa71Kio!ha z&+jNeyD8ynS0NG&x~avVmEGcPO0hnuAA{Gtutyj zhz`F=_F8NdF4VFv%K{;ou~;R#=SqqW1q0^^JMvp>Q#mk%TQ3cf9=kX0%gEr_?jX@U zv@bC3@a|%J>qcgrh0|ZHf7IBoO3$D;P*qBvyVl7l?jJbTON*QJv_Qz>XyG~;`F+o) zl6+z>*o*xpq47fj56#i?nQ+h!(o10k9Ebl${FE|6ZS5<>!`Rj zD_`uL6ihNnm8Eb-H;<6jz>p(+MO@S~Oc?~mf^j!lyHSh>bwZ(dQh8)w(<&{Ma18a` zJ483irV^-mdu*s{go?QgA5Q&J6xE@v+zp7fvkC7$W(RXJEsbec2Hb%YRt^E6v z|5sq19^A~s7>TS0WG#g3CQVxR`>-u({fFB4VIDE5)~?U!6kOU=zP1!am4gPf!64AQ z$Y{m(%#!Ra7Myg?c*N9qQmb;fM)fWRr{V#Bv0zzH(ks)_ETWJqMB1HN8KJakGFTHF zUzMP0!J5i6=aiHs1Z|Xy{thzfDI=s~J^_-{P+AuyCNom%-DP?h! zaBsHd(0%@?>*?AEI18UTNa%4{anXF`W85yne9>CYtF03g3OD|!uRfqLElv~LS6A7u zo!G^YR#q%@!}v|!1BPB{Vl%3SQYl57M6xu|iAt;Z<~ml>+Ia`}OHTaV zqRN6){S%PJ{zHlY2FHLC51|eWOE4~C6t)!El1O_erhmw48rM{iF?)znMG<5R>%>G4qNY# zeIl(1szv1YRE^8NYrPh};>sV;@b;{#}|gyFmo2!q%@;jM2!rh@*yZ(L+}Z+JqIK4SH9? zg;E30M6+yh#(L|EQV~S~uU7sA1diH)}vW||+OAL5E38G?RoE zazifi!|d`Z(}v+2GXfHHh%jFc(b$-H-an+25zv=V6dI4chP;yb1zLe)CvXP4Y!x{c zkb&H6Th~$aCsX8`)T7oj^Nf*d>I`F?H~AC~k$Su_QB|@mHE-aa_;_`*GUwMIMlUPawz8?oghd~x2{l$qZf&Et?dpGJ(3{^!Eu-@wGg&{8A~@bdv! 
zwYiV4a4A`Cgm^4ACbDGN_C~SMQe~-jw+dEU>!5{DoB7`x2>4!KvKsf#ng7^he2m|7 z*D+uyrM?i+UxYahXO%)BuwU`=k@T7d$|^>iXibl}SBl)M7s4u>En3K@`Z*eW63rc9fRPFOaD^ZuHSx=!rNn z?D{^|qw#2ZMwbVJZ$#O6SA6mh7|P-7ueB~PB*a%d%wW6Wj2DZF&U8rS>!GnK@%j)D z_sippt>wv9uN2wR?Pc1x|9oc+(g5UAU}Ae)jO77>bi$w5n^+hlwrf4wXbP_K#jG~d zlT0|z;*FcCokRM)6eiNWY&EvgR7HafL<7ptw2{8R>gi;3F}k#gr7} zwBA!YBy9`IEO|(z%R^lKnLFlt-_*52w1;BSKUKStu+BAWrCGiRDBP2GvnxBW&gy3s zVIq=ZjVHjgv)#`$F_7cm-;3lCagDD8On0uJgk|+>FXLQpwU-L?_9}AWOpI}=XXtxj z5xdSrN8*xG_7ZhDzT$0wkFOIne5u#By=&TuP7H7K?LR6dU@dO@r}m^wDnoey?tp!y z9NiJ}b61zcvGA7i^DvqWc6htlQkGBD-fY>hrlh&KBSv<44C%~N zrhymO32x&!y**>q-q%SLDgxUGcW@D~5ppxL> z>ouQi5qmkeGMpY_x3f-pfL&Znc|0)A{|ezs(j&ZGf8!B<_-_)P&QJt(@P5v$>$}cI zQ5Z!Z7Iw7}j&@V{e*}(7Y>=Z(oI7<9{1x#+Q7`yoL+EH`DesCNefg&b0(?N;ilB>$ z;Ggqzk?iq9a^MTfHv}B}`d}nE$#0kq2K}jKVh4M=!=!25Xb_0Ir}6V|1)r`#-9cYL zxl^Q|t>5a>t7)_4%_3zeVFwV1~T1uRXTEtk~C zp)ALk?P<1<_FBDFx||D2BUc?fq)t(jTi$~>K0WaE|8IAVh7R001kXT7I&_OP&z7%X*$^RK?`5o|g1Ab*Wy>C|^ z1st*sh*}NsS%)C!WITpu7(sq3d_?|zdSsPmDQ*@s@Q3xnv}FKF$gDZPb6>!``Lxh@ zG!{+e!spf{sRJbeZ!X#d&)Yy{qE%gfU4K$K?V7J4`xD;0>)JJ@g!QAjTL(X9FMfj3 zZabLMH`wOfCgA?K-(0(O8{qNS%|*!{q6KuBj?5-9d*Ye^kmJ7s=30yo2>!t_OeY(g zH;=)}3UDlwN_KIFcK}kudck(MBw71X3W9c!mSV zb)Vu5dcs+ZbxVinOKb!Z-($eV z(JtdA9n0pms8Htr2O;piSt}d>>zAdGH^n z;B*QyBK*4M!%~)=5L`Iew=XwHrRFdS?x0$AiCpdw^4Z#Ky^=X-MTHjJ6!s8U_{KlN zSuFI=9IX~ZneAT5`fz4K?F~T6f}on){@t>2V|G`wC%Lnq9NV@px3*vV%Kg}2dJ*F> zio?*VUZ2!><2)u?|MRbO0dvrJ5e?P4f4c`a=C4J6u%l|3P!Ck6+Cn<`E@_#^Xtkb?Udi{S;>eOIG~S(EDxB3hq(Ju46tF z;8;Qc>2p`I0U)3GpQLwhf+BzRq=Zf6h5Xa*&2ZME@DP27g9NCWSLcv^*En-nHiO-pfiXGCuO&^K zudx}myjxgwA1m5awu;U+9q4;+z2vMi!UE;b%tKu86=Nzf$ZXU{?+hFQIXz}p95c)= zQ2kkv{O|iUL!bE!av>l&u)F)6zvS>}UU|~I0@~gmkEFcxiIAPPf#X@rFuR5rLU79( z@kaNu@2BHFPfBUd%s**pDN@{dUYp;PS;+_G7lGdL$3?-XMPQfBLo$yZ#ghr}u;rKQ z<#PCYPt*HodHn&%Gir8``N$v8EiVCTtdJm-P7W+$ar*-KMe|mHr$9-IGH1eu6M|fR z!SY}fXDn(AXJXNK`sb)5<68gbuz52)-v2~wakF|*?Es9_MsGu-pL_Gpd!c9tx*wpb z6_zl7w=J+=r0B>*@!Mz`-%D(mL^qpX#^F2K3_A;&pDVDAGK6#7b`PhSwKvtgz5;83 zujD}4A;61x|CZgVXIGlFOOOZ5J#JH?g`mp=|@a%Wptij8@RDd5_ zzv1_ZP@0V*l!AjO@UHE^o3T>wM*d6Cu5a;z99*JIPpy-~o-@~Dp=+(bN7G+#hjdX} zLbuPvH*d8LFMEEeW^@vIa-!;il%72p3XvK~i~9rC$`P5ojinaZ{cDfZ(5uv+?95A| z(U|C7UV&tXcc}TdQ4gAdj;hj~c{FlM@ac~qGitCh(Ap;1ax#Fp@;pt+^Qy3MAD2h& z_rju8b`d<5$lKAUY1bNgU-zzee(%zHBsEkMJ2{IgMQe?@KTlLy1KzrXV1=lZU?2ZT0fSM&&MRer3vgHSErl7w=<&vT$VW9xCKR^xH@#Vc+k<wSLrTZ;^l>1@Ld9_AE~xPev6iWS(-1XQb;_iTgF3iCE1GBB*&-Q)tN>FX zX@0C9;%yQo;n4LMBUQcw3HtIC>C>K>r@;C77*O%nci3im(#~FA=w zC0>Prde%n?yZL(XC|D`LdP?J7#^ZJ}oX_RGP6Az5+~3&U9*I5df&h--UVDQD2YTd_ z>BYOrRRP<8BYK$46SMlNJNUiF*^k*7gPsDG3+Dhu3^hUL5ehA}asHAuh>5vTTH z8+92PDf9RHAO8yQuw!M$ScS%F$b>w^n!AE|)iR__-Pr0E4W1x!_ZAsIDUo-E!AE z8xTqx5;C0iYUo0bdAMJzbb)~BW=HI}4Jf27T7AG3y(|a4MciU>)AF0JKOL`}FMt^d zX&;a69(?v=_vS}%#?OSVdfss3#0UsJs|!ABBNSY^^(cAhR?9rN#V^iUKwt+hNKnpd zIk&n31Osroa1Q_^r2(|X!7=F9w8SZhUK_o2*lF%7(keqk7Dj@p2)iv-gQ@p9A8wx= zO;=xnySYF-jD?;bDk_VitG|)(Co%}EZ}j0;+z7ZA9!#8*601N0hs;OkvBCl zl36vBDx~#2NjIqNp?i9o_ghraV!U{xF9-7J5&dfDq4$@!8sd!0>8Wg=1DZcDMq3ugD2jyYfxm zaa3MDO#l?|i5R|vSx+#K-P6pingpksu$jyCQLZ|p9T*Y52k)j`Zo}X0DZMAvjN4XOednFmXFA0?<#Fz-d ze{#7FI)AO2K}kb~WtEdToZTv6uzn;9LcozR~l}u=(Om^>Q)Nb3f>3Q+SX0g7t zQX*mI0OKvVYJl^TE7{s5P;2Q4aB_+xh2Dj+`Ny<1IU zk-Fa=sfdP-hL#<|7VCQMi*ETX&-<P)Jn=T=39*qKQrt>&|>j*u`1xn*hjVEU?sbU|65kDSI-iughh zc=}85X}W_YLUACw#T7tUcLVy|&A+I%pdj}J+FiGbf)#%O#<}`J$bsgyE)Hov55!Z6 zq`64%yEu8`ePjB35&ZOc4R2lreI6VX{buPAXN|rDD3rn< zI|Z(pBh0CXiOeC&F#SrR?~7ZXL{nLX^-^3U%71*Au=wceF?-vd9&BypAE-aJUGE{o zB}gF|-NV{3zir{(KCA-!JQsKHD{(e-+cE9b4Z|#$74CN(qer2c=qOy4zzUZJsisD~ 
z?iGV&ci*vNoiOvzA$I=F+e8(UK(Gv+ImqedIfQC~{H!o}zr}llpI$!V?S@n)63tdO zJ}9r@gh6#u&U|7WeCYUeJL5sKTnw|k%daxCxBb%*Vk9Jw5tPo{u}85kfLe{)i+p&8 z|I30gHg7ji8goT1ek3OG9MwEy^Ov=+1Q0ZN>2(8H{g0npwfr!9<-NF4xUEMITSWc+ zcv-&slz69=CM~(4R$p6Lv(nG-|(nl>ze5y(!G=1rw@F5S*>4I8}mGY94&1Tj8ddIk}s&q)L4V9|67!#P28 zsL{APxOQGbTvh30i=e16f< z9h?;+w1ah{F$`njV+R4p<1ydt9^F-8Z1V&Q~BdX&n7&1 z1uvGp)}&rfHQ~P4cx=96uHp|c75qiGB8&oOz;Xd5?+h=#$4lKljsj?RRsW6I zqo=bIc_n%$CPB+4e*hUO=*rp*wj@ZXto?d{xeB!j-P(1XU(6WO^NxH0^z*@~??Y}d7!WmsZoEM4AN zr*dIx2pl{j(ZER;pbrr(nx)w)y7ZLkN;o2wS3Py=@yfdgXeT2{8`ndfljP%!w87fZvsL)Ye_cA!) zkmQJyO?lIwfL0hYs~dg&ndMP~$*xe72%HG41T-$n^=6y(|TTtWHM{H)li?WgWCJ^R4CX_lVGt%i-j&9nXD{@&S84@nQ2 z+6JHq&Ypg&1L(oGU@%1l;Fcvk#^|<^BJ@UXhHnTz5h7^xG{xdZx(9^pDK^8N)qun% zn|gbILP@<*nY2pW<^@rgC$gM1f_bMjx;T-3GhvH~1WR85_dFP(!Pdq5%Dr<9Gmqm1 zpfxY(U^L#2+yzK>$+1ljI~%u7n+T@xsa!@1n9rb%jk6*!f^28-#F>*fsBYPCa}2%7 z|4Hw4rmS$F{CDj8SM2{Pj}YSIyJrrO;#q*)mt=0$oc4y0);=h0AHT~xefJPTI6e&8 zsljtx+g|tyvP3f1Xh!Z}+&1mjU#5E?0g)8;?>C>>a{{_U57qPedrQyTq=XsN9Q8#< zIwqKBVycpsL$ePf4!O29lUIP2hrP0#+U{v@IFR zX@O9X_9MPUZsv85e@pitjj7zR4mU(#i3N%JXzR#Q#mHn&ZrA{=tZk^JaIJW80ih?N6>o*+&F$;#iyQk zkfc5k@%isn48?ehSHiAn@rirNM34~&Vt8nzFth3{MMjQd(9k-C-FUQ$)M8C$_Nd^) zU9cs|p{#~U(CQPctA1M`=o>anr`DuC=Dz|0S~~mG_t(eyy(2IJw7v?qah1am#sWuK zB)8uHd%V;F5{L~j=A95IB_4r3Xc~=sAph) z$MAZ}v)zHvDYLYk*L9i!Ek{pU>I9qx|6h!PghN*}7{3;0KMUG@1Az?Aq#zVM#64ok zIx>h{J@!E#my+qEBJ!D|^oQN=@<%@!=`SpH~2%ZlSa z)~C;J26%vcw(@_zIVP8{ia+os$Gx&Gd$~FlNw@E{;CH|v^pW*?*}MK$I8w*gG|dkjpic+9fY|wupzW`Z-kQk*^dG%P3iMH*Duni;HHRN;%JndW z^=pzIFHGUk=;BnY)4jeWjwMivm-=PBuUxvJ%Y!ECj|2bB-xp8b-c})U18cF9y82E5jULgLyF?BGf)<|lt>2YUhAlW^;$Mgo?H7Tzfd8}x0L$*Z zm9RySB!%7~7vAeb+PpAj?qoL}KxEfvhLxylMRp*udj+#DnXvKKD~OW{ZdF$z0?bs? za}Kp|I^IX!elm+f%W_(qXAM0-P#fwn$5PK9{M^&MT+)3cg;4|AD-`4!j;ItaKECQy zaKdStHZjWpG=!~Q;9H(ewPuvpAfj=4R%*pR8Wz0oei^4sB+XW~m_dqixq-ZxyX)qi z^W%RPcRk>@>+!u;vXmmcs)`g>(_xJ6UM%QU_lCFc<3{c8dU5brky~)mCUL#0Tqtq0 zW6T^?nam2y-Yw~N(VczDR${gsr;PUD*L#7%(lsHK%U?s!iq0-0Z!RNP}LK= zo5p;wq7j8&IC}<)Y`R-($foy9o*m?XSg0FI%2%OGG`aNZmooDnS;6#ex&Z54i%3mv z;b@RFU;cdp{>$S9%3cd*)Ia`+Dv=@WoM@6f5vR*sRt)OTbTC}L8EtFmX+NXRa&d>V zixJz5;^^$GJ;_?+z27epzRo?A}ohoijahl12NyApm5<-BX3~49s^|qc(Lbitxn$>iyyQ;)PIw+ zNHO-tehm$VQ2eB+p%pcL{&y&7Bo8uWr%I26O15=wih64G2VKL5dzI#Ig8O)jR8Gl0 zt!yMG3f8}9k9Wz|Mx*BtflaPdR#<*4pbiJ8a!2z}zmmOSs&-2oMg-@EWx@OXm)Kc8 zfx6S{Lnu%f^zRZ)6#v#W5pIBLR>0d8C&RJ8y8v5E@)5|Z85Mx#87ZQdqOl2>^y*B$ zhO_b&_OW)4X@=4ovZy6v9<1dZ{p-!zv?3WB!R74>wB7SPOoRvk2 z)l_>Z^&ZCqR>NsB>Fk9-vG)wlf1X#|h&h;ySCj5xc&j$c-1P*<#ENT(+m09c-g*Xm*L0v{3Ln3a3um1!rizL11+!jDwlW3A1HzG5tBY|f)#;&BvyFlK!xUc&pJI0Guh=%sMgOrh;-kT#;O{KIaepuNMFT@WjGMZU$+|O6A)wc(0 z!AEKmXlp@RD?#yerm<&JPb-)`ELax;t#621T+^Hc1>L{%ai{^Zl}3uiTGKwd`n6$f z>}RR^kF!$B4-tv%2yYHjb!>cAiq3ucH|(}a*rU3=BT^K7u87{iBui!bW&4mJvw;p+ z#Dal6L+JyW-Ar9lexvwhBRM)Sj<;x|3MVb@Ef(<--DG1>07i^A`VAJp&FU@3`|8Y( zmq2={MbV?kAe2iW(X4}!Jm;Y(oqEUPUQ$}ZEu04xcRTSZjPy$X_xfPo?6&4=vpUC? 
zxtE^sRp|arOW5rRO%!F?ME@{j8sk@VI5B!Npi@;U^QWS+)|@@(>ft?i&hUd+(s!uH zs;5S0AiB~6%D`IvxM=g?k>L!=Jnhs>bx4dLz4r!Bu#9Q`uarUs)?vwBuj1#&QuO?m z%-Y@9(mMLk_#<#JuEG^D(pzp#0_uA66%b^>$~M8|Z+FrDUW&2dIa*?tafdJGF0v^4 z?=sN8Y-ajw-<3u&k&7ZEH%sXqx~og_vEUK!BIH<+%`%)328F>*7}5*N=)Dpy#;20v zEMw%n3QyJ#BG#8GIxs3En2(%wNj*Mr39sVW)?@AeGD%eJcxuyXyR_zCfe|VVP&QQh zXr8xtiYs9yHv$r$L)Ct0P|k7*2sEbW!Mr!A)%-9L>7_&{tppwnv9voKoBp#^0-56C zxFxv!?QaT&h9M<0Xu|KQ$=`uGl+;gND~ae9rj2PELR{YUI5Eal^a!wlgYs{|!iQJA`(GUjGc_Fawlq@UHJMEPajkP(d6841Zu%xtOX?u)2xkT7oCR z4a=6cSJX?g#0sq(C5j1FE{%BFE3$#$PS(b3cx{&)(&s4_O+RKwkn&>jG9WpXc&NoWpd}}E2 z2XeH4(OTdC=k&rrtD-R*iNjTGR!u42sh03tOOl=~oOt!E5E1&5^?M(nT-G~cvzcGZ zUgwY87cbXjZsiXcC2Ibd)x%>Stw!#du>Nu?$v0pqHoV6@l^}fQLAE|am2ro%%!hcw zgyr#hyLGxNP)yatU$^xo&SZMQEyE&qi^R0_Q+5rZU%@@YCK3c+aqlNkTLaNfW8MG6 zysdhxL{4l~rbOPIuf)v&#pH$5Wm=9&{}ZnX^Z3QQsyS!Pub^EW_vG zi)fcIrz_JiF!zq(z$rpb7H z8zTytT52XvV<0Tr2tGAxsN!S zHdU}RR3xScvp2hkT-2B#@!d22_03N5BE0Xa%}G^GE;kvGc1Edvk3p+sNxVH_PxdYB z<~t3O?>$WoZuL~|HQN;8`aH*(+vwY4qqlx#huLHD&BOa$Nr_DT%=j=}lK^|7Bu}29 zq24?81UWBI26qyf z?%PS`70qk%t&0TN`U9!ffu6+h1r<^Lk_T80-d+8#N#F>_;Rl%I!513?4;~Hu=rale z19uvMGkJD5-cM}%pU*dW%{4}Hn4U0`Fk`tu!ZT?h%$)nC)%nux>F(DiBn_$gw>rp> z1D3Dw9El^MS1RP4^FS5X1>sB;n{hT^Gq`ztmom#`U5mQny;QRJ&{(us`JESZW|S1% zNb*r+%o=fm_;sZq!<^Qoa!v9?)0Qs~5mEz%op2cjt z{Kzu0lt0+MO^bBHVG3?!$5?r2f(R!zo z%Gi>c$d?23B^K!HZ$il@>p3P49w(E*$ls&4I6UX-aH#Xw#N>|?5V`ss=F~qK0`iYy zMZY@Jq~l1zhhQe2pxn2mFb|R?-~WvTIG)>+Bj#k6uwO(HeSD>w&N6=OR5_ADF@sAz zP_O8hp<_Wj)_tM9@seBGwOqEPCC@BRZ+&}y8>+@F7*rVIA6sNB2^VOH@)h6e$YN#b za7fn^ES{#6=i|oD<_|Jysae;>mPpNie8RW?Q@Fj9lAmb8yhz3iPO^&0a*tM%-4RTT zBpUDsv!H@Obz%!l+XLe|4gTjTQIIRslxWC!6HdO^p<#waz3>q|jF%*sSN;LZBAjqN zZt8k6^_S=j_Pnrgry_=4>P{x$#ij9oo*u+dGLm)~4dJh~n}(PJ#6LgLPg) z?eEKkcf)fQwZ?G?p8!c_&eWstK=K}BO{_&NbhMs8!y*=09!=T+t>pJ<5paI}yFmA^ zgdkt_Uih${`DVi5WM+~!x73NJ?S1E@ppG=(N|cCd#Vb4ob5ZkhM3>i>uMDQ{Z?oI( zH3_aC!KA`fx}$yUZ76X(SHBGV^(^d3^LbNAJDyJL^I}RdF4w~h(H~>qg~A+r-%qFA zO^y^V$_?khDRicXc$FYAncxg_ptSH#!rI$!nmua*MQR0kAa%G6|4*o5jecAnFbbDJ1=?tF(1B>SF=Zq|Y5Q%~ZM*BTTJ(gx$bsEbm{IHA`HgNK z+X{z%8jJT?!}p>>%^(}?k@lWIQE*1f|NgL`IU3;AGd|A~yDOp(Fh~*8bXiuE1NXhs zgGZ>2D@FU&pT7v0IxQOt;^EC<_491215m&aW^fYII<$ipSbz z>c@qd@ka#@JMU_wh<0_o(O1~%>_--=-XRy2_&mqc#towhuX@6PD_yeSho8%Af$fW#?VkX4_UEUcRYcgp@iq%I1f7Nm6n}TrocHa#AEE~+{*6lD&h;6A z+>b9o2@5J{0dLMFhB$kpDV-j(f-QDX2_K3EcaXuToDJwgx6*snh87&wv9k_I_zSao zB*i}Z2ZY!Q3>ISo(4w^K2fjApd-qbt$88Z$8H6$eK8q`Dt(&KXCL1~XJEwnR?+&<2 zaRppXOvH)afEZy3SdB4+ITUx84s)=m@k#(zGJDp2bI93ZE9`7&*}e}Rfr$)IAwpGmn|Y8ytvk)Q{s-A>sx zMTcO5>4*a!#df!HD2Eg$tJ_g1bw)*ClM&s^N~jOPB0 zYO=5uxfN;tp`5&nsl1vqiJ#jAP%X+hhZ)1FW7*xu{jTe4ft{AT@o>Or z-vS1s4Yh8ri2kfY>%$6cJn~Uc+hZ!E*~sKPc1*#Y zWxD<(gCq*pM>K{i468<=Ivs2My>^zxC2&?8=ADAYc|+pW(1q}v&SgWs8b?pnSfy{1 zc@%RSYcDE}7d1H0pD=U!?{h9{$xicp)gd$sjB^!s|8fZiu;v;e?;#JzL8QK< zOd@R$oJ><-Elq-K-DbP#$c^xO%wfb^Hx0CLRuM(eSQZ@9+9f)5Y*w+QIXB_t^NWJE8g5Y{6voLSEGfx_P{6%z-ss;4lLJbj3bKW1SP(aLq zuRul@AYyOWdqCT@<>C)JPCQ=D))}m>$A!J%A#3wvE0%|^1x&w%<>TjAP~QXHClJtf zy}009_fNRwXY|#bi-JFpQ_c@80lP27WYS#;Ct>;0s$xkZ>ecmlUHr(T@CQ}ShVCu! 
zeBL-p`}O3mcN*9c?9#Q*Uk(irs!CCCEbfMTgxDdH3N=>v7d!BCNy+?d71^NQO{~x3 zLupmy^Q?Z4(c!QUWew|~mNx+miG>+-Qt+NC2f ze-ONb8@!_)e4u{Wy#QlSMe#e@dvXp~$E;o>>M8GKpcYUY5Q)jRgK-|RcJD}NOeeeOPCM#+Ek6t@r|BZQ8 z{rkdQh4RF~pc3+UKz!mrwinV|zg!D2Kso6sv~?r}LR%QZz@Oz5f)HdB_?K0wp?lPz z)kTtSHOD3;=(Xf<)kVU=!@4Clt zQ_tf7xkzA388s=;kN_8FzZcJ?&|_23k0XJVLX#1TlFqhs<&br?9|(=4OUAr|7JlR8 zy&8ix$_cvl-b-HS4YW3QNPKn)Oe{!ci#}M0~_li z*Wg~AEhrEHg9()w3*R=~M*0@IC(+Kr52_%I=rwMW$U2MeW!d&=84xfgsV0A2UuIO^ zE|lejjsk@x?b+bOiZu>EcFuABYvbS%#=TY*I|3qUQPEF``VSnq)W~;}WVUMLfqjT> zJeeIko0G7*_BA%c4G}U@kpa-j({0UuaFBZw7(~?T`7swZ|cj)3|ZLPRps3)6ws0c0TCj?M&VsM8+1BAgZQ-HMtwd8fOQ@SLsLC7M3X+|L+(-T(_8Mu(mASs{Z6&>zi`cmS~8+8<*4iw4+6UAo#v!; z8==X8d(R{3q_RQl7xJDcpCI@3Hh_z9CzHf-D45eHQ@gbOU2-Fd@Amb(p9Mvx`{a|4 zzQpGnpuQ3ZWL#%7&uzxn8{k*=<+J=;4wvGSdvZE%tJS&?;7z|wlPh&$a0Ue!pm{)0 zn)CcO;F$pAS-FxGbx0ttAfFNiiQJB2H|nR~EL2hwC(g%({q;6;T&DXt%YlThWij7l zRlwF~bxprzUg9n4);>T~I|uaQ1hwH82{J~lMN+%qz?TC!l>njyQ!}4fvXijUYUu_H zf%L&`HVXrH!>`dj^NIyhs%is)Z@zr%lps4JyeriGy(BSj^z1dOh{r(d;@JK7tMxW2EGelo#p)03d$ERWU5JMyKLf?am6w$98nTpiA(AR>CDO+Ig zR)vNyZ7dK8V|;KB_YSEwvT}e;#bd)AicV+=i?hbBn^GoYQ4cjZoi{rfPf-CD5sg_Yxp;58DZb(09EwO4-an?pbj5@UBr>cxUEGhcWvL`8b_uCtDes1sC;qh zg-w-LF-HonO)s!7bV*hzB%e`^+s>ed#l-Z=dsam_s=lK9duiC?K914QTY2Amo+g=_ z-DUe52jy68So&-MMnR;Hd8XiZ`qrUS96uyXDI)Bb2=?RF%Y(U0{DW43w=|%FKsQo^ zOM)ww-(r}=aj;MmH-+v7(>cQJ39;Os#m7y z*bJSjca>+kW9VLDWutBv5*y*zo%ql8vI=`J=7D4LQ_9aTQ1i=nM5J?w`OhD(pfz$4 znN&YhYh6j1Lk4Lqo$h3VarqCvgsTkA#Of1Pbi8^Ry|Ly&9 z|3wfsCjOo$3lJjFPj_V_t#6M--7YXx93Xp3P^D^LB_21PI=z$BPJf$8J0YJ&=gq5E5}=dlvg^uq z$PeGf4pt~I$sfG*_0C^G5_&#@CB7ddvpgz)KCj3R!UUT)Hl88N$)weu^WR*yz7rU& zERGqmXU6dr*`fD>-TPD4^Jpm=9&-bexeV9OAbY!hxMr|#)=SJUJ;Ks?xbW2Z zf-@Fgz^6ohe+dxPG?;!;Cgp3w@D!a;fptkG4G~{)>p<5esCVGL(It8WJQ!ONnEALD zo(2{<7l9ZTZex<2TxSl&IoN?38PCyIm5fXez!0Fn5(s;Yr2p&c%HyH#+P;0rI`(BS z4YIExW8b&3?;#^gNU~Iv$ugG4l08#GBeE+?N%pbJz7$f?Sh93yiBP!TGtcwh_p8_E zGk^KxH*J%;9*bBG57>L*J+P!Hc{CF_!~t!6$ekeH4u@ z66E!{{5Zu^()mgq>SE>n*t5>i_M~_YM`6WPteP)bGMWZhr}dnYSZ};AWx;ZL8<8gS zL}I_1ye(A!JD)t9G9X8h>B5NL$QvLAoy1Xn-z166KvUJ|X!IO}1o@}*kvgjZ5zeH! 
zI@iJ;Q~j4A1T+aK3&J#G-tFe@8?oeNR@4@R6#jZ}+!veXK z1?If0gR(@OWU2p|~b$?}g?*9Qz}JO$)qpm#@_V|Edm7Onfx@Y+78m1)ldnEuSv za2Gr-ElRcK8tZr&#b1c4_UaG>kZnDjIM*NMg2TdG(XRM6@ZIa`Y{mli4jxViFr-A* zOli5(JCeW{^xRx;18)UjOb<>{cuy>77Z_zK8+RV(#a7PkJ`LXk$}z_I>s1j#DdS-`mo))vCy~B?$q9&j?PSq`mxeH zQv=tH9~t^?pJz+2`&NyTay%_Un#W0+#s(Z9cQ!WY&wwE=VA#R|K=Qq_);T7ZC_beH z=CE+PTM=*fOE#l6|)XTOU-BI$2Jp8Wnu@)!dyem$Kk8M~E3#HAHJ zL_pEU{;2x|$S0h=t|mDPM5o`|R?Y;KBub`G(LXNyzC(-vY@y;$VH6FLH9#4(bFt(x zDvy1b<7TvsdV{DF0l3}vxCZM*901J@s4ozdK!~l^X4g*G2WQZsgUm|n@!SjePTY@^ z*v3n0c`m!KK@WkLi7nSIh-Z|*qKCHGw^Ydl@}zV8^p`dpk>^huw8T4$pi*q1z|7yMsc_k@v?RhgGA^gT#zRt?$3$ zX59r2ktQihJ5})seF<>JiQj;E^Za}WN3X@-Jv;{A0$8GA3n(YRjQ}$Z1oQX+U5ohx zpqa_5i3Ex|4c+}YQ}RUIl!iwC&Q;-Px4@J3LLV@s`v>q`)XEWbB6@%He6-a-yU;O1BKP+4BT`vA?-+F@T`EkN+YD9e0?Xwzu~1Eq?za#lA2Zo z=Yl}YyfXfNL7SlwjcdMd0TodP{OFcW{_omkWN*~iR1n{QW-c3zZTcvA*P z0i!ts3BJ+-4^js2RsW_GN(dc2?a5}2NN0k7+LbRt8e8u!OYq-(wl;RFGLoJ z0XZXKLu2>xjRJ1xOukZh+^D}Dr_kSk^Q(6RhGacEMUu%?B~By|k~6$6BJ zbIbN=%<*%k4&vNj0ik+HSmsq53Uykb>on=WCkct3>2A)AN{KF=pCQSqNdh&Xrb*ue zckBKh(41(3+&zQw(X?Rdc|#&*`@uN-QnlAqmCJAJDRav0?vF0moPD)aMY1SqFaUp~{W-f!Bvnfjv) zV{BgsW8MG~`h*&=p1iuE%!wrCd;U(ImaQ^1urfk%7?uhAt(Jw=*2|4fd2*a!X>pWE zB^9;T)MHL3V`;wk1&9>CG6S{=lXYqHS#pR<0KW=V*kLH|=ZG?bxsg99~IsyBcH4Q&H-za3p->jVd>*e(W$DwWT+9S`+|-7c}kUS4Y!I> z`Hrz@yE;lL!8bwySyhkJ8Gacqk=!nTT3-h250Hv%+`o<%ikK1s3UkL!r5SQWQ6*1R zftO?IQcz_zHFMi@b{Uhpj4SF3JX*gBwGvmFv;t_eH-5Vhi3xmEgkXTK@jRCa9s&fe z&&_E*IhyMXV^3~$ZD+Ys38Pr*@@nub!La%0)UYlg7F9pP7b~pyI>1@x-zXdtKAq%j zVCWFZCZ1=^jd0m+w_sM)E@H_=X9tI6PYpn8qKYsv>*1SRgkxBd8+DRmQHb`{>ZtdW znD^oouEwKY8fO%K0;lUkK<--m@aa_7&@Gj~NJV}MW8K#kmnf=7cCNcOdWlv>e>^l5B%ZII<2SBa^+8=Sid^h*LfsTK@Bq4 zAbF%1^Ze@E-93Yq(nxSB5{6tgMKxBWN8vlH?RjqiPvPu1fFDwsI0q(2n9(d()^kok z$I&&^4ZbY){&Dk(^D3jsBI~5Z&YJFI(?_#raA>$|oGn>He^} z@SvyB^YV~kY61mj5oFBmh3Oi{WRe(3ibjgY;lOVD0-qRp{axVRoQTs{E`b;yfF2P~ z>Q@t8GuNoHd+$UbhD>hFeUo*87rLN=22`gb+1X8KE-4;)y(hUj44)%K>aB*|CeFa< z6itD@R$Uw|fJb)V4dbyhuhgNw)?lPqVKa zZ{xHF$=WDIO#+|gc8!Rr@x9R=ojiYDxM@1^!Y*e}H=QSe+)S>%K`|m|_~fp93EsM6 zGv}3ei+Jff?Q568=(_dQcUhL^OceK5Syl)DO#_vU2Cxm)pGcuED(5;Mx2HkFAPE5Pu@&;Y3x{SkiBL{;b zqDQ|#vRAyNJ+%<^d8^f!CH;2gaH{*lM5F0MWyMP=rtUhi zeV~8-&)9?gUM+#gT7W$*%6#-$+dtwkc9=zKe1vOfJ`#9Ha(9S|9!-kRiGLy4T#;rX zl!uU@1JK|1H+fH&n0L{Kz|3M);}_Vr#K0wWKvy$ zyZCFE`CZed32fO1)!JYE{B22n^5RLOd3=>6aj%YZ-WxJAKyhVs-7(gquj(=qbjk}~ z`yCXVS@}RK?JIrdWa8^jUQR^mDaR*PKdsLFT$1!-rT{{3I{Ry_@H23e!=NABOyILl zb1RZT&IE6&B32-zxLIjOx?gt5&Vt z^|=My%jgapQSyWfhFAZcLQqdu7R17-W_;?hdF+D#WG>vN8{~Ph zxVG@T_jT7BzO~~yPrW6@hsh5H-U_GmTGgYyx?Ml)>sY&*)4=-@g@yUWlgBH~n_uYG zh3@-Qu2LmH`sKr~X8PLRs;oO>>#2C4#7NchrxHY6C`67OUykOyy;L2>W=OHzot(2o zwM7V;!OF6-^YkMze%XT%uY<_|rWgs^KU0EDIGp&#^3f z`?iEUvG_65bUG{JwaKI-+MjvY+7zC0mP zsB5h2#=$o@p)q@*@_EM9HHjx@ckr~3t^s%YV4@pAK}n5gGHZTSASh(9aJO~g8Wz5| z?&Tbh^`;-S9kq#MOAs@rp;~`XoV1v{VEb;3XxoOzE^~|Yl{r*XrF9OU$nT@1H`|-W zrr$1X*+!($W*A({k$3$X6Wmj}pU#%3M_Ka)TbGl5U8-Lmt@Fqxvr5va7$r7Q*Cu0T zu_ZhtE_xGsn#SJwgW}~FOwuBK?enJ%*05*Bx;T4%-Z>ZX=x{rI2l}gwNGU;HHH5ck z(xh`7=Fc@Jc10Crmz}z-I6O*eHtH#)XLBUP?Rq(FPYbsG_>uNO4jrs9c0VhWf}X0*;?ey4dH_;0R! 
z`5>qEV6~Ro+9;iWfbkIY3SZy05y7gB3R*%11XR!_i?)gj+%CV3)7;W~>~lNg(a2{e zv^Kw{xa|K(-d<80mwUI{`xAVUWK|T&!hc#n`|OW)&^6<{d`X#gk{&&e)z)<04*sBQ zPG^s)RjUmPgFww(7vfT7Zn1w|ab@yL!Q==@`#SDW6V{|Z^-R|se$xTC$fC)_afCL#7q3h+AvH&>HuiB{W1@4?wLD9qkdO2J6Z z*ayDCJEfE@pD|F0b93M(FzyO6$X z7TH3DHjSc=E?QLzn@Q!{QVV&|T~fxW=-2QZGc+sqlSJm@we&uQ2la7fxm+Bwch`|4OC-RNVQ1?UM&nK@)ANr?Nh*HxKWFJp@{&)9B>FY!gI^Xn{ zonu@{xP+DqF0CE3I!zzXt#-!G*67qQZLNgZtY8{TG-62PG`p0}StEUQx4@j+dfeGZ z?gOyaSVUpPV2|H&I~GTY{(os!R8jELs%re}D${mDzx=Ksu^djGe)^7C(@2ql84!>6 zK;8@1G7@k*zLzL(HGR1=z`%ILj*lZ)RkBEgpBH|H+FZ_l801m|*HKE4-yv&nb=M&_ zKFbz%=k`@0!SVbY@JQQHdf1kiv-hy}GvR-YAVKS_{r)Zr->1wOSD?kbS~IIanQcQM z0;g;zEUY-Dafd}rN!e7R>s95rbG3-Us6{}5x|H&E8NnfP&a(M*6040mgi0u=)Ru6E zo|75fQgvUpcXPW=iO)Lj+Bfd}HrO8(ByiiyG02ua)!|vslq8Zi zI~9G#?3}puIPHoDz4#3EkN?=W71G%9X|J?sZq9=>gWHGVcXhCsEEHJ9&$oY3exO|H zs)L56%jXW^SSqqzV)@gGaWsMLRvx7E`0lUQS3MJPviB(06pId_p3j1KOOq(gG%y~< z=5#P*u5XuN?6q5BKE`yGnN|Byw12Fq**bx2yTc263HIu~EHh1+yWV6o;D4;M`iIqf z+Zr=_Dkr+$!V?j8?_q>W@0N1pl^vYK<*&uudWn6pU+OyDz248ZA7TdRgeX`IHA_xA zrNa(p5Y#i>`&C*PyG&Ax>R+v6^$_RIwZ9BKB!4^&n~%an4oj~E=30SrPWHL1j<7P- z&|&-r@`zG_rdG$H-tTa^$|+XXF$moVIwdz*Rv%h7zch_Iyc3d`{C2?HwQ{l8nQ6OM z3RphZbgq@3bH3Ob4Lp!146pweD)>hLAa$LPGSQ|KU%oF7-yX`==Gafid&8!ff6lK= zGty|8l5HvU(+oZI35;$GhId@6bXt>snsgH`_t6Dr3J1R_9dY2qeM8Fn7`w$Bv%QrVTz-#X4V%xWzy| zq_ZVtgdTJr=6=}b5<cU*treI97E)vWwtJ;>jUP`#5#*T*$c{!|6Y}r4MyO`(1 z!`zJ1Zq>B2(;4ghNQA$Gl^leZVyXS)u6;=jh3Vt{@sRHGG@5?JkOcBJyjAWu#mgR8 zW3xv`2IJ{Y>i6P^mBfraz#TLlbt0S+8m*ax9UL^S4W)!3bC>2fU4@+Gu7({Tom>@= zLgU)Ump2tH(`+b=C`2V4j~d@r;{OBu{NLNuDdOZXyfhodfWN7~EFTFDKth>&@~JTw zF&SMt%ftJEKW3I$?`3IkwF+}T+V#t39^kkUB#|NA&^Np0KVi2NU)PDvD98v}@G9 zF*1A)+DdD0^0-efex(Xhl(rsL9`g6~_n09LoI0JatJwUq(b2;oIG&@Q=aJAO>GAfl zSEXw1C(8}eyZh$@%(q^>xH}a2zd#cSY$AO~7Qe@lcjR+apRi50kbyrF19Sc7x+u*5 E03M<(r2qf` literal 0 HcmV?d00001 From 1fdf331ac7c89c63d5d27b9246093c9d44af2e49 Mon Sep 17 00:00:00 2001 From: Sachidanand Alle Date: Tue, 16 Mar 2021 05:56:16 -0700 Subject: [PATCH 10/10] Fix name for RestoreLabeld Signed-off-by: Sachidanand Alle --- deepgrow/ignite/inference.ipynb | 4 ++-- deepgrow/ignite/inference_3d.ipynb | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/deepgrow/ignite/inference.ipynb b/deepgrow/ignite/inference.ipynb index f3d9282b8b..385c750556 100644 --- a/deepgrow/ignite/inference.ipynb +++ b/deepgrow/ignite/inference.ipynb @@ -184,11 +184,11 @@ " tname = type(t).__name__\n", "\n", " data = t(data)\n", - " image = original_image if tname == 'RestoreCroppedLabeld' else data['image']\n", + " image = original_image if tname == 'RestoreLabeld' else data['image']\n", " label = data['pred']\n", " print(\"{} => image shape: {}, pred shape: {}\".format(tname, image.shape, label.shape))\n", "\n", - " if tname in 'RestoreCroppedLabeld':\n", + " if tname in 'RestoreLabeld':\n", " image = image[:, :, original_slice_idx]\n", " label = label[0, :, :].detach().cpu().numpy() if torch.is_tensor(label) else label[original_slice_idx]\n", " print(\"PLOT:: {} => image shape: {}, pred shape: {}; min: {}, max: {}, sum: {}\".format(\n", diff --git a/deepgrow/ignite/inference_3d.ipynb b/deepgrow/ignite/inference_3d.ipynb index d7221592c8..1438694e15 100644 --- a/deepgrow/ignite/inference_3d.ipynb +++ b/deepgrow/ignite/inference_3d.ipynb @@ -191,7 +191,7 @@ " label = data['pred']\n", " print(\"{} => image shape: {}, pred shape: {}; slice_idx: {}\".format(tname, image.shape, label.shape, slice_idx))\n", "\n", - " if tname in 'RestoreCroppedLabeld':\n", + " if tname in 'RestoreLabeld':\n", " pred = label\n", "\n", " image = 
original_image[:, :, original_slice_idx]\n",
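
Context for the rename in this patch: RestoreLabeld (formerly RestoreCroppedLabeld) is the Deepgrow post-transform that undoes the spatial crop/resize applied before inference, writing the prediction back into the original image space. Below is a minimal sketch of how the inference notebooks typically compose it; the surrounding transforms and the key names ('pred', 'image') are assumptions drawn from the tutorial code, not part of this commit.

    # Sketch (assumption, not part of the patch): typical Deepgrow 2D inference
    # post-transforms, showing where the renamed RestoreLabeld transform sits.
    from monai.apps.deepgrow.transforms import RestoreLabeld
    from monai.transforms import Activationsd, AsDiscreted, Compose, ToNumpyd

    post_transforms = Compose([
        Activationsd(keys='pred', sigmoid=True),          # logits -> probabilities
        AsDiscreted(keys='pred', threshold_values=True),  # binarize at the default 0.5 threshold
        ToNumpyd(keys='pred'),
        # Invert the earlier crop/resize so 'pred' is restored to the shape and
        # position of the original 'image'; this is the transform renamed above.
        RestoreLabeld(keys='pred', ref_image='image', mode='nearest'),
    ])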