From 0db62ec58546a46e298d911f038881e005eb0fcb Mon Sep 17 00:00:00 2001 From: SACHIDANAND ALLE Date: Thu, 5 May 2022 14:48:35 +0100 Subject: [PATCH 01/24] Only Hide Training area when zero tasks configured (#769) Signed-off-by: Sachidanand Alle Signed-off-by: Markus Hinsche --- plugins/slicer/MONAILabel/MONAILabel.py | 4 +- .../MONAILabel/Resources/UI/MONAILabel.ui | 136 +++++++++--------- 2 files changed, 71 insertions(+), 69 deletions(-) diff --git a/plugins/slicer/MONAILabel/MONAILabel.py b/plugins/slicer/MONAILabel/MONAILabel.py index ea152f49d..395ac0288 100644 --- a/plugins/slicer/MONAILabel/MONAILabel.py +++ b/plugins/slicer/MONAILabel/MONAILabel.py @@ -506,9 +506,9 @@ def updateGUIFromParameterNode(self, caller=None, event=None): self.ui.scribblesCollapsibleButton.hide() if self.info.get("trainers", {}): - self.ui.aclCollapsibleButton.show() + self.ui.trainWidget.show() else: - self.ui.aclCollapsibleButton.hide() + self.ui.trainWidget.hide() self.ignoreScribblesLabelChangeEvent = True self.ui.labelComboBox.clear() diff --git a/plugins/slicer/MONAILabel/Resources/UI/MONAILabel.ui b/plugins/slicer/MONAILabel/Resources/UI/MONAILabel.ui index 53f9ee102..481d9847c 100644 --- a/plugins/slicer/MONAILabel/Resources/UI/MONAILabel.ui +++ b/plugins/slicer/MONAILabel/Resources/UI/MONAILabel.ui @@ -175,74 +175,76 @@ - - - - - Status: - - - - - - - 0 - - - - - - - Accuracy: - - - - - - - Average Dice score computed over submitted labels - - - 0 - - - - - - - Model: - - - - - - - - - - 0 - 0 - - - - - - - - Train - - - - - - - Stop - - - - - + + + + + + Status: + + + + + + + 0 + + + + + + + Accuracy: + + + + + + + Average Dice score computed over submitted labels + + + 0 + + + + + + + Model: + + + + + + + + + + 0 + 0 + + + + + + + + Train + + + + + + + Stop + + + + + + From 25f95ab0584fe549a2db379f89800c47fb0dbbff Mon Sep 17 00:00:00 2001 From: SACHIDANAND ALLE Date: Sun, 8 May 2022 14:03:05 -0700 Subject: [PATCH 02/24] Nuclick train (#772) * Draft Training workflow for NuClick Signed-off-by: Sachidanand Alle * Sync up changes for nuclick training Signed-off-by: Sachidanand Alle * Fix nuclick training Signed-off-by: Sachidanand Alle * rename transform Signed-off-by: Sachidanand Alle * Sync up changes for nuclick training Signed-off-by: Sachidanand Alle * use monai bunet for nuclick Signed-off-by: Sachidanand Alle * fix log Signed-off-by: Sachidanand Alle Signed-off-by: Markus Hinsche --- monailabel/tasks/train/basic_train.py | 9 +- .../monailabel/commands/RunInference.java | 13 +- sample-apps/pathology/lib/configs/nuclick.py | 30 +- sample-apps/pathology/lib/handlers.py | 18 +- sample-apps/pathology/lib/infers/nuclick.py | 7 +- sample-apps/pathology/lib/nets/__init__.py | 11 - sample-apps/pathology/lib/nets/unet.py | 105 ------ .../pathology/lib/trainers/__init__.py | 1 + sample-apps/pathology/lib/trainers/nuclick.py | 298 ++++++++++++++++++ sample-apps/pathology/lib/utils.py | 83 ++++- sample-apps/pathology/main.py | 43 ++- 11 files changed, 454 insertions(+), 164 deletions(-) delete mode 100644 sample-apps/pathology/lib/nets/__init__.py delete mode 100644 sample-apps/pathology/lib/nets/unet.py create mode 100644 sample-apps/pathology/lib/trainers/nuclick.py diff --git a/monailabel/tasks/train/basic_train.py b/monailabel/tasks/train/basic_train.py index 8dd2d205e..ccd9aac1f 100644 --- a/monailabel/tasks/train/basic_train.py +++ b/monailabel/tasks/train/basic_train.py @@ -104,6 +104,7 @@ def __init__( stats_path=None, train_save_interval=20, val_interval=1, + n_saved=5, 
final_filename="checkpoint_final.pt", key_metric_filename="model.pt", model_dict_key="model", @@ -123,6 +124,7 @@ def __init__( :param stats_path: Path to save the train stats :param train_save_interval: checkpoint save interval for training :param val_interval: validation interval (run every x epochs) + :param n_saved: max checkpoints to save :param final_filename: name of final checkpoint that will be saved :param key_metric_filename: best key metric model file name :param model_dict_key: key to save network weights into checkpoint @@ -157,6 +159,7 @@ def __init__( self._train_save_interval = train_save_interval self._val_interval = val_interval + self._n_saved = n_saved self._final_filename = final_filename self._key_metric_filename = key_metric_filename self._model_dict_key = model_dict_key @@ -340,7 +343,7 @@ def config(self): @staticmethod def _validate_transforms(transforms, step="Training", name="pre"): - if not transforms or isinstance(transforms, Compose): + if not transforms or isinstance(transforms, Compose) or callable(transforms): return transforms if isinstance(transforms, list): return Compose(transforms) @@ -528,7 +531,7 @@ def _create_evaluator(self, context: Context): save_dict={self._model_dict_key: context.network}, save_key_metric=True, key_metric_filename=self._key_metric_filename, - n_saved=5, + n_saved=self._n_saved, ) ) @@ -560,7 +563,7 @@ def _create_trainer(self, context: Context): key_metric_filename=f"train_{self._key_metric_filename}" if context.evaluator else self._key_metric_filename, - n_saved=5, + n_saved=self._n_saved, ) ) diff --git a/plugins/qupath/src/main/java/qupath/lib/extension/monailabel/commands/RunInference.java b/plugins/qupath/src/main/java/qupath/lib/extension/monailabel/commands/RunInference.java index 1b75a0e23..119f5f170 100644 --- a/plugins/qupath/src/main/java/qupath/lib/extension/monailabel/commands/RunInference.java +++ b/plugins/qupath/src/main/java/qupath/lib/extension/monailabel/commands/RunInference.java @@ -94,17 +94,20 @@ public void run() { list.addIntParameter("Width", "Width", bbox[2]); list.addIntParameter("Height", "Height", bbox[3]); + boolean override = !info.models.get(selectedModel).nuclick; + list.addBooleanParameter("Override", "Override", override); + if (Dialogs.showParameterDialog("MONAILabel", list)) { String model = (String) list.getChoiceParameterValue("Model"); bbox[0] = list.getIntParameterValue("X").intValue(); bbox[1] = list.getIntParameterValue("Y").intValue(); bbox[2] = list.getIntParameterValue("Width").intValue(); bbox[3] = list.getIntParameterValue("Height").intValue(); + override = list.getBooleanParameterValue("Override").booleanValue(); selectedModel = model; selectedBBox = bbox; - boolean isNuClick = info.models.get(model).nuclick; - runInference(model, new HashSet(Arrays.asList(labels.get(model))), bbox, imageData, isNuClick); + runInference(model, new HashSet(Arrays.asList(labels.get(model))), bbox, imageData, override); } } catch (Exception ex) { ex.printStackTrace(); @@ -145,9 +148,9 @@ private int[] getBBOX(ROI roi) { } private void runInference(String model, Set labels, int[] bbox, ImageData imageData, - boolean isNuClick) throws SAXException, IOException, ParserConfigurationException, InterruptedException { + boolean override) throws SAXException, IOException, ParserConfigurationException, InterruptedException { logger.info("MONAILabel Annotation - Run Inference..."); - logger.info("Model: " + model + "; IsNuClick: " + isNuClick + "; Labels: " + labels); + logger.info("Model: " + model + "; 
override: " + override + "; Labels: " + labels); String image = Utils.getNameWithoutExtension(imageData.getServerPath()); @@ -163,7 +166,7 @@ private void runInference(String model, Set labels, int[] bbox, ImageDat Document dom = MonaiLabelClient.infer(model, image, req); NodeList annotation_list = dom.getElementsByTagName("Annotation"); - int count = updateAnnotations(labels, annotation_list, roi, imageData, !isNuClick); + int count = updateAnnotations(labels, annotation_list, roi, imageData, override); // Update hierarchy to see changes in QuPath's hierarchy QP.fireHierarchyUpdate(imageData.getHierarchy()); diff --git a/sample-apps/pathology/lib/configs/nuclick.py b/sample-apps/pathology/lib/configs/nuclick.py index 32db53459..42533c2c3 100644 --- a/sample-apps/pathology/lib/configs/nuclick.py +++ b/sample-apps/pathology/lib/configs/nuclick.py @@ -16,7 +16,7 @@ import lib.infers import lib.trainers -from lib.nets import UNet +from monai.networks.nets import BasicUNet from monailabel.interfaces.config import TaskConfig from monailabel.interfaces.tasks.infer import InferTask @@ -42,11 +42,16 @@ def init(self, name: str, model_dir: str, conf: Dict[str, str], planner: Any, ** # Download PreTrained Model if strtobool(self.conf.get("use_pretrained_model", "true")): - url = f"{self.PRE_TRAINED_PATH}/NuClick_UNet_40xAll.pth" + url = f"{self.PRE_TRAINED_PATH}/pathology_nuclick_bunet.pt" download_file(url, self.path[0]) # Network - self.network = UNet(n_channels=5, n_classes=1) + self.network = BasicUNet( + spatial_dims=2, + in_channels=5, + out_channels=1, + features=(32, 64, 128, 256, 512, 32), + ) def infer(self) -> Union[InferTask, Dict[str, InferTask]]: task: InferTask = lib.infers.NuClick( @@ -59,4 +64,21 @@ def infer(self) -> Union[InferTask, Dict[str, InferTask]]: return task def trainer(self) -> Optional[TrainTask]: - return None + output_dir = os.path.join(self.model_dir, self.name) + task: TrainTask = lib.trainers.NuClick( + model_dir=output_dir, + network=self.network, + load_path=self.path[0], + publish_path=self.path[1], + labels=self.labels, + description="Train Nuclei DeepEdit Model", + train_save_interval=1, + config={ + "max_epochs": 10, + "train_batch_size": 64, + "dataset_max_region": (10240, 10240), + "dataset_limit": 0, + "dataset_randomize": True, + }, + ) + return task diff --git a/sample-apps/pathology/lib/handlers.py b/sample-apps/pathology/lib/handlers.py index 33f19154b..f19484ca0 100644 --- a/sample-apps/pathology/lib/handlers.py +++ b/sample-apps/pathology/lib/handlers.py @@ -141,7 +141,7 @@ def write_images(self, batch_data, output_data, epoch): label[y == region] = region self.logger.info( - "{} - {} - Image: {}; Label: {} (nz: {}); Pred: {} (nz: {})".format( + "{} - {} - Image: {}; Label: {} (nz: {}); Pred: {} (nz: {}); Sig: (pos-nz: {}, neg-nz: {})".format( bidx, region, image.shape, @@ -149,6 +149,8 @@ def write_images(self, batch_data, output_data, epoch): np.count_nonzero(label), y_pred.shape, np.count_nonzero(y_pred[region]), + np.count_nonzero(image[3]) if image.shape == 5 else 0, + np.count_nonzero(image[4]) if image.shape == 5 else 0, ) ) @@ -172,15 +174,15 @@ def write_images(self, batch_data, output_data, epoch): break def write_region_metrics(self, epoch): - metric_sum = 0 - for region in self.metric_data: - metric = self.metric_data[region].mean() - self.logger.info(f"Epoch[{epoch}] Metrics -- Region: {region:0>2d}, {self.tag_name}: {metric:.4f}") + if len(self.metric_data) > 1: + metric_sum = 0 + for region in self.metric_data: + metric = 
self.metric_data[region].mean() + self.logger.info(f"Epoch[{epoch}] Metrics -- Region: {region:0>2d}, {self.tag_name}: {metric:.4f}") - self.writer.add_scalar(f"dice_{region:0>2d}", metric, epoch) - metric_sum += metric + self.writer.add_scalar(f"dice_{region:0>2d}", metric, epoch) + metric_sum += metric - if len(self.metric_data) > 1: metric_avg = metric_sum / len(self.metric_data) self.writer.add_scalar("dice_regions_avg", metric_avg, epoch) diff --git a/sample-apps/pathology/lib/infers/nuclick.py b/sample-apps/pathology/lib/infers/nuclick.py index d42156a6a..227c3f28d 100644 --- a/sample-apps/pathology/lib/infers/nuclick.py +++ b/sample-apps/pathology/lib/infers/nuclick.py @@ -130,8 +130,7 @@ def __call__(self, data): return d @staticmethod - def get_clickmap_boundingbox(cx, cy, m, n): - bb = 128 + def get_clickmap_boundingbox(cx, cy, m, n, bb=128): click_map = np.zeros((m, n), dtype=np.uint8) # Removing points out of image dimension (these points may have been clicked unwanted) @@ -162,9 +161,7 @@ def get_clickmap_boundingbox(cx, cy, m, n): return click_map, bounding_boxes @staticmethod - def get_patches_and_signals(img, click_map, bounding_boxes, cx, cy, m, n): - bb = 128 - + def get_patches_and_signals(img, click_map, bounding_boxes, cx, cy, m, n, bb=128): # total = number of clicks total = len(bounding_boxes) img = np.array([img]) # img.shape=(1,3,m,n) diff --git a/sample-apps/pathology/lib/nets/__init__.py b/sample-apps/pathology/lib/nets/__init__.py deleted file mode 100644 index 2bca7a6c9..000000000 --- a/sample-apps/pathology/lib/nets/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright (c) MONAI Consortium -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from .unet import UNet diff --git a/sample-apps/pathology/lib/nets/unet.py b/sample-apps/pathology/lib/nets/unet.py deleted file mode 100644 index 60366523b..000000000 --- a/sample-apps/pathology/lib/nets/unet.py +++ /dev/null @@ -1,105 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - - -class DoubleConv(nn.Module): - """(convolution => [BN] => ReLU) * 2""" - - def __init__(self, in_channels, out_channels, mid_channels=None): - super().__init__() - if not mid_channels: - mid_channels = out_channels - self.double_conv = nn.Sequential( - nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1), - nn.BatchNorm2d(mid_channels), - nn.ReLU(inplace=True), - nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1), - nn.BatchNorm2d(out_channels), - nn.ReLU(inplace=True), - ) - - def forward(self, x): - return self.double_conv(x) - - -class Down(nn.Module): - """Downscaling with maxpool then double conv""" - - def __init__(self, in_channels, out_channels): - super().__init__() - self.maxpool_conv = nn.Sequential(nn.MaxPool2d(2), DoubleConv(in_channels, out_channels)) - - def forward(self, x): - return self.maxpool_conv(x) - - -class Up(nn.Module): - """Upscaling then double conv""" - - def __init__(self, in_channels, out_channels, bilinear=True): - super().__init__() - - # if bilinear, use the normal convolutions to reduce the number of channels - if bilinear: - self.up = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True) - self.conv = DoubleConv(in_channels, out_channels, in_channels // 2) - else: - self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2) - self.conv = DoubleConv(in_channels, out_channels) - - def forward(self, x1, x2): - x1 = self.up(x1) - # input is CHW - diffY = x2.size()[2] - x1.size()[2] - diffX = x2.size()[3] - x1.size()[3] - - x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2]) - # if you have padding issues, see - # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a - # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd - x = torch.cat([x2, x1], dim=1) - return self.conv(x) - - -class OutConv(nn.Module): - def __init__(self, in_channels, out_channels): - super().__init__() - self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1) - - def forward(self, x): - return self.conv(x) - - -class UNet(nn.Module): - def __init__(self, n_channels, n_classes, bilinear=True): - super().__init__() - self.net_name = "UNet" - self.n_channels = n_channels - self.n_classes = n_classes - self.bilinear = bilinear - - self.inc = DoubleConv(n_channels, 64) - self.down1 = Down(64, 128) - self.down2 = Down(128, 256) - self.down3 = Down(256, 512) - factor = 2 if bilinear else 1 - self.down4 = Down(512, 1024 // factor) - self.up1 = Up(1024, 512 // factor, bilinear) - self.up2 = Up(512, 256 // factor, bilinear) - self.up3 = Up(256, 128 // factor, bilinear) - self.up4 = Up(128, 64, bilinear) - self.outc = OutConv(64, n_classes) - - def forward(self, x): - x1 = self.inc(x) - x2 = self.down1(x1) - x3 = self.down2(x2) - x4 = self.down3(x3) - x5 = self.down4(x4) - x = self.up1(x5, x4) - x = self.up2(x, x3) - x = self.up3(x, x2) - x = self.up4(x, x1) - logits = self.outc(x) - return logits diff --git a/sample-apps/pathology/lib/trainers/__init__.py b/sample-apps/pathology/lib/trainers/__init__.py index 6463e5137..438bef6d5 100644 --- 
a/sample-apps/pathology/lib/trainers/__init__.py +++ b/sample-apps/pathology/lib/trainers/__init__.py @@ -10,4 +10,5 @@ # limitations under the License. from .deepedit_nuclei import DeepEditNuclei +from .nuclick import NuClick from .segmentation_nuclei import SegmentationNuclei diff --git a/sample-apps/pathology/lib/trainers/nuclick.py b/sample-apps/pathology/lib/trainers/nuclick.py new file mode 100644 index 000000000..725d34788 --- /dev/null +++ b/sample-apps/pathology/lib/trainers/nuclick.py @@ -0,0 +1,298 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import math +import os +import random + +import cv2 +import numpy as np +import skimage +import torch +from ignite.metrics import Accuracy +from lib.handlers import TensorBoardImageHandler +from lib.transforms import FilterImaged +from lib.utils import split_dataset, split_nuclei_dataset +from monai.config import KeysCollection +from monai.handlers import from_engine +from monai.inferers import SimpleInferer +from monai.losses import DiceLoss +from monai.transforms import ( + Activationsd, + AddChanneld, + AsChannelFirstd, + AsDiscreted, + EnsureTyped, + LoadImaged, + MapTransform, + RandomizableTransform, + RandRotate90d, + ScaleIntensityRangeD, + ToNumpyd, + TorchVisiond, + ToTensord, + Transform, +) +from tqdm import tqdm + +from monailabel.interfaces.datastore import Datastore +from monailabel.tasks.train.basic_train import BasicTrainTask, Context + +logger = logging.getLogger(__name__) + + +class NuClick(BasicTrainTask): + def __init__( + self, + model_dir, + network, + labels, + tile_size=(256, 256), + patch_size=128, + min_area=5, + description="Pathology NuClick Segmentation", + **kwargs, + ): + self._network = network + self.labels = labels + self.tile_size = tile_size + self.patch_size = patch_size + self.min_area = min_area + super().__init__(model_dir, description, **kwargs) + + def network(self, context: Context): + return self._network + + def optimizer(self, context: Context): + return torch.optim.Adam(context.network.parameters(), 0.0001) + + def loss_function(self, context: Context): + return DiceLoss(sigmoid=True, squared_pred=True) + + def pre_process(self, request, datastore: Datastore): + self.cleanup(request) + + cache_dir = os.path.join(self.get_cache_dir(request), "train_ds") + source = request.get("dataset_source") + max_region = request.get("dataset_max_region", (10240, 10240)) + max_region = (max_region, max_region) if isinstance(max_region, int) else max_region[:2] + + ds = split_dataset( + datastore=datastore, + cache_dir=cache_dir, + source=source, + groups=self.labels, + tile_size=self.tile_size, + max_region=max_region, + limit=request.get("dataset_limit", 0), + randomize=request.get("dataset_randomize", True), + ) + + logger.info(f"Split data (len: {len(ds)}) based on each nuclei") + ds_new = [] + limit = request.get("dataset_limit", 0) + for d in tqdm(ds): + ds_new.extend(split_nuclei_dataset(d, min_area=self.min_area)) + if 0 < limit < len(ds_new): + ds_new = 
ds_new[:limit] + break + return ds_new + + def train_pre_transforms(self, context: Context): + return [ + LoadImaged(keys=("image", "label"), dtype=np.uint8), + FilterImaged(keys="image", min_size=5), + FlattenLabeld(keys="label"), + AsChannelFirstd(keys="image"), + AddChanneld(keys="label"), + ExtractPatchd(keys=("image", "label"), patch_size=self.patch_size), + SplitLabeld(label="label", others="others", mask_value="mask_value", min_area=self.min_area), + ToTensord(keys="image"), + TorchVisiond( + keys="image", name="ColorJitter", brightness=64.0 / 255.0, contrast=0.75, saturation=0.25, hue=0.04 + ), + ToNumpyd(keys="image"), + RandRotate90d(keys=("image", "label", "others"), prob=0.5, spatial_axes=(0, 1)), + ScaleIntensityRangeD(keys="image", a_min=0.0, a_max=255.0, b_min=-1.0, b_max=1.0), + AddPointGuidanceSignald(image="image", label="label", others="others"), + EnsureTyped(keys=("image", "label")), + ] + + def train_post_transforms(self, context: Context): + return [ + Activationsd(keys="pred", sigmoid=True), + AsDiscreted(keys="pred", threshold_values=True, logit_thresh=0.5), + ] + + def train_key_metric(self, context: Context): + return {"train_acc": Accuracy(output_transform=from_engine(["pred", "label"]))} + + def val_key_metric(self, context: Context): + return {"val_acc": Accuracy(output_transform=from_engine(["pred", "label"]))} + + def val_inferer(self, context: Context): + return SimpleInferer() + + def train_handlers(self, context: Context): + handlers = super().train_handlers(context) + if context.local_rank == 0: + handlers.append(TensorBoardImageHandler(log_dir=context.events_dir, batch_limit=4)) + return handlers + + +class FlattenLabeld(MapTransform): + def __call__(self, data): + d = dict(data) + for key in self.keys: + _, labels, _, _ = cv2.connectedComponentsWithStats(d[key], 4, cv2.CV_32S) + d[key] = labels.astype(np.uint8) + return d + + +class ExtractPatchd(MapTransform): + def __init__(self, keys: KeysCollection, centroid_key="centroid", patch_size=128): + super().__init__(keys) + self.centroid_key = centroid_key + self.patch_size = patch_size + + def __call__(self, data): + d = dict(data) + + centroid = d[self.centroid_key] # create mask based on centroid (select nuclei based on centroid) + roi_size = (self.patch_size, self.patch_size) + + for key in self.keys: + img = d[key] + x_start, x_end, y_start, y_end = self.bbox(self.patch_size, centroid, img.shape[-2:]) + cropped = img[:, x_start:x_end, y_start:y_end] + d[key] = self.pad_to_shape(cropped, roi_size) + return d + + @staticmethod + def bbox(patch_size, centroid, size): + x, y = centroid + m, n = size + + x_start = int(max(x - patch_size / 2, 0)) + y_start = int(max(y - patch_size / 2, 0)) + x_end = x_start + patch_size + y_end = y_start + patch_size + if x_end > m: + x_end = m + x_start = m - patch_size + if y_end > n: + y_end = n + y_start = n - patch_size + return x_start, x_end, y_start, y_end + + @staticmethod + def pad_to_shape(img, shape): + img_shape = img.shape[-2:] + s_diff = np.array(shape) - np.array(img_shape) + diff = [(0, 0), (0, s_diff[0]), (0, s_diff[1])] + return np.pad( + img, + diff, + mode="constant", + constant_values=0, + ) + + +class SplitLabeld(Transform): + def __init__(self, label="label", others="others", mask_value="mask_value", min_area=5): + self.label = label + self.others = others + self.mask_value = mask_value + self.min_area = min_area + + def __call__(self, data): + d = dict(data) + label = d[self.label] + mask_value = d[self.mask_value] + + mask = np.uint8(label == 
mask_value) + others = (1 - mask) * label + others = self._mask_relabeling(others[0], min_area=self.min_area)[np.newaxis] + + d[self.label] = mask + d[self.others] = others + return d + + @staticmethod + def _mask_relabeling(mask, min_area=5): + res = np.zeros_like(mask) + for l in np.unique(mask): + if l == 0: + continue + + m = skimage.measure.label(mask == l, connectivity=1) + for stat in skimage.measure.regionprops(m): + if stat.area > min_area: + res[stat.coords[:, 0], stat.coords[:, 1]] = l + return res + + +class AddPointGuidanceSignald(RandomizableTransform): + def __init__(self, image="image", label="label", others="others", drop_rate=0.5, jitter_range=3): + super().__init__() + + self.image = image + self.label = label + self.others = others + self.drop_rate = drop_rate + self.jitter_range = jitter_range + + def __call__(self, data): + d = dict(data) + + image = d[self.image] + mask = d[self.label] + others = d[self.others] + + inc_sig = self.inclusion_map(mask[0]) + exc_sig = self.exclusion_map(others[0], drop_rate=self.drop_rate, jitter_range=self.jitter_range) + + image = np.concatenate((image, inc_sig[np.newaxis, ...], exc_sig[np.newaxis, ...]), axis=0) + d[self.image] = image + return d + + @staticmethod + def inclusion_map(mask): + point_mask = np.zeros_like(mask) + indices = np.argwhere(mask > 0) + if len(indices) > 0: + idx = np.random.randint(0, len(indices)) + point_mask[indices[idx, 0], indices[idx, 1]] = 1 + + return point_mask + + @staticmethod + def exclusion_map(others, jitter_range=3, drop_rate=0.5): + point_mask = np.zeros_like(others) + max_x = point_mask.shape[0] - 1 + max_y = point_mask.shape[1] - 1 + + stats = skimage.measure.regionprops(others) + for stat in stats: + x, y = stat.centroid + # random drop + if np.random.choice([True, False], p=[drop_rate, 1 - drop_rate]): + continue + + # random jitter + x = int(math.floor(x)) + random.randint(a=-jitter_range, b=jitter_range) + y = int(math.floor(y)) + random.randint(a=-jitter_range, b=jitter_range) + x = min(max(0, x), max_x) + y = min(max(0, y), max_y) + point_mask[x, y] = 1 + + return point_mask diff --git a/sample-apps/pathology/lib/utils.py b/sample-apps/pathology/lib/utils.py index 3b898730a..e4eb7aca6 100644 --- a/sample-apps/pathology/lib/utils.py +++ b/sample-apps/pathology/lib/utils.py @@ -1,33 +1,51 @@ +import copy import logging +import math import os import random import shutil -import xml.etree.ElementTree as ET +import xml.etree.ElementTree from io import BytesIO from math import ceil import cv2 import numpy as np import openslide +from monai.transforms import LoadImage from PIL import Image +from skimage.measure import regionprops from tqdm import tqdm from monailabel.datastore.dsa import DSADatastore from monailabel.datastore.local import LocalDatastore +from monailabel.interfaces.datastore import Datastore from monailabel.utils.others.generic import get_basename logger = logging.getLogger(__name__) -def split_dataset(datastore, cache_dir, source, groups, tile_size, max_region=(10240, 10240), limit=0, randomize=True): +def split_dataset( + datastore: Datastore, cache_dir, source, groups, tile_size, max_region=(10240, 10240), limit=0, randomize=True +): ds = datastore.datalist() shutil.rmtree(cache_dir, ignore_errors=True) - if source == "pannuke": + if source == "none": + pass + elif source == "pannuke": image = np.load(ds[0]["image"]) if len(ds) == 1 else None if image is not None and len(image.shape) > 3: logger.info(f"PANNuke (For Developer Mode only):: Split data; groups: {groups}") ds = 
split_pannuke_dataset(ds[0]["image"], ds[0]["label"], cache_dir, groups) + elif source == "nuclick": + logger.info("Split data based on each nuclei") + ds_new = [] + for d in tqdm(ds): + ds_new.extend(split_nuclei_dataset(d)) + if 0 < limit < len(ds_new): + ds_new = ds_new[:limit] + break + ds = ds_new else: logger.info(f"Split data based on tile size: {tile_size}; groups: {groups}") ds_new = [] @@ -156,7 +174,7 @@ def split_local_dataset(datastore, d, output_dir, groups, tile_size, max_region= points = [] polygons = {g: [] for g in groups} - annotations_xml = ET.parse(d["label"]).getroot() + annotations_xml = xml.etree.ElementTree.parse(d["label"]).getroot() for annotation in annotations_xml.iter("Annotation"): g = annotation.get("PartOfGroup") g = g if g else "None" @@ -183,6 +201,31 @@ def split_local_dataset(datastore, d, output_dir, groups, tile_size, max_region= return dataset_json +def split_nuclei_dataset(d, centroid_key="centroid", mask_value_key="mask_value", min_area=5): + dataset_json = [] + + mask = LoadImage(image_only=True, dtype=np.uint8)(d["label"]) + _, labels, _, _ = cv2.connectedComponentsWithStats(mask, 4, cv2.CV_32S) + + stats = regionprops(labels) + for stat in stats: + if stat.area < min_area: + logger.debug(f"++++ Ignored label with smaller area => ( {stat.area} < {min_area})") + continue + + x, y = stat.centroid + x = int(math.floor(x)) + y = int(math.floor(y)) + + item = copy.deepcopy(d) + item[centroid_key] = (x, y) + item[mask_value_key] = stat.label + + # logger.info(f"{d['label']} => {len(stats)} => {mask.shape} => {stat.label}") + dataset_json.append(item) + return dataset_json + + def _group_item(groups, d, output_dir): groups = groups if groups else dict() groups = [groups] if isinstance(groups, str) else groups @@ -310,7 +353,7 @@ def main_dsa(): datastore = DSADatastore(api_url, folder, api_key, annotation_groups, asset_store_path) print(json.dumps(datastore.datalist(), indent=2)) - split_dataset(datastore, "/localhome/sachi/Downloads/dsa/mostly_tumor", None, annotation_groups, (256, 256)) + split_dataset(datastore, "/localhome/sachi/Downloads/dsa/mostly_tumor", "", annotation_groups, (256, 256)) def main_nuke(): @@ -322,8 +365,15 @@ def main_nuke(): datefmt="%Y-%m-%d %H:%M:%S", ) - datastore = LocalDatastore("/localhome/sachi/Data/Pathology/PanNuke", extensions=("*.nii.gz", "*.nii", "*.npy")) - split_dataset(datastore, "/localhome/sachi/Data/Pathology/PanNukeF", "pannuke", "Nuclei", None) + datastore = LocalDatastore("/localhome/sachi/Datasets/pannuke", extensions=("*.npy")) + labels = { + "Neoplastic cells": 1, + "Inflammatory": 2, + "Connective/Soft tissue cells": 3, + "Dead Cells": 4, + "Epithelial": 5, + } + split_dataset(datastore, "/localhome/sachi/Datasets/pannukeF", "pannuke", labels, None) def main_local(): @@ -341,9 +391,24 @@ def main_local(): datastore = LocalDatastore("C:\\Projects\\Pathology\\Test", extensions=("*.svs", "*.xml")) print(json.dumps(datastore.datalist(), indent=2)) - split_dataset(datastore, "C:\\Projects\\Pathology\\TestF", None, annotation_groups, (256, 256)) + split_dataset(datastore, "C:\\Projects\\Pathology\\TestF", "", annotation_groups, (256, 256)) # print(json.dumps(ds, indent=2)) +def main_nuclei(): + from monailabel.datastore.local import LocalDatastore + + logging.basicConfig( + level=logging.INFO, + format="[%(asctime)s] [%(process)s] [%(threadName)s] [%(levelname)s] (%(name)s:%(lineno)d) - %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + ) + + # s = "/localhome/sachi/Datasets/NuClick" + s = 
"/localhome/sachi/Datasets/pannukeF" + datastore = LocalDatastore(s, extensions=("*.png", "*.npy")) + split_dataset(datastore, None, "nuclick", None, None, limit=0) + + if __name__ == "__main__": - main_local() + main_nuclei() diff --git a/sample-apps/pathology/main.py b/sample-apps/pathology/main.py index 0688b3fe5..fd959dfa8 100644 --- a/sample-apps/pathology/main.py +++ b/sample-apps/pathology/main.py @@ -160,14 +160,9 @@ def main(): datefmt="%Y-%m-%d %H:%M:%S", ) - run_train = False + run_train = True home = str(Path.home()) - if run_train: - # studies = f"{home}/Data/Pathology/PanNuke" - studies = "http://0.0.0.0:8080/api/v1" - else: - # studies = f"{home}/Data/Pathology/Test" - studies = "C:\\Projects\\Pathology\\Test" + studies = f"{home}/Datasets/pannukeF" parser = argparse.ArgumentParser() parser.add_argument("-s", "--studies", default=studies) @@ -176,12 +171,32 @@ def main(): app_dir = os.path.dirname(__file__) studies = args.studies - app = MyApp(app_dir, studies, {}) + app = MyApp(app_dir, studies, {"roi_size": "[1024,1024]", "preload": "false"}) if run_train: - train(app) + train_nuclick(app) else: - infer_nuclick(app) - # infer_wsi(app) + # infer_nuclick(app) + infer_wsi(app) + + +def train_nuclick(app): + model = "nuclick" + app.train( + request={ + "name": "train_01", + "model": model, + "max_epochs": 10, + "dataset": "PersistentDataset", # PersistentDataset, CacheDataset + "train_batch_size": 128, + "val_batch_size": 64, + "multi_gpu": True, + "val_split": 0.2, + "dataset_source": "none", + "dataset_limit": 0, + "pretrained": False, + "n_saved": 10, + }, + ) def train(app): @@ -241,10 +256,10 @@ def infer_wsi(app): home = str(Path.home()) - root_dir = f"{home}/Data/Pathology" + root_dir = f"{home}/Datasets/" image = "TCGA-02-0010-01Z-00-DX4.07de2e55-a8fe-40ee-9e98-bcb78050b9f7" - output = "dsa" + output = "asap" # slide = openslide.OpenSlide(f"{app.studies}/{image}.svs") # img = slide.read_region((7737, 20086), 0, (2048, 2048)).convert("RGB") @@ -259,7 +274,7 @@ def infer_wsi(app): "level": 0, "location": [0, 0], "size": [0, 0], - "tile_size": [2048, 2048], + "tile_size": [1024, 1024], "min_poly_area": 40, "gpus": "all", "multi_gpu": True, From 83ebf301df355624ce2ec70b29f496cd6f72715b Mon Sep 17 00:00:00 2001 From: Markus Hinsche Date: Mon, 9 May 2022 17:38:24 +0200 Subject: [PATCH 03/24] Add NRRD writer Signed-off-by: Markus Hinsche Co-authored-by: Janis Vahldiek Signed-off-by: Markus Hinsche --- monailabel/transform/writer.py | 78 ++++++++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) diff --git a/monailabel/transform/writer.py b/monailabel/transform/writer.py index a85cdf542..bad5342bf 100644 --- a/monailabel/transform/writer.py +++ b/monailabel/transform/writer.py @@ -10,7 +10,9 @@ # limitations under the License. import logging import tempfile +from typing import Dict, List +import nrrd import itk import numpy as np from monai.data import write_nifti @@ -58,6 +60,77 @@ def write_itk(image_np, output_file, affine, dtype, compress): itk.imwrite(result_image, output_file, compress) + +def write_seg_nrrd(image_np: np.ndarray, + output_file: str, + dtype: type, + affine: np.ndarray, + labels: List[str], + color_map: Dict[str, List[float]] = None, + index_order: str = 'C', # 'C' or 'F' + space: str = 'left-posterior-superior', + ) -> None: + """Write seg.nrrd file. + + Args: + image_np: Image as numpy ndarray + output_file: Output file path that the nrrd file should be saved to + dtype: numpy type e.g. 
float32 + affine: Affine matrix + labels: Labels of image segment which will be written to the nrrd header + color_map: Mapping from segment_name(str) to it's color e.g. {'heart': [255/255, 244/255, 209/255]} + index_order: Either 'C' or 'F' (see nrrd.write() documentation) + + Raises: + ValueError: In case affine is not provided + """ + if len(image_np.shape) > 2: + image_np = image_np.transpose().copy() + if dtype: + image_np = image_np.astype(dtype) + + header = {} + for i, segment_name in enumerate(labels): + header.update({ + f'Segment{i}_ID': segment_name, + f'Segment{i}_Name': segment_name, + }) + if color_map is not None: + header[f'Segment{i}_Color'] = ' '.join(list(map(str, color_map[segment_name]))) + + if affine is None: + raise ValueError("Affine matrix has to be defined") + + convert_aff_mat = np.diag([-1, -1, 1, 1]) + kinds = ['list', 'domain', 'domain', 'domain'] + if affine.shape[0] == 3: + convert_aff_mat = np.diag([-1, -1, 1]) + kinds = ['list', 'domain', 'domain'] + affine = convert_aff_mat @ affine + + _origin_key = (slice(-1), -1) + origin = affine[_origin_key] + + space_directions = np.array([ + [np.nan, np.nan, np.nan], + affine[0, :3], + affine[1, :3], + affine[2, :3], + ]) + + header.update({ + 'kinds': kinds, + 'space directions': space_directions, + 'space origin': origin, + 'space': space, + }) + nrrd.write(output_file, + image_np, + header=header, + index_order=index_order, + ) + + class Writer: def __init__( self, @@ -108,6 +181,11 @@ def __call__(self, data): if self.nibabel and ext.lower() in [".nii", ".nii.gz"]: logger.debug("Using MONAI write_nifti...") write_nifti(image_np, output_file, affine=affine, output_dtype=dtype) + elif ext.lower() in [".seg.nrrd"]: + labels = data.get("labels") + color_map = data.get("color_map") + logger.debug("Using MONAI write_seg_nrrd...") + write_seg_nrrd(image_np, output_file, dtype, affine, labels, color_map) else: write_itk(image_np, output_file, affine, dtype, compress) From b8a5f81fc980afada5d918d3e9874d675ce76b9d Mon Sep 17 00:00:00 2001 From: Markus Hinsche Date: Mon, 9 May 2022 17:38:55 +0200 Subject: [PATCH 04/24] Add unit test for NRRD writer Signed-off-by: Markus Hinsche --- requirements.txt | 1 + tests/unit/transform/test_writer.py | 34 +++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+) diff --git a/requirements.txt b/requirements.txt index e10f201b6..2b9b6e5af 100644 --- a/requirements.txt +++ b/requirements.txt @@ -26,5 +26,6 @@ opencv-python-headless==4.5.5.64 Shapely==1.8.1.post1 girder_client==3.1.8 numpymaxflow==0.0.2 +pynrrd==0.4.2 #sudo apt-get install openslide-tools -y diff --git a/tests/unit/transform/test_writer.py b/tests/unit/transform/test_writer.py index 3df178b7b..fe27a7919 100644 --- a/tests/unit/transform/test_writer.py +++ b/tests/unit/transform/test_writer.py @@ -1,6 +1,7 @@ import os import pathlib import unittest +import nrrd import numpy as np from parameterized import parameterized @@ -18,6 +19,12 @@ }, ] +COLOR_MAP = { + # according to getLabelColor() [https://github.com/Project-MONAI/MONAILabel/blob/6cc72c542c9bc6c5181af89550e7e397537d74e3/plugins/slicer/MONAILabel/MONAILabel.py#L1485] # noqa + 'lung': [128/255, 174/255, 128/255], # green + 'heart': [206/255, 110/255, 84/255], # red +} + class TestWriter(unittest.TestCase): @parameterized.expand([WRITER_DATA]) @@ -29,6 +36,33 @@ def test_nifti(self, args, input_data): file_ext = "".join(pathlib.Path(input_data["image_path"]).suffixes) self.assertIn(file_ext.lower(), [".nii", ".nii.gz"]) + 
@parameterized.expand([WRITER_DATA]) + def test_seg_nrrd(self, args, input_data): + args.update({"nibabel": False}) + input_data["image_path"] = "fakepath.seg.nrrd" + input_data["labels"] = ["heart", "lung"] + input_data["color_map"] = COLOR_MAP + + output_file, data = Writer(**args)(input_data) + self.assertEqual(os.path.exists(output_file), True) + arr_full, header = nrrd.read(output_file) + + # DEBUG + print(header) + + space_directions_expected = np.array([[np.nan, np.nan, np.nan], + [-1., 0., 0.], + [ 0., -1., 0.], + [ 0., 0., 1.]]) + self.assertTrue(np.array_equal(header['space directions'], space_directions_expected, equal_nan=True)) + + self.assertEqual(header['kinds'], ['list', 'domain', 'domain', 'domain']) + self.assertEqual(header['Segment1_ID'], 'lung') + self.assertEqual(header['Segment1_Color'], " ".join(map(str, COLOR_MAP['lung']))) + + file_ext = "".join(pathlib.Path(input_data["image_path"]).suffixes) + self.assertIn(file_ext.lower(), [".seg.nrrd"]) + @parameterized.expand([WRITER_DATA]) def test_itk(self, args, input_data): args.update({"nibabel": False}) From 02359fe2a42dc6a4471cb5a6c484513b10e1b617 Mon Sep 17 00:00:00 2001 From: Markus Hinsche Date: Mon, 9 May 2022 17:44:11 +0200 Subject: [PATCH 05/24] Polish Signed-off-by: Markus Hinsche --- monailabel/transform/writer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monailabel/transform/writer.py b/monailabel/transform/writer.py index bad5342bf..f9240cd4c 100644 --- a/monailabel/transform/writer.py +++ b/monailabel/transform/writer.py @@ -67,7 +67,7 @@ def write_seg_nrrd(image_np: np.ndarray, affine: np.ndarray, labels: List[str], color_map: Dict[str, List[float]] = None, - index_order: str = 'C', # 'C' or 'F' + index_order: str = 'C', space: str = 'left-posterior-superior', ) -> None: """Write seg.nrrd file. From 4f6fee802a84c67b8f55331cbb0e9ba98a8cbb5a Mon Sep 17 00:00:00 2001 From: Markus Hinsche Date: Tue, 10 May 2022 15:12:56 +0200 Subject: [PATCH 06/24] Add labels to data object, Check for label list Signed-off-by: Markus Hinsche Co-authored-by: Janis Vahldiek Signed-off-by: Markus Hinsche --- monailabel/interfaces/tasks/infer.py | 2 ++ monailabel/transform/writer.py | 5 ++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/monailabel/interfaces/tasks/infer.py b/monailabel/interfaces/tasks/infer.py index fb442f1ec..7dd74657a 100644 --- a/monailabel/interfaces/tasks/infer.py +++ b/monailabel/interfaces/tasks/infer.py @@ -433,6 +433,8 @@ def writer(self, data, extension=None, dtype=None): data["result_extension"] = extension if dtype is not None: data["result_dtype"] = dtype + if self.labels is not None: + data["labels"] = self.labels writer = Writer(label=self.output_label_key, json=self.output_json_key) return writer(data) diff --git a/monailabel/transform/writer.py b/monailabel/transform/writer.py index f9240cd4c..f74b18d42 100644 --- a/monailabel/transform/writer.py +++ b/monailabel/transform/writer.py @@ -10,7 +10,7 @@ # limitations under the License. import logging import tempfile -from typing import Dict, List +from typing import Dict, List, Iterable import nrrd import itk @@ -89,6 +89,9 @@ def write_seg_nrrd(image_np: np.ndarray, if dtype: image_np = image_np.astype(dtype) + if not isinstance(labels, Iterable): + raise ValueError("Labels have to be defined, e.g. 
as a list") + header = {} for i, segment_name in enumerate(labels): header.update({ From 3355bde9c46f53e35034565e365352ba1b35c5c2 Mon Sep 17 00:00:00 2001 From: Janis Vahldiek Date: Wed, 11 May 2022 11:58:12 +0200 Subject: [PATCH 07/24] update docstrings Signed-off-by: Janis Vahldiek Signed-off-by: Markus Hinsche --- monailabel/transform/writer.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/monailabel/transform/writer.py b/monailabel/transform/writer.py index f74b18d42..8baf08e55 100644 --- a/monailabel/transform/writer.py +++ b/monailabel/transform/writer.py @@ -70,7 +70,7 @@ def write_seg_nrrd(image_np: np.ndarray, index_order: str = 'C', space: str = 'left-posterior-superior', ) -> None: - """Write seg.nrrd file. + """Write multi-channel .seg.nrrd file. Args: image_np: Image as numpy ndarray @@ -83,6 +83,7 @@ def write_seg_nrrd(image_np: np.ndarray, Raises: ValueError: In case affine is not provided + ValueError: In case labels are not provided """ if len(image_np.shape) > 2: image_np = image_np.transpose().copy() From f63f3e1aa47dc365d2b18cf6f84e290fec05ad22 Mon Sep 17 00:00:00 2001 From: Janis Vahldiek Date: Wed, 11 May 2022 12:13:16 +0200 Subject: [PATCH 08/24] change writer selection write_seg_nrrd() is only needed for 4D multi-channel label arrays. Everything else can be handled by ITK of nifty writer. Signed-off-by: Janis Vahldiek Signed-off-by: Markus Hinsche --- monailabel/transform/writer.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/monailabel/transform/writer.py b/monailabel/transform/writer.py index 8baf08e55..ebf4f1afb 100644 --- a/monailabel/transform/writer.py +++ b/monailabel/transform/writer.py @@ -181,15 +181,18 @@ def __call__(self, data): output_file = tempfile.NamedTemporaryFile(suffix=ext).name logger.debug(f"Saving Image to: {output_file}") - # Issue with slicer:: https://discourse.itk.org/t/saving-non-orthogonal-volume-in-nifti-format/2760/22 - if self.nibabel and ext.lower() in [".nii", ".nii.gz"]: - logger.debug("Using MONAI write_nifti...") - write_nifti(image_np, output_file, affine=affine, output_dtype=dtype) - elif ext.lower() in [".seg.nrrd"]: + if image_np.shape == 4 and image.np.shape[-1] > 1: + if ext != ".seg.nrrd": + logger.debug(f"Using extension '{ext}' with multi-channel 4D label will probably fail" + + "Consider to use extension '.seg.nrrd'") labels = data.get("labels") color_map = data.get("color_map") - logger.debug("Using MONAI write_seg_nrrd...") + logger.debug("Using write_seg_nrrd...") write_seg_nrrd(image_np, output_file, dtype, affine, labels, color_map) + # Issue with slicer:: https://discourse.itk.org/t/saving-non-orthogonal-volume-in-nifti-format/2760/22 + elif self.nibabel and ext.lower() in [".nii", ".nii.gz"]: + logger.debug("Using MONAI write_nifti...") + write_nifti(image_np, output_file, affine=affine, output_dtype=dtype) else: write_itk(image_np, output_file, affine, dtype, compress) From f5ac9c6d13ee9ba8fd605de3a7984356170b6db0 Mon Sep 17 00:00:00 2001 From: Janis Vahldiek Date: Wed, 11 May 2022 12:17:50 +0200 Subject: [PATCH 09/24] polish write_seg_nrrd() will only be used for 4D multi-channel label arrays. Thus, removed unnecessary code. 
Signed-off-by: Janis Vahldiek Signed-off-by: Markus Hinsche --- monailabel/transform/writer.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/monailabel/transform/writer.py b/monailabel/transform/writer.py index ebf4f1afb..c2a69c6e1 100644 --- a/monailabel/transform/writer.py +++ b/monailabel/transform/writer.py @@ -85,8 +85,7 @@ def write_seg_nrrd(image_np: np.ndarray, ValueError: In case affine is not provided ValueError: In case labels are not provided """ - if len(image_np.shape) > 2: - image_np = image_np.transpose().copy() + image_np = image_np.transpose().copy() if dtype: image_np = image_np.astype(dtype) @@ -105,11 +104,9 @@ def write_seg_nrrd(image_np: np.ndarray, if affine is None: raise ValueError("Affine matrix has to be defined") - convert_aff_mat = np.diag([-1, -1, 1, 1]) kinds = ['list', 'domain', 'domain', 'domain'] - if affine.shape[0] == 3: - convert_aff_mat = np.diag([-1, -1, 1]) - kinds = ['list', 'domain', 'domain'] + + convert_aff_mat = np.diag([-1, -1, 1, 1]) affine = convert_aff_mat @ affine _origin_key = (slice(-1), -1) From 03b8744cc47ba56c9ae3569f0a53d7399ce8ce72 Mon Sep 17 00:00:00 2001 From: Markus Hinsche Date: Wed, 11 May 2022 20:37:08 +0200 Subject: [PATCH 10/24] Fix typo Signed-off-by: Markus Hinsche --- monailabel/transform/writer.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/monailabel/transform/writer.py b/monailabel/transform/writer.py index c2a69c6e1..85b2b4b4f 100644 --- a/monailabel/transform/writer.py +++ b/monailabel/transform/writer.py @@ -70,11 +70,11 @@ def write_seg_nrrd(image_np: np.ndarray, index_order: str = 'C', space: str = 'left-posterior-superior', ) -> None: - """Write multi-channel .seg.nrrd file. + """Write multi-channel seg.nrrd file. Args: image_np: Image as numpy ndarray - output_file: Output file path that the nrrd file should be saved to + output_file: Output file path that the seg.nrrd file should be saved to dtype: numpy type e.g. 
float32 affine: Affine matrix labels: Labels of image segment which will be written to the nrrd header @@ -178,7 +178,7 @@ def __call__(self, data): output_file = tempfile.NamedTemporaryFile(suffix=ext).name logger.debug(f"Saving Image to: {output_file}") - if image_np.shape == 4 and image.np.shape[-1] > 1: + if image_np.shape == 4 and image_np.shape[-1] > 1: if ext != ".seg.nrrd": logger.debug(f"Using extension '{ext}' with multi-channel 4D label will probably fail" + "Consider to use extension '.seg.nrrd'") From 31b8852c110a55b0804870dcead9e52202fe4b04 Mon Sep 17 00:00:00 2001 From: Markus Hinsche Date: Wed, 11 May 2022 20:49:38 +0200 Subject: [PATCH 11/24] Add pynrrd==0.4.2 to setup.cfg Signed-off-by: Markus Hinsche Signed-off-by: Markus Hinsche --- setup.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.cfg b/setup.cfg index ad36443f6..7253e6dd3 100644 --- a/setup.cfg +++ b/setup.cfg @@ -51,6 +51,7 @@ install_requires = Shapely==1.8.1.post1 girder_client==3.1.8 numpymaxflow==0.0.2 + pynrrd==0.4.2 [flake8] select = B,C,E,F,N,P,T4,W,B9 From f323fe36ce1130bb5f8053190d00c835cc6aa3e4 Mon Sep 17 00:00:00 2001 From: Janis Vahldiek Date: Thu, 12 May 2022 10:40:47 +0200 Subject: [PATCH 12/24] fix check for multi-channel label Signed-off-by: Janis Vahldiek Signed-off-by: Markus Hinsche --- monailabel/transform/writer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monailabel/transform/writer.py b/monailabel/transform/writer.py index 85b2b4b4f..b570f274d 100644 --- a/monailabel/transform/writer.py +++ b/monailabel/transform/writer.py @@ -178,7 +178,7 @@ def __call__(self, data): output_file = tempfile.NamedTemporaryFile(suffix=ext).name logger.debug(f"Saving Image to: {output_file}") - if image_np.shape == 4 and image_np.shape[-1] > 1: + if len(image_np.shape == 4) and image_np.shape[-1] > 1: if ext != ".seg.nrrd": logger.debug(f"Using extension '{ext}' with multi-channel 4D label will probably fail" + "Consider to use extension '.seg.nrrd'") From 9c2d05528c9dcabc90b04efb87e6fb0bb2dadb18 Mon Sep 17 00:00:00 2001 From: Janis Vahldiek Date: Thu, 12 May 2022 10:45:25 +0200 Subject: [PATCH 13/24] fix unit test Signed-off-by: Janis Vahldiek Signed-off-by: Markus Hinsche --- tests/unit/transform/test_writer.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/unit/transform/test_writer.py b/tests/unit/transform/test_writer.py index fe27a7919..2d56bd6b5 100644 --- a/tests/unit/transform/test_writer.py +++ b/tests/unit/transform/test_writer.py @@ -19,6 +19,8 @@ }, ] +MULTI_CHANNEL_DATA = np.array([[[[1, 0], [0, 1], [1, 0]], [[0, 1], [1, 0], [0, 1]]]]).astype(np.float32) + COLOR_MAP = { # according to getLabelColor() [https://github.com/Project-MONAI/MONAILabel/blob/6cc72c542c9bc6c5181af89550e7e397537d74e3/plugins/slicer/MONAILabel/MONAILabel.py#L1485] # noqa 'lung': [128/255, 174/255, 128/255], # green @@ -39,7 +41,8 @@ def test_nifti(self, args, input_data): @parameterized.expand([WRITER_DATA]) def test_seg_nrrd(self, args, input_data): args.update({"nibabel": False}) - input_data["image_path"] = "fakepath.seg.nrrd" + input_data["pred"] = MULTI_CHANNEL_DATA + input_data["result_extension"] = ".seg.nrrd" input_data["labels"] = ["heart", "lung"] input_data["color_map"] = COLOR_MAP @@ -60,7 +63,7 @@ def test_seg_nrrd(self, args, input_data): self.assertEqual(header['Segment1_ID'], 'lung') self.assertEqual(header['Segment1_Color'], " ".join(map(str, COLOR_MAP['lung']))) - file_ext = "".join(pathlib.Path(input_data["image_path"]).suffixes) + 
file_ext = "".join(pathlib.Path(output_file).suffixes) self.assertIn(file_ext.lower(), [".seg.nrrd"]) @parameterized.expand([WRITER_DATA]) From 5364d170f4bcf6b1f2e108651df01b6e6b2a1acc Mon Sep 17 00:00:00 2001 From: Markus Hinsche Date: Thu, 12 May 2022 13:16:29 +0200 Subject: [PATCH 14/24] Fix multichannel problem Signed-off-by: Markus Hinsche Signed-off-by: Markus Hinsche --- monailabel/transform/writer.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/monailabel/transform/writer.py b/monailabel/transform/writer.py index b570f274d..c6ed5cdad 100644 --- a/monailabel/transform/writer.py +++ b/monailabel/transform/writer.py @@ -178,7 +178,7 @@ def __call__(self, data): output_file = tempfile.NamedTemporaryFile(suffix=ext).name logger.debug(f"Saving Image to: {output_file}") - if len(image_np.shape == 4) and image_np.shape[-1] > 1: + if self.is_multichannel_image(image_np): if ext != ".seg.nrrd": logger.debug(f"Using extension '{ext}' with multi-channel 4D label will probably fail" + "Consider to use extension '.seg.nrrd'") @@ -195,6 +195,9 @@ def __call__(self, data): return output_file, output_json + def is_multichannel_image(self, image_np): + return len(image_np.shape) == 4 and image_np.shape[-1] > 1 + class ClassificationWriter: def __init__(self, label="pred", label_names=None): From 4cf50dd113aaa33631a3822ca9713bcfb1696268 Mon Sep 17 00:00:00 2001 From: Markus Hinsche Date: Thu, 12 May 2022 14:09:30 +0200 Subject: [PATCH 15/24] Reformat code Signed-off-by: Markus Hinsche --- monailabel/transform/writer.py | 85 ++++++++++++++++------------- tests/unit/transform/test_writer.py | 21 ++++--- 2 files changed, 57 insertions(+), 49 deletions(-) diff --git a/monailabel/transform/writer.py b/monailabel/transform/writer.py index c6ed5cdad..9c2c30078 100644 --- a/monailabel/transform/writer.py +++ b/monailabel/transform/writer.py @@ -10,10 +10,10 @@ # limitations under the License. import logging import tempfile -from typing import Dict, List, Iterable +from typing import Dict, Iterable, List -import nrrd import itk +import nrrd import numpy as np from monai.data import write_nifti @@ -60,16 +60,16 @@ def write_itk(image_np, output_file, affine, dtype, compress): itk.imwrite(result_image, output_file, compress) - -def write_seg_nrrd(image_np: np.ndarray, - output_file: str, - dtype: type, - affine: np.ndarray, - labels: List[str], - color_map: Dict[str, List[float]] = None, - index_order: str = 'C', - space: str = 'left-posterior-superior', - ) -> None: +def write_seg_nrrd( + image_np: np.ndarray, + output_file: str, + dtype: type, + affine: np.ndarray, + labels: List[str], + color_map: Dict[str, List[float]] = None, + index_order: str = "C", + space: str = "left-posterior-superior", +) -> None: """Write multi-channel seg.nrrd file. 
Args: @@ -94,17 +94,19 @@ def write_seg_nrrd(image_np: np.ndarray, header = {} for i, segment_name in enumerate(labels): - header.update({ - f'Segment{i}_ID': segment_name, - f'Segment{i}_Name': segment_name, - }) + header.update( + { + f"Segment{i}_ID": segment_name, + f"Segment{i}_Name": segment_name, + } + ) if color_map is not None: - header[f'Segment{i}_Color'] = ' '.join(list(map(str, color_map[segment_name]))) + header[f"Segment{i}_Color"] = " ".join(list(map(str, color_map[segment_name]))) if affine is None: raise ValueError("Affine matrix has to be defined") - kinds = ['list', 'domain', 'domain', 'domain'] + kinds = ["list", "domain", "domain", "domain"] convert_aff_mat = np.diag([-1, -1, 1, 1]) affine = convert_aff_mat @ affine @@ -112,24 +114,29 @@ def write_seg_nrrd(image_np: np.ndarray, _origin_key = (slice(-1), -1) origin = affine[_origin_key] - space_directions = np.array([ - [np.nan, np.nan, np.nan], - affine[0, :3], - affine[1, :3], - affine[2, :3], - ]) - - header.update({ - 'kinds': kinds, - 'space directions': space_directions, - 'space origin': origin, - 'space': space, - }) - nrrd.write(output_file, - image_np, - header=header, - index_order=index_order, - ) + space_directions = np.array( + [ + [np.nan, np.nan, np.nan], + affine[0, :3], + affine[1, :3], + affine[2, :3], + ] + ) + + header.update( + { + "kinds": kinds, + "space directions": space_directions, + "space origin": origin, + "space": space, + } + ) + nrrd.write( + output_file, + image_np, + header=header, + index_order=index_order, + ) class Writer: @@ -180,8 +187,10 @@ def __call__(self, data): if self.is_multichannel_image(image_np): if ext != ".seg.nrrd": - logger.debug(f"Using extension '{ext}' with multi-channel 4D label will probably fail" + - "Consider to use extension '.seg.nrrd'") + logger.debug( + f"Using extension '{ext}' with multi-channel 4D label will probably fail" + + "Consider to use extension '.seg.nrrd'" + ) labels = data.get("labels") color_map = data.get("color_map") logger.debug("Using write_seg_nrrd...") diff --git a/tests/unit/transform/test_writer.py b/tests/unit/transform/test_writer.py index 2d56bd6b5..4eca71ab5 100644 --- a/tests/unit/transform/test_writer.py +++ b/tests/unit/transform/test_writer.py @@ -1,8 +1,8 @@ import os import pathlib import unittest -import nrrd +import nrrd import numpy as np from parameterized import parameterized @@ -23,8 +23,8 @@ COLOR_MAP = { # according to getLabelColor() [https://github.com/Project-MONAI/MONAILabel/blob/6cc72c542c9bc6c5181af89550e7e397537d74e3/plugins/slicer/MONAILabel/MONAILabel.py#L1485] # noqa - 'lung': [128/255, 174/255, 128/255], # green - 'heart': [206/255, 110/255, 84/255], # red + "lung": [128 / 255, 174 / 255, 128 / 255], # green + "heart": [206 / 255, 110 / 255, 84 / 255], # red } @@ -53,15 +53,14 @@ def test_seg_nrrd(self, args, input_data): # DEBUG print(header) - space_directions_expected = np.array([[np.nan, np.nan, np.nan], - [-1., 0., 0.], - [ 0., -1., 0.], - [ 0., 0., 1.]]) - self.assertTrue(np.array_equal(header['space directions'], space_directions_expected, equal_nan=True)) + space_directions_expected = np.array( + [[np.nan, np.nan, np.nan], [-1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 1.0]] + ) + self.assertTrue(np.array_equal(header["space directions"], space_directions_expected, equal_nan=True)) - self.assertEqual(header['kinds'], ['list', 'domain', 'domain', 'domain']) - self.assertEqual(header['Segment1_ID'], 'lung') - self.assertEqual(header['Segment1_Color'], " ".join(map(str, COLOR_MAP['lung']))) + 
self.assertEqual(header["kinds"], ["list", "domain", "domain", "domain"]) + self.assertEqual(header["Segment1_ID"], "lung") + self.assertEqual(header["Segment1_Color"], " ".join(map(str, COLOR_MAP["lung"]))) file_ext = "".join(pathlib.Path(output_file).suffixes) self.assertIn(file_ext.lower(), [".seg.nrrd"]) From 898556283670241ef65be2a38dc4d258fe566e2c Mon Sep 17 00:00:00 2001 From: Markus Hinsche Date: Thu, 12 May 2022 14:14:43 +0200 Subject: [PATCH 16/24] Make mypy happy Signed-off-by: Markus Hinsche --- monailabel/transform/writer.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/monailabel/transform/writer.py b/monailabel/transform/writer.py index 9c2c30078..d1ff57f94 100644 --- a/monailabel/transform/writer.py +++ b/monailabel/transform/writer.py @@ -10,7 +10,7 @@ # limitations under the License. import logging import tempfile -from typing import Dict, Iterable, List +from typing import Any, Dict, Iterable, List, Optional import itk import nrrd @@ -66,7 +66,7 @@ def write_seg_nrrd( dtype: type, affine: np.ndarray, labels: List[str], - color_map: Dict[str, List[float]] = None, + color_map: Optional[Dict[str, List[float]]] = None, index_order: str = "C", space: str = "left-posterior-superior", ) -> None: @@ -92,7 +92,7 @@ def write_seg_nrrd( if not isinstance(labels, Iterable): raise ValueError("Labels have to be defined, e.g. as a list") - header = {} + header: Dict[str, Any] = {} for i, segment_name in enumerate(labels): header.update( { From 38b2dea758ab9948cc23f4eec815696c853db97b Mon Sep 17 00:00:00 2001 From: Markus Hinsche Date: Thu, 12 May 2022 14:22:44 +0200 Subject: [PATCH 17/24] Fix merge conflict Signed-off-by: Markus Hinsche --- sample-apps/pathology/main.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sample-apps/pathology/main.py b/sample-apps/pathology/main.py index cdef4c885..3c1b3a6b7 100644 --- a/sample-apps/pathology/main.py +++ b/sample-apps/pathology/main.py @@ -160,7 +160,7 @@ def main(): datefmt="%Y-%m-%d %H:%M:%S", ) - run_train = True + run_train = False home = str(Path.home()) studies = f"{home}/Datasets/Pathology" @@ -257,7 +257,7 @@ def infer_wsi(app): root_dir = f"{home}/Datasets/" image = "TCGA-02-0010-01Z-00-DX4.07de2e55-a8fe-40ee-9e98-bcb78050b9f7" - output = "asap" + output = "dsa" # slide = openslide.OpenSlide(f"{app.studies}/{image}.svs") # img = slide.read_region((7737, 20086), 0, (2048, 2048)).convert("RGB") From 5a5f80fb116520376c955ab3f75c33631f178c6e Mon Sep 17 00:00:00 2001 From: Markus Hinsche Date: Tue, 17 May 2022 11:03:20 +0200 Subject: [PATCH 18/24] Use logger.warning instead logger.debug Signed-off-by: Markus Hinsche --- monailabel/transform/writer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monailabel/transform/writer.py b/monailabel/transform/writer.py index 1633ff448..26e917680 100644 --- a/monailabel/transform/writer.py +++ b/monailabel/transform/writer.py @@ -187,7 +187,7 @@ def __call__(self, data): if self.is_multichannel_image(image_np): if ext != ".seg.nrrd": - logger.debug( + logger.warning( f"Using extension '{ext}' with multi-channel 4D label will probably fail" + "Consider to use extension '.seg.nrrd'" ) From b1a0c40881e850ebfa1a4669a1f8278ad0baac60 Mon Sep 17 00:00:00 2001 From: Andres Diaz-Pinto Date: Sun, 15 May 2022 20:13:34 +0100 Subject: [PATCH 19/24] Add Active Learning strategies to DeepEdit (#782) * Add Active Learning strategies to DeepEdit Signed-off-by: Andres Diaz-Pinto * Update readme - commands Active Learning strategies Signed-off-by: 
Andres Diaz-Pinto Signed-off-by: Markus Hinsche --- monailabel/config.py | 2 +- sample-apps/radiology/README.md | 26 ++++- sample-apps/radiology/lib/configs/deepedit.py | 108 ++++++++++++++---- 3 files changed, 107 insertions(+), 29 deletions(-) diff --git a/monailabel/config.py b/monailabel/config.py index 3117f372e..349981fa1 100644 --- a/monailabel/config.py +++ b/monailabel/config.py @@ -49,7 +49,7 @@ class Settings(BaseSettings): MONAI_LABEL_SERVER_PORT: int = 8000 MONAI_LABEL_CORS_ORIGINS: List[AnyHttpUrl] = [] - MONAI_LABEL_AUTO_UPDATE_SCORING = False + MONAI_LABEL_AUTO_UPDATE_SCORING = True MONAI_LABEL_SESSIONS: bool = True MONAI_LABEL_SESSION_PATH: str = "" diff --git a/sample-apps/radiology/README.md b/sample-apps/radiology/README.md index f78275cc1..751e1b634 100644 --- a/sample-apps/radiology/README.md +++ b/sample-apps/radiology/README.md @@ -64,10 +64,20 @@ This model works for single and multiple label segmentation tasks. - Additional Configs *(pass them as **--conf name value**) while starting MONAILabelServer* -| Name | Values | Description | -|----------------------|--------------------|-----------------------------------------------------------------| -| network | **dynunet**, unetr | Using one of these network and corresponding pretrained weights | -| use_pretrained_model | **true**, false | Disable this NOT to load any pretrained weights | +| Name | Values | Description | +|----------------------|--------------------|--------------------------------------------------------------------| +| network | **dynunet**, unetr | Using one of these network and corresponding pretrained weights | +| use_pretrained_model | **true**, false | Disable this NOT to load any pretrained weights | +| skip_scoring | **true**, false | Disable this to allow scoring methods to be used | +| skip_strategies | **true**, false | Disable this to add active learning strategies | +| epistemic_enabled | true, **false** | Enable Epistemic based Active Learning Strategy | +| epistemic_samples | int | Limit number of samples to run epistemic scoring | +| tta_enabled | true, **false** | Enable TTA (Test Time Augmentation) based Active Learning Strategy | +| tta_samples | int | Limit number of samples to run tta scoring | + +A command example to use active learning strategies with DeepEdit would be: + +> monailabel start_server --app workspace/radiology --studies workspace/images --conf models deepedit --conf skip_scoring false --conf skip_strategies false --conf tta_enabled true - Network > This App uses the DynUNet as the default network. It also comes with pretrained model for [UNETR](https://docs.monai.io/en/latest/networks.html#unetr). 
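For reference, the --conf values in the table above reach the app as plain strings; a minimal sketch of how the DeepEdit config turns them into typed settings, mirroring the conf.get(...) calls added to sample-apps/radiology/lib/configs/deepedit.py further down in this patch (the strtobool import and the literal conf dict are illustrative assumptions, not part of the patch):

    from distutils.util import strtobool

    # values arrive exactly as typed on the command line, e.g. --conf tta_enabled true
    conf = {"tta_enabled": "true", "tta_samples": "10"}

    tta_enabled = strtobool(conf.get("tta_enabled", "false"))  # -> 1 (truthy) for "true"
    tta_samples = int(conf.get("tta_samples", "5"))            # -> 10; falls back to 5 if unset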
@@ -206,11 +216,19 @@ from [NVIDIA Clara](https://catalog.ngc.nvidia.com/models?filters=&orderBy=dateM | Name | Values | Description | |----------------------|-----------------|--------------------------------------------------------------------| | use_pretrained_model | **true**, false | Disable this NOT to load any pretrained weights | +| skip_scoring | **true**, false | Disable this to allow scoring methods to be used | +| skip_strategies | **true**, false | Disable this to add active learning strategies | | epistemic_enabled | true, **false** | Enable Epistemic based Active Learning Strategy | | epistemic_samples | int | Limit number of samples to run epistemic scoring | | tta_enabled | true, **false** | Enable TTA (Test Time Augmentation) based Active Learning Strategy | | tta_samples | int | Limit number of samples to run tta scoring | + +A command example to use active learning strategies with segmentation_spleen would be: + +> monailabel start_server --app workspace/radiology --studies workspace/images --conf models segmentation_spleen --conf skip_scoring false --conf skip_strategies false --conf tta_enabled true + + - Network > This App uses the [UNet](https://docs.monai.io/en/latest/networks.html#unet) as the default network. > Researchers can define their own network or use one of the listed [here](https://docs.monai.io/en/latest/networks.html) diff --git a/sample-apps/radiology/lib/configs/deepedit.py b/sample-apps/radiology/lib/configs/deepedit.py index e080387e5..49d18850e 100644 --- a/sample-apps/radiology/lib/configs/deepedit.py +++ b/sample-apps/radiology/lib/configs/deepedit.py @@ -19,7 +19,15 @@ from monailabel.interfaces.config import TaskConfig from monailabel.interfaces.tasks.infer import InferTask, InferType +from monailabel.interfaces.tasks.scoring import ScoringMethod +from monailabel.interfaces.tasks.strategy import Strategy from monailabel.interfaces.tasks.train import TrainTask +from monailabel.tasks.activelearning.epistemic import Epistemic +from monailabel.tasks.activelearning.tta import TTA +from monailabel.tasks.scoring.dice import Dice +from monailabel.tasks.scoring.epistemic import EpistemicScoring +from monailabel.tasks.scoring.sum import Sum +from monailabel.tasks.scoring.tta import TTAScoring from monailabel.utils.others.generic import download_file logger = logging.getLogger(__name__) @@ -29,6 +37,11 @@ class DeepEdit(TaskConfig): def init(self, name: str, model_dir: str, conf: Dict[str, str], planner: Any, **kwargs): super().init(name, model_dir, conf, planner, **kwargs) + self.epistemic_enabled = None + self.epistemic_samples = None + self.tta_enabled = None + self.tta_samples = None + # Multilabel # self.labels = { # "spleen": 1, @@ -68,31 +81,46 @@ def init(self, name: str, model_dir: str, conf: Dict[str, str], planner: Any, ** # Network if network == "unetr": - self.network = UNETR( - spatial_dims=3, - in_channels=len(self.labels) + self.number_intensity_ch, - out_channels=len(self.labels), - img_size=self.spatial_size, - feature_size=64, - hidden_size=1536, - mlp_dim=3072, - num_heads=48, - pos_embed="conv", - norm_name="instance", - res_block=True, - ) + self.network_params = { + "spatial_dims": 3, + "in_channels": len(self.labels) + self.number_intensity_ch, + "out_channels": len(self.labels), + "img_size": self.spatial_size, + "feature_size": 64, + "hidden_size": 1536, + "mlp_dim": 3072, + "num_heads": 48, + "pos_embed": "conv", + "norm_name": "instance", + "res_block": True, + } + self.network = UNETR(**self.network_params) + 
self.network_with_dropout = UNETR(**self.network_params, dropout_rate=0.2) + self.find_unused_parameters = False else: - self.network = DynUNet( - spatial_dims=3, - in_channels=len(self.labels) + self.number_intensity_ch, - out_channels=len(self.labels), - kernel_size=[[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], - strides=[[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 1]], - upsample_kernel_size=[[2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 1]], - norm_name="instance", - deep_supervision=False, - res_block=True, - ) + self.network_params = { + "spatial_dims": 3, + "in_channels": len(self.labels) + self.number_intensity_ch, + "out_channels": len(self.labels), + "kernel_size": [3, 3, 3, 3, 3, 3], + "strides": [1, 2, 2, 2, 2, [2, 2, 1]], + "upsample_kernel_size": [2, 2, 2, 2, [2, 2, 1]], + "norm_name": "instance", + "deep_supervision": False, + "res_block": True, + } + self.network = DynUNet(**self.network_params) + self.network_with_dropout = DynUNet(**self.network_params, dropout=0.2) + self.find_unused_parameters = False + + # Others + self.epistemic_enabled = strtobool(conf.get("epistemic_enabled", "false")) + self.epistemic_samples = int(conf.get("epistemic_samples", "5")) + logger.info(f"EPISTEMIC Enabled: {self.epistemic_enabled}; Samples: {self.epistemic_samples}") + + self.tta_enabled = strtobool(conf.get("tta_enabled", "false")) + self.tta_samples = int(conf.get("tta_samples", "5")) + logger.info(f"TTA Enabled: {self.tta_enabled}; Samples: {self.tta_samples}") def infer(self) -> Union[InferTask, Dict[str, InferTask]]: return { @@ -125,3 +153,35 @@ def trainer(self) -> Optional[TrainTask]: find_unused_parameters=True, ) return task + + def strategy(self) -> Union[None, Strategy, Dict[str, Strategy]]: + strategies: Dict[str, Strategy] = {} + if self.epistemic_enabled: + strategies[f"{self.name}_epistemic"] = Epistemic() + if self.tta_enabled: + strategies[f"{self.name}_tta"] = TTA() + return strategies + + def scoring_method(self) -> Union[None, ScoringMethod, Dict[str, ScoringMethod]]: + methods: Dict[str, ScoringMethod] = { + "dice": Dice(), + "sum": Sum(), + } + + if self.epistemic_enabled: + methods[f"{self.name}_epistemic"] = EpistemicScoring( + model=self.path, + network=self.network_with_dropout, + transforms=lib.infers.DeepEdit(type=InferType.DEEPEDIT).pre_transforms(), + num_samples=self.epistemic_samples, + ) + if self.tta_enabled: + methods[f"{self.name}_tta"] = TTAScoring( + model=self.path, + network=self.network, + deepedit=True, + num_samples=self.tta_samples, + spatial_size=self.spatial_size, + spacing=self.target_spacing, + ) + return methods From 7c73853928acddeac977389d73167293e053be6d Mon Sep 17 00:00:00 2001 From: Andres Diaz-Pinto Date: Sun, 15 May 2022 23:48:08 +0100 Subject: [PATCH 20/24] Prepare MONAI Label for new monai - DeepEdit transforms (#781) * Prepare MONAI Label for new monai - DeepEdit transforms Signed-off-by: Andres Diaz-Pinto * Add deprecated messages - DeepEdit transforms - interaction Signed-off-by: Andres Diaz-Pinto Co-authored-by: SACHIDANAND ALLE Signed-off-by: Markus Hinsche --- monailabel/deepedit/multilabel/interaction.py | 2 + monailabel/deepedit/multilabel/transforms.py | 58 +++++++++++++++++++ requirements.txt | 2 +- sample-apps/radiology/lib/infers/deepedit.py | 18 +++--- .../lib/infers/segmentation_spleen.py | 4 +- .../radiology/lib/trainers/deepedit.py | 33 +++++------ .../lib/trainers/segmentation_spleen.py | 6 +- setup.cfg | 2 +- 8 files changed, 92 insertions(+), 33 deletions(-) diff 
--git a/monailabel/deepedit/multilabel/interaction.py b/monailabel/deepedit/multilabel/interaction.py index ba3e0b2bf..e5956e457 100644 --- a/monailabel/deepedit/multilabel/interaction.py +++ b/monailabel/deepedit/multilabel/interaction.py @@ -17,9 +17,11 @@ from monai.engines import SupervisedEvaluator, SupervisedTrainer from monai.engines.utils import IterationEvents from monai.transforms import Compose +from monai.utils import deprecated from monai.utils.enums import CommonKeys +@deprecated(since="0.4", msg_suffix="For Radiology app use monai.apps.deepedit.interaction.Interaction instead") class Interaction: """ Ignite process_function used to introduce interactions (simulation of clicks) for DeepEdit Training/Evaluation. diff --git a/monailabel/deepedit/multilabel/transforms.py b/monailabel/deepedit/multilabel/transforms.py index af432baa3..4f9ba0096 100644 --- a/monailabel/deepedit/multilabel/transforms.py +++ b/monailabel/deepedit/multilabel/transforms.py @@ -19,6 +19,7 @@ from monai.config import KeysCollection from monai.networks.layers import GaussianFilter from monai.transforms.transform import MapTransform, Randomizable, Transform +from monai.utils import deprecated from skimage import measure logger = logging.getLogger(__name__) @@ -28,6 +29,7 @@ distance_transform_cdt, _ = optional_import("scipy.ndimage.morphology", name="distance_transform_cdt") +@deprecated(since="0.4", msg_suffix="use monai.apps.deepedit.transforms.DiscardAddGuidanced instead.") class DiscardAddGuidanced(MapTransform): def __init__( self, @@ -43,6 +45,10 @@ def __init__( :param keys: The ``keys`` parameter will be used to get and set the actual data item to transform :param number_intensity_ch: number of intensity channels :param probability: probability of discarding clicks + + .. deprecated:: 0.4 + Use :py:class:`monai.apps.deepedit.transforms.DiscardAddGuidanced` instead. + """ super().__init__(keys, allow_missing_keys) @@ -73,6 +79,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda return d +@deprecated(since="0.4", msg_suffix="use monai.apps.deepedit.transforms.NormalizeLabelsInDatasetd instead.") # Transform for multilabel DeepEdit segmentation class NormalizeLabelsInDatasetd(MapTransform): def __init__( @@ -86,6 +93,10 @@ def __init__( :param keys: The ``keys`` parameter will be used to get and set the actual data item to transform :param label_names: all label names + + .. deprecated:: 0.4 + Use :py:class:`monai.apps.deepedit.transforms.NormalizeLabelsInDatasetd` instead. + """ super().__init__(keys, allow_missing_keys) @@ -113,6 +124,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda return d +@deprecated(since="0.4", msg_suffix="use monai.apps.deepedit.transforms.SingleLabelSelectiond instead.") # One label at a time transform - DeepEdit class SingleLabelSelectiond(MapTransform): def __init__( @@ -126,6 +138,9 @@ def __init__( :param keys: The ``keys`` parameter will be used to get and set the actual data item to transform :param label_names: all label names + + .. deprecated:: 0.4 + Use :py:class:`monai.apps.deepedit.transforms.SingleLabelSelectiond` instead. """ super().__init__(keys, allow_missing_keys) @@ -164,6 +179,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda return d +@deprecated(since="0.4", msg_suffix="use monai.apps.deepedit.transforms.AddGuidanceSignalDeepEditd instead.") class AddGuidanceSignalCustomd(MapTransform): """ Add Guidance signal for input image. 
Multilabel DeepEdit @@ -175,6 +191,9 @@ class AddGuidanceSignalCustomd(MapTransform): guidance: key to store guidance. sigma: standard deviation for Gaussian kernel. number_intensity_ch: channel index. + + .. deprecated:: 0.4 + Use :py:class:`monai.apps.deepedit.transforms.AddGuidanceSignalDeepEditd` instead. """ def __init__( @@ -253,6 +272,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda return d +@deprecated(since="0.4", msg_suffix="use monai.apps.deepedit.transforms.FindAllValidSlicesMissingLabelsd instead.") class FindAllValidSlicesCustomd(MapTransform): """ Find/List all valid slices in the labels. @@ -260,6 +280,9 @@ class FindAllValidSlicesCustomd(MapTransform): Args: label: key to the label source. sids: key to store slices indices having valid label map. + + .. deprecated:: 0.4 + Use :py:class:`monai.apps.deepedit.transforms.FindAllValidSlicesMissingLabelsd` instead. """ def __init__( @@ -301,6 +324,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda return d +@deprecated(since="0.4", msg_suffix="use monai.apps.deepedit.transforms.AddInitialSeedPointMissingLabelsd instead.") class AddInitialSeedPointCustomd(Randomizable, MapTransform): """ Add random guidance as initial seed point for a given label. @@ -313,6 +337,10 @@ class AddInitialSeedPointCustomd(Randomizable, MapTransform): sids: key that represents lists of valid slice indices for the given label. sid: key that represents the slice to add initial seed point. If not present, random sid will be chosen. connected_regions: maximum connected regions to use for adding initial points. + + + .. deprecated:: 0.4 + Use :py:class:`monai.apps.deepedit.transforms.AddInitialSeedPointMissingLabelsd` instead. """ def __init__( @@ -432,6 +460,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda return d +@deprecated(since="0.4", msg_suffix="use monai.apps.deepedit.transforms.FindDiscrepancyRegionsDeepEditd instead.") class FindDiscrepancyRegionsCustomd(MapTransform): """ Find discrepancy between prediction and actual during click interactions during training. @@ -440,6 +469,9 @@ class FindDiscrepancyRegionsCustomd(MapTransform): label: key to label source. pred: key to prediction source. discrepancy: key to store discrepancies found between label and prediction. + + .. deprecated:: 0.4 + Use :py:class:`monai.apps.deepedit.transforms.FindDiscrepancyRegionsDeepEditd` instead. """ def __init__( @@ -503,6 +535,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda return d +@deprecated(since="0.4", msg_suffix="use monai.apps.deepedit.transforms.AddRandomGuidanceDeepEditd instead.") class AddRandomGuidanceCustomd(Randomizable, MapTransform): """ Add random guidance based on discrepancies that were found between label and prediction. @@ -511,6 +544,10 @@ class AddRandomGuidanceCustomd(Randomizable, MapTransform): guidance: key to guidance source, shape (2, N, # of dim) discrepancy: key to discrepancy map between label and prediction shape (2, C, H, W, D) or (2, C, H, W) probability: key to click/interaction probability, shape (1) + + .. deprecated:: 0.4 + Use :py:class:`monai.apps.deepedit.transforms.AddRandomGuidanceDeepEditd` instead. 
+ """ def __init__( @@ -631,6 +668,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda return d +@deprecated(since="0.4", msg_suffix="use monai.apps.deepedit.transforms.AddGuidanceFromPointsDeepEditd instead.") class AddGuidanceFromPointsCustomd(Transform): """ Add guidance based on user clicks. ONLY WORKS FOR 3D @@ -650,6 +688,10 @@ class AddGuidanceFromPointsCustomd(Transform): For example, to handle key `image`, read/write affine matrices from the metadata `image_meta_dict` dictionary's `affine` field. + + .. deprecated:: 0.4 + Use :py:class:`monai.apps.deepedit.transforms.AddGuidanceFromPointsDeepEditd` instead. + """ def __init__( @@ -698,9 +740,13 @@ def __call__(self, data): return d +@deprecated(since="0.4", msg_suffix="use monai.apps.deepedit.transforms.ResizeGuidanceMultipleLabelDeepEditd instead.") class ResizeGuidanceMultipleLabelCustomd(Transform): """ Resize the guidance based on cropped vs resized image. + + .. deprecated:: 0.4 + Use :py:class:`monai.apps.deepedit.transforms.ResizeGuidanceMultipleLabelDeepEditd` instead. """ def __init__( @@ -733,10 +779,14 @@ def __call__(self, data): return d +@deprecated(since="0.4", msg_suffix="use monai.apps.deepedit.transforms.SplitPredsLabeld instead.") class SplitPredsLabeld(MapTransform): """ Split preds and labels for individual evaluation + .. deprecated:: 0.4 + Use :py:class:`monai.apps.deepedit.transforms.SplitPredsLabeld` instead. + """ def __init__( @@ -779,6 +829,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda return d +@deprecated(since="0.4", msg_suffix="use monai.apps.deepedit.transforms.AddInitialSeedPointMissingLabelsd instead.") class AddInitialSeedPointMissingLabelsd(Randomizable, MapTransform): """ Add random guidance as initial seed point for a given label. @@ -791,6 +842,9 @@ class AddInitialSeedPointMissingLabelsd(Randomizable, MapTransform): sids: key that represents lists of valid slice indices for the given label. sid: key that represents the slice to add initial seed point. If not present, random sid will be chosen. connected_regions: maximum connected regions to use for adding initial points. + + .. deprecated:: 0.4 + Use :py:class:`monai.apps.deepedit.transforms.AddInitialSeedPointMissingLabelsd` instead. """ def __init__( @@ -912,6 +966,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda return d +@deprecated(since="0.4", msg_suffix="use monai.apps.deepedit.transforms.FindAllValidSlicesMissingLabelsd instead.") class FindAllValidSlicesMissingLabelsd(MapTransform): """ Find/List all valid slices in the labels. @@ -919,6 +974,9 @@ class FindAllValidSlicesMissingLabelsd(MapTransform): Args: label: key to the label source. sids: key to store slices indices having valid label map. + + .. deprecated:: 0.4 + Use :py:class:`monai.apps.deepedit.transforms.FindAllValidSlicesMissingLabelsd` instead. 
""" def __init__( diff --git a/requirements.txt b/requirements.txt index 2b9b6e5af..283506a45 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ torch>=1.7 aiofiles==0.8.0 fastapi==0.73.0 SimpleITK>=2.1 -monai[nibabel, skimage, pillow, tensorboard, gdown, ignite, torchvision, itk, tqdm, lmdb, psutil, openslide]>=0.8.1 +monai-weekly[nibabel, skimage, pillow, tensorboard, gdown, ignite, torchvision, itk, tqdm, lmdb, psutil, openslide] PyYAML==6.0 python-multipart==0.0.5 requests-toolbelt==0.9.1 diff --git a/sample-apps/radiology/lib/infers/deepedit.py b/sample-apps/radiology/lib/infers/deepedit.py index ea542a99d..a858cf25f 100644 --- a/sample-apps/radiology/lib/infers/deepedit.py +++ b/sample-apps/radiology/lib/infers/deepedit.py @@ -10,6 +10,12 @@ # limitations under the License. from typing import Callable, Sequence, Union +from monai.apps.deepedit.transforms import ( + AddGuidanceFromPointsDeepEditd, + AddGuidanceSignalDeepEditd, + DiscardAddGuidanced, + ResizeGuidanceMultipleLabelDeepEditd, +) from monai.inferers import Inferer, SimpleInferer from monai.transforms import ( Activationsd, @@ -24,12 +30,6 @@ ToNumpyd, ) -from monailabel.deepedit.multilabel.transforms import ( - AddGuidanceFromPointsCustomd, - AddGuidanceSignalCustomd, - DiscardAddGuidanced, - ResizeGuidanceMultipleLabelCustomd, -) from monailabel.interfaces.tasks.infer import InferTask, InferType from monailabel.transform.post import Restored @@ -78,10 +78,10 @@ def pre_transforms(self, data=None): if self.type == InferType.DEEPEDIT: t.extend( [ - AddGuidanceFromPointsCustomd(ref_image="image", guidance="guidance", label_names=self.labels), + AddGuidanceFromPointsDeepEditd(ref_image="image", guidance="guidance", label_names=self.labels), Resized(keys="image", spatial_size=self.spatial_size, mode="area"), - ResizeGuidanceMultipleLabelCustomd(guidance="guidance", ref_image="image"), - AddGuidanceSignalCustomd( + ResizeGuidanceMultipleLabelDeepEditd(guidance="guidance", ref_image="image"), + AddGuidanceSignalDeepEditd( keys="image", guidance="guidance", number_intensity_ch=self.number_intensity_ch ), ] diff --git a/sample-apps/radiology/lib/infers/segmentation_spleen.py b/sample-apps/radiology/lib/infers/segmentation_spleen.py index 65833a00f..a8f832015 100644 --- a/sample-apps/radiology/lib/infers/segmentation_spleen.py +++ b/sample-apps/radiology/lib/infers/segmentation_spleen.py @@ -13,8 +13,8 @@ from monai.inferers import Inferer, SlidingWindowInferer from monai.transforms import ( Activationsd, - AddChanneld, AsDiscreted, + EnsureChannelFirstd, EnsureTyped, LoadImaged, ScaleIntensityRanged, @@ -54,7 +54,7 @@ def __init__( def pre_transforms(self, data=None) -> Sequence[Callable]: return [ LoadImaged(keys="image"), - AddChanneld(keys="image"), + EnsureChannelFirstd(keys="image"), Spacingd(keys="image", pixdim=[1.0, 1.0, 1.0]), ScaleIntensityRanged(keys="image", a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True), EnsureTyped(keys="image"), diff --git a/sample-apps/radiology/lib/trainers/deepedit.py b/sample-apps/radiology/lib/trainers/deepedit.py index 3da70f651..36a51a38d 100644 --- a/sample-apps/radiology/lib/trainers/deepedit.py +++ b/sample-apps/radiology/lib/trainers/deepedit.py @@ -11,6 +11,16 @@ import logging import torch +from monai.apps.deepedit.interaction import Interaction +from monai.apps.deepedit.transforms import ( + AddGuidanceSignalDeepEditd, + AddInitialSeedPointMissingLabelsd, + AddRandomGuidanceDeepEditd, + FindAllValidSlicesMissingLabelsd, + FindDiscrepancyRegionsDeepEditd, + 
NormalizeLabelsInDatasetd, + SplitPredsLabeld, +) from monai.handlers import MeanDice, from_engine from monai.inferers import SimpleInferer from monai.losses import DiceCELoss @@ -31,16 +41,6 @@ ) from monailabel.deepedit.handlers import TensorBoardImageHandler -from monailabel.deepedit.multilabel.interaction import Interaction -from monailabel.deepedit.multilabel.transforms import ( - AddGuidanceSignalCustomd, - AddInitialSeedPointMissingLabelsd, - AddRandomGuidanceCustomd, - FindAllValidSlicesMissingLabelsd, - FindDiscrepancyRegionsCustomd, - NormalizeLabelsInDatasetd, - SplitPredsLabeld, -) from monailabel.tasks.train.basic_train import BasicTrainTask, Context logger = logging.getLogger(__name__) @@ -85,14 +85,14 @@ def get_click_transforms(self, context: Context): AsDiscreted(keys="pred", argmax=True), ToNumpyd(keys=("image", "label", "pred")), # Transforms for click simulation - FindDiscrepancyRegionsCustomd(keys="label", pred="pred", discrepancy="discrepancy"), - AddRandomGuidanceCustomd( + FindDiscrepancyRegionsDeepEditd(keys="label", pred="pred", discrepancy="discrepancy"), + AddRandomGuidanceDeepEditd( keys="NA", guidance="guidance", discrepancy="discrepancy", probability="probability", ), - AddGuidanceSignalCustomd(keys="image", guidance="guidance", number_intensity_ch=self.number_intensity_ch), + AddGuidanceSignalDeepEditd(keys="image", guidance="guidance", number_intensity_ch=self.number_intensity_ch), # ToTensord(keys=("image", "label")), ] @@ -114,7 +114,7 @@ def train_pre_transforms(self, context: Context): # Transforms for click simulation FindAllValidSlicesMissingLabelsd(keys="label", sids="sids"), AddInitialSeedPointMissingLabelsd(keys="label", guidance="guidance", sids="sids"), - AddGuidanceSignalCustomd(keys="image", guidance="guidance", number_intensity_ch=self.number_intensity_ch), + AddGuidanceSignalDeepEditd(keys="image", guidance="guidance", number_intensity_ch=self.number_intensity_ch), # ToTensord(keys=("image", "label")), SelectItemsd(keys=("image", "label", "guidance", "label_names")), @@ -126,8 +126,7 @@ def train_post_transforms(self, context: Context): AsDiscreted( keys=("pred", "label"), argmax=(True, False), - to_onehot=(True, True), - n_classes=len(self._labels), + to_onehot=(len(self._labels), len(self._labels)), ), SplitPredsLabeld(keys="pred"), ] @@ -144,7 +143,7 @@ def val_pre_transforms(self, context: Context): # Transforms for click simulation FindAllValidSlicesMissingLabelsd(keys="label", sids="sids"), AddInitialSeedPointMissingLabelsd(keys="label", guidance="guidance", sids="sids"), - AddGuidanceSignalCustomd(keys="image", guidance="guidance", number_intensity_ch=self.number_intensity_ch), + AddGuidanceSignalDeepEditd(keys="image", guidance="guidance", number_intensity_ch=self.number_intensity_ch), # ToTensord(keys=("image", "label")), SelectItemsd(keys=("image", "label", "guidance", "label_names")), diff --git a/sample-apps/radiology/lib/trainers/segmentation_spleen.py b/sample-apps/radiology/lib/trainers/segmentation_spleen.py index 6b9e12aff..4f84e179b 100644 --- a/sample-apps/radiology/lib/trainers/segmentation_spleen.py +++ b/sample-apps/radiology/lib/trainers/segmentation_spleen.py @@ -16,9 +16,9 @@ from monai.optimizers import Novograd from monai.transforms import ( Activationsd, - AddChanneld, AsDiscreted, CropForegroundd, + EnsureChannelFirstd, EnsureTyped, LoadImaged, RandCropByPosNegLabeld, @@ -57,7 +57,7 @@ def loss_function(self, context: Context): def train_pre_transforms(self, context: Context): return [ LoadImaged(keys=("image", 
"label")), - AddChanneld(keys=("image", "label")), + EnsureChannelFirstd(keys=("image", "label")), Spacingd( keys=("image", "label"), pixdim=(1.0, 1.0, 1.0), @@ -95,7 +95,7 @@ def train_post_transforms(self, context: Context): def val_pre_transforms(self, context: Context): return [ LoadImaged(keys=("image", "label")), - AddChanneld(keys=("image", "label")), + EnsureChannelFirstd(keys=("image", "label")), Spacingd( keys=("image", "label"), pixdim=(1.0, 1.0, 1.0), diff --git a/setup.cfg b/setup.cfg index 7253e6dd3..d5516a109 100644 --- a/setup.cfg +++ b/setup.cfg @@ -27,7 +27,7 @@ install_requires = aiofiles==0.8.0 fastapi==0.73.0 SimpleITK>=2.1 - monai[nibabel, skimage, pillow, tensorboard, gdown, ignite, torchvision, itk, tqdm, lmdb, psutil, openslide]>=0.8.1 + monai-weekly[nibabel, skimage, pillow, tensorboard, gdown, ignite, torchvision, itk, tqdm, lmdb, psutil, openslide] PyYAML==6.0 python-multipart==0.0.5 requests-toolbelt==0.9.1 From acf2f3713146e2d2d0d7850e185ecb79a9bed083 Mon Sep 17 00:00:00 2001 From: SACHIDANAND ALLE Date: Mon, 16 May 2022 07:07:27 -0700 Subject: [PATCH 21/24] Mention python versions which are supported (#786) Signed-off-by: SACHIDANAND ALLE Signed-off-by: Markus Hinsche --- docs/source/installation.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/installation.rst b/docs/source/installation.rst index a5a3ac0b0..847c2f358 100644 --- a/docs/source/installation.rst +++ b/docs/source/installation.rst @@ -6,7 +6,7 @@ Prerequisites --------------- MONAI Label supports both **Ubuntu** and **Windows** OS with GPU/CUDA enabled. -Make sure you have python 3.x version environment with PyTorch and CUDA installed. +Make sure you have python 3.7/3.8/3.9 version environment with PyTorch and CUDA installed. MONAI Label features on other python version are not verified. - Install `Python `_ - Install the following Python libraries From ec4f069e6f4c25f643e171a3fea911744c10fd4e Mon Sep 17 00:00:00 2001 From: Andres Diaz-Pinto Date: Mon, 16 May 2022 15:27:15 +0100 Subject: [PATCH 22/24] Add original labels option Slicer UI (#785) * Add original labels option Slicer UI Signed-off-by: Andres Diaz-Pinto * Update Slicer module Signed-off-by: Andres Diaz-Pinto Signed-off-by: Markus Hinsche --- plugins/slicer/MONAILabel/MONAILabel.py | 42 +++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/plugins/slicer/MONAILabel/MONAILabel.py b/plugins/slicer/MONAILabel/MONAILabel.py index 801c984aa..5958f4c8e 100644 --- a/plugins/slicer/MONAILabel/MONAILabel.py +++ b/plugins/slicer/MONAILabel/MONAILabel.py @@ -151,6 +151,17 @@ def __init__(self, parent): ) allowOverlapCheckBox.connect("toggled(bool)", self.onUpdateAllowOverlap) + originalLabelCheckBox = qt.QCheckBox() + originalLabelCheckBox.checked = True + originalLabelCheckBox.toolTip = "Enable this option to first read original label (predictions)" + groupLayout.addRow("Original Labels:", originalLabelCheckBox) + parent.registerProperty( + "MONAILabel/originalLabel", + ctk.ctkBooleanMapper(originalLabelCheckBox, "checked", str(qt.SIGNAL("toggled(bool)"))), + "valueAsInt", + str(qt.SIGNAL("valueAsIntChanged(int)")), + ) + developerModeCheckBox = qt.QCheckBox() developerModeCheckBox.checked = False developerModeCheckBox.toolTip = "Enable this option to find options tab etc..." 
@@ -1206,6 +1217,37 @@ def onNextSampleButton(self): nodeNames=node_name, fileNames=image_name, uris=download_uri, checksums=checksum )[0] + if slicer.util.settingsValue("MONAILabel/originalLabel", True, converter=slicer.util.toBool): + try: + download_uri = f"{self.serverUrl()}/datastore/label?label={quote_plus(image_id)}&tag=original" + logging.info(download_uri) + + sampleDataLogic = SampleData.SampleDataLogic() + + originalNode = sampleDataLogic.downloadFromURL( + nodeNames="segmentation_" + image_id, + loadFileTypes="SegmentationFile", + fileNames=image_name, + uris=download_uri, + checksums=checksum, + )[0] + + previousSegmentation = self._segmentNode.GetSegmentation() + originalSegmentation = originalNode.GetSegmentation() + + for idx, label in enumerate(self.info.get("labels")): + segmentOriginal = originalSegmentation.GetSegment(f"Segment_{idx+1}") + segmentOriginal.SetName(label) + self._segmentNode.RemoveSegment(label) + + previousSegmentation.DeepCopy(originalSegmentation) + # Delete original segmentation node + slicer.mrmlScene.RemoveNode(originalNode) + self.showSegmentationsIn3D() + + except: + print("Original label not found ... ") + self.initSample(sample) except: From 71f5aa47e6e040a8b53aed2ba42d947dfc163db9 Mon Sep 17 00:00:00 2001 From: Markus Hinsche Date: Tue, 17 May 2022 13:25:55 +0200 Subject: [PATCH 23/24] Fix: Check first dimension for multichannel Signed-off-by: Markus Hinsche --- monailabel/transform/writer.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/monailabel/transform/writer.py b/monailabel/transform/writer.py index 26e917680..1d6728a8f 100644 --- a/monailabel/transform/writer.py +++ b/monailabel/transform/writer.py @@ -204,8 +204,16 @@ def __call__(self, data): return output_file, output_json - def is_multichannel_image(self, image_np): - return len(image_np.shape) == 4 and image_np.shape[-1] > 1 + def is_multichannel_image(self, image_np: np.ndarray) -> bool: + """Check if the provided image contains multiple channels + + Args: + image_np : Expected shape (channels, width, height, batch) + + Returns: + bool: If this is a multi-channel image or not + """ + return len(image_np.shape) == 4 and image_np.shape[0] > 1 class ClassificationWriter: From 26065862a5eff049a6c70786b515fc16f9fa5375 Mon Sep 17 00:00:00 2001 From: Markus Hinsche Date: Tue, 17 May 2022 13:26:23 +0200 Subject: [PATCH 24/24] Polish unit test to be more specific about dimensions Signed-off-by: Markus Hinsche --- tests/unit/transform/test_writer.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/unit/transform/test_writer.py b/tests/unit/transform/test_writer.py index 4eca71ab5..4251db783 100644 --- a/tests/unit/transform/test_writer.py +++ b/tests/unit/transform/test_writer.py @@ -19,7 +19,10 @@ }, ] -MULTI_CHANNEL_DATA = np.array([[[[1, 0], [0, 1], [1, 0]], [[0, 1], [1, 0], [0, 1]]]]).astype(np.float32) +CHANNELS = 2 +WIDTH = 15 +HEIGHT = 10 +MULTI_CHANNEL_DATA = np.zeros((CHANNELS, WIDTH, HEIGHT, 1)) COLOR_MAP = { # according to getLabelColor() [https://github.com/Project-MONAI/MONAILabel/blob/6cc72c542c9bc6c5181af89550e7e397537d74e3/plugins/slicer/MONAILabel/MONAILabel.py#L1485] # noqa @@ -50,8 +53,7 @@ def test_seg_nrrd(self, args, input_data): self.assertEqual(os.path.exists(output_file), True) arr_full, header = nrrd.read(output_file) - # DEBUG - print(header) + self.assertEqual(arr_full.shape, (CHANNELS, WIDTH, HEIGHT, 1)) space_directions_expected = np.array( [[np.nan, np.nan, np.nan], [-1.0, 0.0, 0.0], [0.0, -1.0, 
0.0], [0.0, 0.0, 1.0]]