diff --git a/docs/source/networks.rst b/docs/source/networks.rst index 31bc6de6f8..62a1c82f2f 100644 --- a/docs/source/networks.rst +++ b/docs/source/networks.rst @@ -256,6 +256,19 @@ Layers .. autoclass:: Flatten :members: +`Reshape` +~~~~~~~~~ +.. autoclass:: Reshape + :members: + +`separable_filtering` +~~~~~~~~~~~~~~~~~~~~~ +.. autofunction:: separable_filtering + +`apply_filter` +~~~~~~~~~~~~~~ +.. autofunction:: apply_filter + `GaussianFilter` ~~~~~~~~~~~~~~~~ .. autoclass:: GaussianFilter diff --git a/monai/networks/layers/__init__.py b/monai/networks/layers/__init__.py index b2defc703d..b6c13472c4 100644 --- a/monai/networks/layers/__init__.py +++ b/monai/networks/layers/__init__.py @@ -22,6 +22,7 @@ Reshape, SavitzkyGolayFilter, SkipConnection, + apply_filter, separable_filtering, ) from .spatial_transforms import AffineTransform, grid_count, grid_grad, grid_pull, grid_push diff --git a/monai/networks/layers/simplelayers.py b/monai/networks/layers/simplelayers.py index 0f9f78b4be..24fe91687b 100644 --- a/monai/networks/layers/simplelayers.py +++ b/monai/networks/layers/simplelayers.py @@ -27,6 +27,7 @@ SkipMode, look_up_option, optional_import, + version_leq, ) from monai.utils.misc import issequenceiterable @@ -35,15 +36,16 @@ fft, _ = optional_import("torch.fft") __all__ = [ - "SkipConnection", + "ChannelPad", "Flatten", "GaussianFilter", + "HilbertTransform", "LLTM", "Reshape", - "separable_filtering", "SavitzkyGolayFilter", - "HilbertTransform", - "ChannelPad", + "SkipConnection", + "apply_filter", + "separable_filtering", ] @@ -211,25 +213,97 @@ def separable_filtering(x: torch.Tensor, kernels: List[torch.Tensor], mode: str Args: x: the input image. must have shape (batch, channels, H[, W, ...]). kernels: kernel along each spatial dimension. - could be a single kernel (duplicated for all dimension), or `spatial_dims` number of kernels. + could be a single kernel (duplicated for all spatial dimensions), or + a list of `spatial_dims` number of kernels. mode (string, optional): padding mode passed to convolution class. ``'zeros'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Default: ``'zeros'``. Modes other than ``'zeros'`` require PyTorch version >= 1.5.1. See torch.nn.Conv1d() for more information. Raises: TypeError: When ``x`` is not a ``torch.Tensor``. + + Examples: + + .. code-block:: python + + >>> import torch + >>> from monai.networks.layers import separable_filtering + >>> img = torch.randn(2, 4, 32, 32) # batch_size 2, channels 4, 32x32 2D images + # applying a [-1, 0, 1] filter along each of the spatial dimensions. + # the output shape is the same as the input shape. + >>> out = separable_filtering(img, torch.tensor((-1., 0., 1.))) + # applying `[-1, 0, 1]`, `[1, 0, -1]` filters along two spatial dimensions respectively. + # the output shape is the same as the input shape. 
+        >>> out = separable_filtering(img, [torch.tensor((-1., 0., 1.)), torch.tensor((1., 0., -1.))])
+
     """
     if not isinstance(x, torch.Tensor):
         raise TypeError(f"x must be a torch.Tensor but is {type(x).__name__}.")
 
     spatial_dims = len(x.shape) - 2
-    _kernels = [s.float() for s in kernels]
+    if isinstance(kernels, torch.Tensor):
+        kernels = [kernels] * spatial_dims
+    _kernels = [s.to(x) for s in kernels]
     _paddings = [(k.shape[0] - 1) // 2 for k in _kernels]
     n_chs = x.shape[1]
     pad_mode = "constant" if mode == "zeros" else mode
 
-    return _separable_filtering_conv(x, kernels, pad_mode, spatial_dims - 1, spatial_dims, _paddings, n_chs)
+    return _separable_filtering_conv(x, _kernels, pad_mode, spatial_dims - 1, spatial_dims, _paddings, n_chs)
+
+
+def apply_filter(x: torch.Tensor, kernel: torch.Tensor, **kwargs) -> torch.Tensor:
+    """
+    Filtering `x` with `kernel` independently for each batch and channel.
+
+    Args:
+        x: the input image, must have shape (batch, channels, H[, W, D]).
+        kernel: `kernel` must at least have the spatial shape (H_k[, W_k, D_k]).
+            `kernel` shape must be broadcastable to the `batch` and `channels` dimensions of `x`.
+        kwargs: keyword arguments passed to `conv*d()` functions.
+
+    Returns:
+        The filtered `x`.
+
+    Examples:
+
+    .. code-block:: python
+
+        >>> import torch
+        >>> from monai.networks.layers import apply_filter
+        >>> img = torch.rand(2, 5, 10, 10)  # batch_size 2, channels 5, 10x10 2D images
+        >>> out = apply_filter(img, torch.rand(3, 3))  # spatial kernel
+        >>> out = apply_filter(img, torch.rand(5, 3, 3))  # channel-wise kernels
+        >>> out = apply_filter(img, torch.rand(2, 5, 3, 3))  # batch-, channel-wise kernels
+
+    """
+    if not isinstance(x, torch.Tensor):
+        raise TypeError(f"x must be a torch.Tensor but is {type(x).__name__}.")
+    batch, chns, *spatials = x.shape
+    n_spatial = len(spatials)
+    if n_spatial > 3:
+        raise NotImplementedError(f"Only spatial dimensions up to 3 are supported but got {n_spatial}.")
+    k_size = len(kernel.shape)
+    if k_size < n_spatial or k_size > n_spatial + 2:
+        raise ValueError(
+            f"kernel must have {n_spatial} ~ {n_spatial + 2} dimensions to match the input shape {x.shape}."
+        )
+    kernel = kernel.to(x)
+    # broadcast the kernel to shape (batch, chns, *spatial_kernel_shape)
+    kernel = kernel.expand(batch, chns, *kernel.shape[(k_size - n_spatial) :])
+    kernel = kernel.reshape(-1, 1, *kernel.shape[2:])  # (batch*chns, 1, *spatial_kernel_shape): one channel per group
+    x = x.view(1, kernel.shape[0], *spatials)
+    conv = [F.conv1d, F.conv2d, F.conv3d][n_spatial - 1]
+    if "padding" not in kwargs:
+        if version_leq(torch.__version__, "1.10.0b"):
+            # "same" padding is unavailable, compute it manually (even-sized kernels are not supported)
+            kwargs["padding"] = [(k - 1) // 2 for k in kernel.shape[2:]]
+        else:
+            kwargs["padding"] = "same"
+    if "stride" not in kwargs:
+        kwargs["stride"] = 1
+    output = conv(x, kernel, groups=kernel.shape[0], bias=None, **kwargs)
+    return output.view(batch, chns, *output.shape[2:])
 
 
 class SavitzkyGolayFilter(nn.Module):
diff --git a/monai/transforms/post/array.py b/monai/transforms/post/array.py
index 9498087476..7cbb6aad44 100644
--- a/monai/transforms/post/array.py
+++ b/monai/transforms/post/array.py
@@ -18,11 +18,10 @@
 
 import numpy as np
 import torch
-import torch.nn.functional as F
 
 from monai.config.type_definitions import NdarrayOrTensor
 from monai.networks import one_hot
-from monai.networks.layers import GaussianFilter
+from monai.networks.layers import GaussianFilter, apply_filter
 from monai.transforms.transform import Transform
 from monai.transforms.utils import fill_holes, get_largest_connected_component_mask
 from monai.transforms.utils_pytorch_numpy_unification import unravel_index
@@ -70,11 +69,11 @@ def __init__(self, sigmoid: bool = False, softmax: bool = False, other: Optional
 
     def __call__(
         self,
-        img: torch.Tensor,
+        img: NdarrayOrTensor,
         sigmoid: Optional[bool] = None,
         softmax: Optional[bool] = None,
         other: Optional[Callable] = None,
-    ) -> torch.Tensor:
+    ) -> NdarrayOrTensor:
         """
         Args:
             sigmoid: whether to execute sigmoid function on model output before transform.
@@ -96,17 +95,18 @@ def __call__( raise TypeError(f"other must be None or callable but is {type(other).__name__}.") # convert to float as activation must operate on float tensor - img = img.float() + img_t: torch.Tensor + img_t, *_ = convert_data_type(img, torch.Tensor, dtype=torch.float) # type: ignore if sigmoid or self.sigmoid: - img = torch.sigmoid(img) + img_t = torch.sigmoid(img_t) if softmax or self.softmax: - img = torch.softmax(img, dim=0) + img_t = torch.softmax(img_t, dim=0) act_func = self.other if other is None else other if act_func is not None: - img = act_func(img) - - return img + img_t = act_func(img_t) + out, *_ = convert_to_dst_type(img_t, img) + return out class AsDiscrete(Transform): @@ -164,7 +164,7 @@ def __init__( @deprecated_arg("n_classes", since="0.6") def __call__( self, - img: torch.Tensor, + img: NdarrayOrTensor, argmax: Optional[bool] = None, to_onehot: Optional[bool] = None, num_classes: Optional[int] = None, @@ -172,7 +172,7 @@ def __call__( logit_thresh: Optional[float] = None, rounding: Optional[str] = None, n_classes: Optional[int] = None, - ) -> torch.Tensor: + ) -> NdarrayOrTensor: """ Args: img: the input tensor data to convert, if no channel dimension when converting to `One-Hot`, @@ -197,24 +197,27 @@ def __call__( # in case the new num_classes is default but you still call deprecated n_classes if n_classes is not None and num_classes is None: num_classes = n_classes + img_t: torch.Tensor + img_t, *_ = convert_data_type(img, torch.Tensor) # type: ignore if argmax or self.argmax: - img = torch.argmax(img, dim=0, keepdim=True) + img_t = torch.argmax(img_t, dim=0, keepdim=True) if to_onehot or self.to_onehot: _nclasses = self.num_classes if num_classes is None else num_classes if not isinstance(_nclasses, int): raise AssertionError("One of self.num_classes or num_classes must be an integer") - img = one_hot(img, num_classes=_nclasses, dim=0) + img_t = one_hot(img_t, num_classes=_nclasses, dim=0) if threshold_values or self.threshold_values: - img = img >= (self.logit_thresh if logit_thresh is None else logit_thresh) + img_t = img_t >= (self.logit_thresh if logit_thresh is None else logit_thresh) rounding = self.rounding if rounding is None else rounding if rounding is not None: look_up_option(rounding, ["torchrounding"]) - img = torch.round(img) + img_t = torch.round(img_t) - return img.float() + img, *_ = convert_to_dst_type(img_t, img, dtype=torch.float) + return img class KeepLargestConnectedComponent(Transform): @@ -275,7 +278,7 @@ def __init__( If the data is in one-hot format, this is used to determine which channels to apply. independent: whether to treat ``applied_labels`` as a union of foreground labels. If ``True``, the connected component analysis will be performed on each foreground label independently - and return the intersection of the largest component. + and return the intersection of the largest components. If ``False``, the analysis will be performed on the union of foreground labels. default is `True`. connectivity: Maximum number of orthogonal hops to consider a pixel/voxel as a neighbor. 
@@ -368,7 +371,7 @@ def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
         if isinstance(img, torch.Tensor):
             if hasattr(torch, "isin"):
                 appl_lbls = torch.as_tensor(self.applied_labels, device=img.device)
-                return torch.where(torch.isin(img, appl_lbls), img, 0)
+                return torch.where(torch.isin(img, appl_lbls), img, torch.tensor(0.0).to(img))
             else:
                 out = self(img.detach().cpu().numpy())
                 out, *_ = convert_to_dst_type(out, img)
@@ -460,7 +463,7 @@ def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
 
 class LabelToContour(Transform):
     """
-    Return the contour of binary input images that only compose of 0 and 1, with Laplace kernel
+    Return the contour of binary input images that are composed only of 0 and 1, with the Laplacian kernel
     set as default for edge detection. Typical usage is to plot the edge of label or segmentation output.
 
     Args:
@@ -471,12 +474,14 @@ class LabelToContour(Transform):
 
     """
 
+    backend = [TransformBackends.TORCH]
+
     def __init__(self, kernel_type: str = "Laplace") -> None:
         if kernel_type != "Laplace":
             raise NotImplementedError('Currently only kernel_type="Laplace" is supported.')
         self.kernel_type = kernel_type
 
-    def __call__(self, img: torch.Tensor) -> torch.Tensor:
+    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
         """
         Args:
             img: torch tensor data to extract the contour, with shape: [channels, height, width[, depth]]
@@ -492,22 +497,20 @@ def __call__(self, img: torch.Tensor) -> torch.Tensor:
             ideally the edge should be thin enough, but now it has a thickness.
 
         """
-        channels = img.shape[0]
-        img_ = img.unsqueeze(0)
-        if img.ndimension() == 3:
-            kernel = torch.tensor([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]], dtype=torch.float32, device=img.device)
-            kernel = kernel.repeat(channels, 1, 1, 1)
-            contour_img = F.conv2d(img_, kernel, bias=None, stride=1, padding=1, dilation=1, groups=channels)
-        elif img.ndimension() == 4:
-            kernel = -1 * torch.ones(3, 3, 3, dtype=torch.float32, device=img.device)
-            kernel[1, 1, 1] = 26
-            kernel = kernel.repeat(channels, 1, 1, 1, 1)
-            contour_img = F.conv3d(img_, kernel, bias=None, stride=1, padding=1, dilation=1, groups=channels)
+        img_: torch.Tensor = convert_data_type(img, torch.Tensor)[0]  # type: ignore
+        spatial_dims = len(img_.shape) - 1
+        img_ = img_.unsqueeze(0)  # adds a batch dim
+        if spatial_dims == 2:
+            kernel = torch.tensor([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]], dtype=torch.float32)
+        elif spatial_dims == 3:
+            kernel = -1.0 * torch.ones(3, 3, 3, dtype=torch.float32)
+            kernel[1, 1, 1] = 26.0
         else:
-            raise ValueError(f"Unsupported img dimension: {img.ndimension()}, available options are [4, 5].")
-
+            raise ValueError(f"{self.__class__} can only handle 2D or 3D images.")
+        contour_img = apply_filter(img_, kernel)
         contour_img.clamp_(min=0.0, max=1.0)
-        return contour_img.squeeze(0)
+        output, *_ = convert_to_dst_type(contour_img.squeeze(0), img)
+        return output
 
 
 class Ensemble:
@@ -528,7 +531,7 @@ def post_convert(img: torch.Tensor, orig_img: Union[Sequence[NdarrayOrTensor], N
         return out
 
 
-class MeanEnsemble(Ensemble):
+class MeanEnsemble(Ensemble, Transform):
     """
     Execute mean ensemble on the input data.
The input data can be a list or tuple of PyTorch Tensor with shape: [C[, H, W, D]], @@ -551,6 +554,8 @@ class MeanEnsemble(Ensemble): """ + backend = [TransformBackends.TORCH] + def __init__(self, weights: Optional[Union[Sequence[float], NdarrayOrTensor]] = None) -> None: self.weights = torch.as_tensor(weights, dtype=torch.float) if weights is not None else None @@ -569,7 +574,7 @@ def __call__(self, img: Union[Sequence[NdarrayOrTensor], NdarrayOrTensor]) -> Nd return self.post_convert(out_pt, img) -class VoteEnsemble(Ensemble): +class VoteEnsemble(Ensemble, Transform): """ Execute vote ensemble on the input data. The input data can be a list or tuple of PyTorch Tensor with shape: [C[, H, W, D]], @@ -589,6 +594,8 @@ class VoteEnsemble(Ensemble): """ + backend = [TransformBackends.TORCH] + def __init__(self, num_classes: Optional[int] = None) -> None: self.num_classes = num_classes @@ -665,9 +672,9 @@ def __init__( self.prob_threshold = prob_threshold if isinstance(box_size, int): self.box_size = np.asarray([box_size] * spatial_dims) + elif len(box_size) != spatial_dims: + raise ValueError("the sequence length of box_size should be the same as spatial_dims.") else: - if len(box_size) != spatial_dims: - raise ValueError("the sequence length of box_size should be the same as spatial_dims.") self.box_size = np.asarray(box_size) if self.box_size.min() <= 0: raise ValueError("box_size should be larger than 0.") diff --git a/monai/transforms/post/dictionary.py b/monai/transforms/post/dictionary.py index 19a7bc9359..596b4b3a21 100644 --- a/monai/transforms/post/dictionary.py +++ b/monai/transforms/post/dictionary.py @@ -116,7 +116,7 @@ def __init__( self.other = ensure_tuple_rep(other, len(self.keys)) self.converter = Activations() - def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> Dict[Hashable, torch.Tensor]: + def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) for key, sigmoid, softmax, other in self.key_iterator(d, self.sigmoid, self.softmax, self.other): d[key] = self.converter(d[key], sigmoid, softmax, other) @@ -178,7 +178,7 @@ def __init__( self.rounding = ensure_tuple_rep(rounding, len(self.keys)) self.converter = AsDiscrete() - def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> Dict[Hashable, torch.Tensor]: + def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) for key, argmax, to_onehot, num_classes, threshold_values, logit_thresh, rounding in self.key_iterator( d, self.argmax, self.to_onehot, self.num_classes, self.threshold_values, self.logit_thresh, self.rounding @@ -211,7 +211,7 @@ def __init__( If the data is in one-hot format, this is the channel indices to apply transform. independent: whether to treat ``applied_labels`` as a union of foreground labels. If ``True``, the connected component analysis will be performed on each foreground label independently - and return the intersection of the largest component. + and return the intersection of the largest components. If ``False``, the analysis will be performed on the union of foreground labels. default is `True`. connectivity: Maximum number of orthogonal hops to consider a pixel/voxel as a neighbor. @@ -300,6 +300,8 @@ class LabelToContourd(MapTransform): Dictionary-based wrapper of :py:class:`monai.transforms.LabelToContour`. 
""" + backend = LabelToContour.backend + def __init__(self, keys: KeysCollection, kernel_type: str = "Laplace", allow_missing_keys: bool = False) -> None: """ Args: @@ -312,7 +314,7 @@ def __init__(self, keys: KeysCollection, kernel_type: str = "Laplace", allow_mis super().__init__(keys, allow_missing_keys) self.converter = LabelToContour(kernel_type=kernel_type) - def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> Dict[Hashable, torch.Tensor]: + def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) for key in self.key_iterator(d): d[key] = self.converter(d[key]) @@ -325,6 +327,8 @@ class Ensembled(MapTransform): """ + backend = list(set(VoteEnsemble.backend) & set(MeanEnsemble.backend)) + def __init__( self, keys: KeysCollection, @@ -371,6 +375,8 @@ class MeanEnsembled(Ensembled): Dictionary-based wrapper of :py:class:`monai.transforms.MeanEnsemble`. """ + backend = MeanEnsemble.backend + def __init__( self, keys: KeysCollection, @@ -404,6 +410,8 @@ class VoteEnsembled(Ensembled): Dictionary-based wrapper of :py:class:`monai.transforms.VoteEnsemble`. """ + backend = VoteEnsemble.backend + def __init__( self, keys: KeysCollection, output_key: Optional[str] = None, num_classes: Optional[int] = None ) -> None: diff --git a/monai/transforms/utils_create_transform_ims.py b/monai/transforms/utils_create_transform_ims.py index 3325b987dc..84aaa348fe 100644 --- a/monai/transforms/utils_create_transform_ims.py +++ b/monai/transforms/utils_create_transform_ims.py @@ -240,9 +240,6 @@ def pre_process_data(data, ndim, is_map, is_post): if ndim == 2: for k in keys: data[k] = data[k][..., data[k].shape[-1] // 2] - if is_post: - for k in keys: - data[k] = torch.as_tensor(data[k]) if is_map: return data diff --git a/tests/test_activations.py b/tests/test_activations.py index 7d8b3e4c38..92b1e1d945 100644 --- a/tests/test_activations.py +++ b/tests/test_activations.py @@ -16,27 +16,36 @@ from monai.networks.layers.factories import Act from monai.transforms import Activations - -TEST_CASE_1 = [ - {"sigmoid": True, "softmax": False, "other": None}, - torch.tensor([[[0.0, 1.0], [2.0, 3.0]]]), - torch.tensor([[[0.5000, 0.7311], [0.8808, 0.9526]]]), - (1, 2, 2), -] - -TEST_CASE_2 = [ - {"sigmoid": False, "softmax": True, "other": None}, - torch.tensor([[[0.0, 1.0]], [[2.0, 3.0]]]), - torch.tensor([[[0.1192, 0.1192]], [[0.8808, 0.8808]]]), - (2, 1, 2), -] - -TEST_CASE_3 = [ - {"sigmoid": False, "softmax": False, "other": torch.tanh}, - torch.tensor([[[0.0, 1.0], [2.0, 3.0]]]), - torch.tensor([[[0.0000, 0.7616], [0.9640, 0.9951]]]), - (1, 2, 2), -] +from tests.utils import TEST_NDARRAYS, assert_allclose + +TEST_CASES = [] +for p in TEST_NDARRAYS: + TEST_CASES.append( + [ + {"sigmoid": True, "softmax": False, "other": None}, + p([[[0.0, 1.0], [2.0, 3.0]]]), + p([[[0.5000, 0.7311], [0.8808, 0.9526]]]), + (1, 2, 2), + ] + ) + + TEST_CASES.append( + [ + {"sigmoid": False, "softmax": True, "other": None}, + p([[[0.0, 1.0]], [[2.0, 3.0]]]), + p([[[0.1192, 0.1192]], [[0.8808, 0.8808]]]), + (2, 1, 2), + ] + ) + + TEST_CASES.append( + [ + {"sigmoid": False, "softmax": False, "other": torch.tanh}, + p([[[0.0, 1.0], [2.0, 3.0]]]), + p([[[0.0000, 0.7616], [0.9640, 0.9951]]]), + (1, 2, 2), + ] + ) TEST_CASE_4 = [ "swish", @@ -67,12 +76,12 @@ class TestActivations(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3]) + @parameterized.expand(TEST_CASES[:3]) def test_value_shape(self, input_param, img, out, expected_shape): result 
= Activations(**input_param)(img) def _compare(ret, out, shape): - torch.testing.assert_allclose(ret, out) + assert_allclose(ret, out, rtol=1e-3) self.assertTupleEqual(ret.shape, shape) if isinstance(result, (list, tuple)): diff --git a/tests/test_activationsd.py b/tests/test_activationsd.py index 0a981f27e8..2c522d36ed 100644 --- a/tests/test_activationsd.py +++ b/tests/test_activationsd.py @@ -15,40 +15,46 @@ from parameterized import parameterized from monai.transforms import Activationsd - -TEST_CASE_1 = [ - {"keys": ["pred", "label"], "sigmoid": False, "softmax": [True, False], "other": None}, - {"pred": torch.tensor([[[0.0, 1.0]], [[2.0, 3.0]]]), "label": torch.tensor([[[0.0, 1.0]], [[2.0, 3.0]]])}, - { - "pred": torch.tensor([[[0.1192, 0.1192]], [[0.8808, 0.8808]]]), - "label": torch.tensor([[[0.0, 1.0]], [[2.0, 3.0]]]), - }, - (2, 1, 2), -] - -TEST_CASE_2 = [ - {"keys": ["pred", "label"], "sigmoid": False, "softmax": False, "other": [torch.tanh, None]}, - {"pred": torch.tensor([[[0.0, 1.0], [2.0, 3.0]]]), "label": torch.tensor([[[0.0, 1.0], [2.0, 3.0]]])}, - {"pred": torch.tensor([[[0.0000, 0.7616], [0.9640, 0.9951]]]), "label": torch.tensor([[[0.0, 1.0], [2.0, 3.0]]])}, - (1, 2, 2), -] - -TEST_CASE_3 = [ - {"keys": "pred", "sigmoid": False, "softmax": False, "other": torch.tanh}, - {"pred": torch.tensor([[[0.0, 1.0], [2.0, 3.0]]])}, - {"pred": torch.tensor([[[0.0000, 0.7616], [0.9640, 0.9951]]])}, - (1, 2, 2), -] +from tests.utils import TEST_NDARRAYS, assert_allclose + +TEST_CASES = [] +for p in TEST_NDARRAYS: + TEST_CASES.append( + [ + {"keys": ["pred", "label"], "sigmoid": False, "softmax": [True, False], "other": None}, + {"pred": p([[[0.0, 1.0]], [[2.0, 3.0]]]), "label": p([[[0.0, 1.0]], [[2.0, 3.0]]])}, + {"pred": p([[[0.1192, 0.1192]], [[0.8808, 0.8808]]]), "label": p([[[0.0, 1.0]], [[2.0, 3.0]]])}, + (2, 1, 2), + ] + ) + + TEST_CASES.append( + [ + {"keys": ["pred", "label"], "sigmoid": False, "softmax": False, "other": [torch.tanh, None]}, + {"pred": p([[[0.0, 1.0], [2.0, 3.0]]]), "label": p([[[0.0, 1.0], [2.0, 3.0]]])}, + {"pred": p([[[0.0000, 0.7616], [0.9640, 0.9951]]]), "label": p([[[0.0, 1.0], [2.0, 3.0]]])}, + (1, 2, 2), + ] + ) + + TEST_CASES.append( + [ + {"keys": "pred", "sigmoid": False, "softmax": False, "other": torch.tanh}, + {"pred": p([[[0.0, 1.0], [2.0, 3.0]]])}, + {"pred": p([[[0.0000, 0.7616], [0.9640, 0.9951]]])}, + (1, 2, 2), + ] + ) class TestActivationsd(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3]) + @parameterized.expand(TEST_CASES) def test_value_shape(self, input_param, test_input, output, expected_shape): result = Activationsd(**input_param)(test_input) - torch.testing.assert_allclose(result["pred"], output["pred"]) + assert_allclose(result["pred"], output["pred"], rtol=1e-3) self.assertTupleEqual(result["pred"].shape, expected_shape) if "label" in result: - torch.testing.assert_allclose(result["label"], output["label"]) + assert_allclose(result["label"], output["label"], rtol=1e-3) self.assertTupleEqual(result["label"].shape, expected_shape) diff --git a/tests/test_apply_filter.py b/tests/test_apply_filter.py new file mode 100644 index 0000000000..6bddfb5cf2 --- /dev/null +++ b/tests/test_apply_filter.py @@ -0,0 +1,87 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np +import torch + +from monai.networks.layers import apply_filter + + +class ApplyFilterTestCase(unittest.TestCase): + def test_1d(self): + a = torch.tensor([[list(range(10))]], dtype=torch.float) + out = apply_filter(a, torch.tensor([-1, 0, 1]), stride=1) + expected = np.array([[[1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, -8.0]]]) + np.testing.assert_allclose(out.cpu().numpy(), expected, rtol=1e-4) + if torch.cuda.is_available(): + out = apply_filter(a.cuda(), torch.tensor([-1, 0, 1]).cuda()) + np.testing.assert_allclose(out.cpu().numpy(), expected, rtol=1e-4) + + def test_2d(self): + a = torch.tensor([[[list(range(7)), list(range(7, 0, -1)), list(range(7))]]], dtype=torch.float) + expected = np.array( + [ + [14.0, 21.0, 21.0, 21.0, 21.0, 21.0, 14.0], + [15.0, 24.0, 27.0, 30.0, 33.0, 36.0, 25.0], + [14.0, 21.0, 21.0, 21.0, 21.0, 21.0, 14.0], + ] + ) + expected = expected[None][None] + out = apply_filter(a, torch.tensor([[1, 1, 1], [1, 1, 1], [1, 1, 1]])) + np.testing.assert_allclose(out.cpu().numpy(), expected, rtol=1e-4) + if torch.cuda.is_available(): + out = apply_filter(a.cuda(), torch.tensor([[1, 1, 1], [1, 1, 1], [1, 1, 1]]).cuda()) + np.testing.assert_allclose(out.cpu().numpy(), expected, rtol=1e-4) + + def test_3d(self): + a = torch.tensor( + [[list(range(7)), list(range(7)), list(range(7))], [list(range(7)), list(range(7)), list(range(7))]], + dtype=torch.float, + ) + a = a[None][None] + a = a.expand(2, 3, -1, -1, -1) + expected = np.array( + [ + [ + [2.0, 6.0, 12.0, 18.0, 24.0, 30.0, 22.0], + [3.0, 9.0, 18.0, 27.0, 36.0, 45.0, 33.0], + [2.0, 6.0, 12.0, 18.0, 24.0, 30.0, 22.0], + ], + [ + [2.0, 6.0, 12.0, 18.0, 24.0, 30.0, 22.0], + [3.0, 9.0, 18.0, 27.0, 36.0, 45.0, 33.0], + [2.0, 6.0, 12.0, 18.0, 24.0, 30.0, 22.0], + ], + ] + ) + expected = expected + # testing shapes + k = torch.tensor([[[1, 1, 1], [1, 1, 1], [1, 1, 1]]]) + for kernel in (k, k[None], k[None][None]): + out = apply_filter(a, kernel) + np.testing.assert_allclose(out.cpu().numpy()[1][2], expected, rtol=1e-4) + if torch.cuda.is_available(): + out = apply_filter(a.cuda(), kernel.cuda()) + np.testing.assert_allclose(out.cpu().numpy()[0][1], expected, rtol=1e-4) + + def test_wrong_args(self): + with self.assertRaisesRegex(ValueError, ""): + apply_filter(torch.ones((1, 2, 3, 2)), torch.ones((2,))) + with self.assertRaisesRegex(NotImplementedError, ""): + apply_filter(torch.ones((1, 1, 1, 2, 3, 2)), torch.ones((2,))) + with self.assertRaisesRegex(TypeError, ""): + apply_filter(((1, 1, 1, 2, 3, 2)), torch.ones((2,))) # type: ignore + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_as_discrete.py b/tests/test_as_discrete.py index bb9457a357..75f6a38d3c 100644 --- a/tests/test_as_discrete.py +++ b/tests/test_as_discrete.py @@ -11,52 +11,52 @@ import unittest -import torch from parameterized import parameterized from monai.transforms import AsDiscrete +from tests.utils import TEST_NDARRAYS, assert_allclose -TEST_CASE_1 = [ - {"argmax": True, "to_onehot": False, "num_classes": None, "threshold_values": False, "logit_thresh": 0.5}, - torch.tensor([[[0.0, 1.0]], 
[[2.0, 3.0]]]), - torch.tensor([[[1.0, 1.0]]]), - (1, 1, 2), -] +TEST_CASES = [] +for p in TEST_NDARRAYS: + TEST_CASES.append( + [ + {"argmax": True, "to_onehot": False, "num_classes": None, "threshold_values": False, "logit_thresh": 0.5}, + p([[[0.0, 1.0]], [[2.0, 3.0]]]), + p([[[1.0, 1.0]]]), + (1, 1, 2), + ] + ) -TEST_CASE_2 = [ - {"argmax": True, "to_onehot": True, "num_classes": 2, "threshold_values": False, "logit_thresh": 0.5}, - torch.tensor([[[0.0, 1.0]], [[2.0, 3.0]]]), - torch.tensor([[[0.0, 0.0]], [[1.0, 1.0]]]), - (2, 1, 2), -] + TEST_CASES.append( + [ + {"argmax": True, "to_onehot": True, "num_classes": 2, "threshold_values": False, "logit_thresh": 0.5}, + p([[[0.0, 1.0]], [[2.0, 3.0]]]), + p([[[0.0, 0.0]], [[1.0, 1.0]]]), + (2, 1, 2), + ] + ) -TEST_CASE_3 = [ - {"argmax": False, "to_onehot": False, "num_classes": None, "threshold_values": True, "logit_thresh": 0.6}, - torch.tensor([[[0.0, 1.0], [2.0, 3.0]]]), - torch.tensor([[[0.0, 1.0], [1.0, 1.0]]]), - (1, 2, 2), -] + TEST_CASES.append( + [ + {"argmax": False, "to_onehot": False, "num_classes": None, "threshold_values": True, "logit_thresh": 0.6}, + p([[[0.0, 1.0], [2.0, 3.0]]]), + p([[[0.0, 1.0], [1.0, 1.0]]]), + (1, 2, 2), + ] + ) -TEST_CASE_4 = [ - {"argmax": False, "to_onehot": True, "num_classes": 3}, - torch.tensor(1), - torch.tensor([0.0, 1.0, 0.0]), - (3,), -] + TEST_CASES.append([{"argmax": False, "to_onehot": True, "num_classes": 3}, p(1), p([0.0, 1.0, 0.0]), (3,)]) -TEST_CASE_5 = [ - {"rounding": "torchrounding"}, - torch.tensor([[[0.123, 1.345], [2.567, 3.789]]]), - torch.tensor([[[0.0, 1.0], [3.0, 4.0]]]), - (1, 2, 2), -] + TEST_CASES.append( + [{"rounding": "torchrounding"}, p([[[0.123, 1.345], [2.567, 3.789]]]), p([[[0.0, 1.0], [3.0, 4.0]]]), (1, 2, 2)] + ) class TestAsDiscrete(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5]) + @parameterized.expand(TEST_CASES) def test_value_shape(self, input_param, img, out, expected_shape): result = AsDiscrete(**input_param)(img) - torch.testing.assert_allclose(result, out) + assert_allclose(result, out, rtol=1e-3) self.assertTupleEqual(result.shape, expected_shape) diff --git a/tests/test_as_discreted.py b/tests/test_as_discreted.py index 90e98b297b..dc160d5e46 100644 --- a/tests/test_as_discreted.py +++ b/tests/test_as_discreted.py @@ -11,69 +11,79 @@ import unittest -import torch from parameterized import parameterized from monai.transforms import AsDiscreted +from tests.utils import TEST_NDARRAYS, assert_allclose -TEST_CASE_1 = [ - { - "keys": ["pred", "label"], - "argmax": [True, False], - "to_onehot": True, - "num_classes": 2, - "threshold_values": False, - "logit_thresh": 0.5, - }, - {"pred": torch.tensor([[[0.0, 1.0]], [[2.0, 3.0]]]), "label": torch.tensor([[[0, 1]]])}, - {"pred": torch.tensor([[[0.0, 0.0]], [[1.0, 1.0]]]), "label": torch.tensor([[[1.0, 0.0]], [[0.0, 1.0]]])}, - (2, 1, 2), -] +TEST_CASES = [] +for p in TEST_NDARRAYS: + TEST_CASES.append( + [ + { + "keys": ["pred", "label"], + "argmax": [True, False], + "to_onehot": True, + "num_classes": 2, + "threshold_values": False, + "logit_thresh": 0.5, + }, + {"pred": p([[[0.0, 1.0]], [[2.0, 3.0]]]), "label": p([[[0, 1]]])}, + {"pred": p([[[0.0, 0.0]], [[1.0, 1.0]]]), "label": p([[[1.0, 0.0]], [[0.0, 1.0]]])}, + (2, 1, 2), + ] + ) -TEST_CASE_2 = [ - { - "keys": ["pred", "label"], - "argmax": False, - "to_onehot": False, - "num_classes": None, - "threshold_values": [True, False], - "logit_thresh": 0.6, - }, - {"pred": torch.tensor([[[0.0, 1.0], 
[2.0, 3.0]]]), "label": torch.tensor([[[0, 1], [1, 1]]])}, - {"pred": torch.tensor([[[0.0, 1.0], [1.0, 1.0]]]), "label": torch.tensor([[[0.0, 1.0], [1.0, 1.0]]])}, - (1, 2, 2), -] + TEST_CASES.append( + [ + { + "keys": ["pred", "label"], + "argmax": False, + "to_onehot": False, + "num_classes": None, + "threshold_values": [True, False], + "logit_thresh": 0.6, + }, + {"pred": p([[[0.0, 1.0], [2.0, 3.0]]]), "label": p([[[0, 1], [1, 1]]])}, + {"pred": p([[[0.0, 1.0], [1.0, 1.0]]]), "label": p([[[0.0, 1.0], [1.0, 1.0]]])}, + (1, 2, 2), + ] + ) -TEST_CASE_3 = [ - { - "keys": ["pred"], - "argmax": True, - "to_onehot": True, - "num_classes": 2, - "threshold_values": False, - "logit_thresh": 0.5, - }, - {"pred": torch.tensor([[[0.0, 1.0]], [[2.0, 3.0]]])}, - {"pred": torch.tensor([[[0.0, 0.0]], [[1.0, 1.0]]])}, - (2, 1, 2), -] + TEST_CASES.append( + [ + { + "keys": ["pred"], + "argmax": True, + "to_onehot": True, + "num_classes": 2, + "threshold_values": False, + "logit_thresh": 0.5, + }, + {"pred": p([[[0.0, 1.0]], [[2.0, 3.0]]])}, + {"pred": p([[[0.0, 0.0]], [[1.0, 1.0]]])}, + (2, 1, 2), + ] + ) -TEST_CASE_4 = [ - {"keys": "pred", "rounding": "torchrounding"}, - {"pred": torch.tensor([[[0.123, 1.345], [2.567, 3.789]]])}, - {"pred": torch.tensor([[[0.0, 1.0], [3.0, 4.0]]])}, - (1, 2, 2), -] + TEST_CASES.append( + [ + {"keys": "pred", "rounding": "torchrounding"}, + {"pred": p([[[0.123, 1.345], [2.567, 3.789]]])}, + {"pred": p([[[0.0, 1.0], [3.0, 4.0]]])}, + (1, 2, 2), + ] + ) class TestAsDiscreted(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4]) + @parameterized.expand(TEST_CASES) def test_value_shape(self, input_param, test_input, output, expected_shape): result = AsDiscreted(**input_param)(test_input) - torch.testing.assert_allclose(result["pred"], output["pred"]) + assert_allclose(result["pred"], output["pred"], rtol=1e-3) self.assertTupleEqual(result["pred"].shape, expected_shape) if "label" in result: - torch.testing.assert_allclose(result["label"], output["label"]) + assert_allclose(result["label"], output["label"], rtol=1e-3) self.assertTupleEqual(result["label"].shape, expected_shape) diff --git a/tests/test_label_to_contour.py b/tests/test_label_to_contour.py index 8f8f3cc054..e63b581e27 100644 --- a/tests/test_label_to_contour.py +++ b/tests/test_label_to_contour.py @@ -15,108 +15,107 @@ import torch from monai.transforms import LabelToContour +from tests.utils import TEST_NDARRAYS, assert_allclose -expected_output_for_cube = np.array( +expected_output_for_cube = [ [ - [ - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - ], - [ - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - ], - [ - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], 
- [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - ], - [ - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - ], - [ - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - ], - [ - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - ], - [ - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - ], - [ - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - ], - ] -) - - -def gen_fixed_cube(): + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + ], + [ + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + ], + [ + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + ], + [ + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + ], + [ + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + 
[0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + ], + [ + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + ], + [ + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + ], + [ + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + ], +] + + +def gen_fixed_cube(array_type): scale, core_start, core_end = 8, 1, 7 - cube = torch.zeros(scale, scale, scale) + cube = np.zeros((scale, scale, scale)) cube[core_start:core_end, core_start:core_end, core_start:core_end] = torch.ones( core_end - core_start, core_end - core_start, core_end - core_start ) - cube = torch.unsqueeze(cube, 0) + cube = cube[None] batch_size, channels = 10, 6 - cube = cube.repeat(batch_size, channels, 1, 1, 1) - return cube, expected_output_for_cube + cube = np.tile(cube, (batch_size, channels, 1, 1, 1)) + return array_type(cube), array_type(expected_output_for_cube) -def gen_fixed_img(): - img = torch.tensor( +def gen_fixed_img(array_type): + img = np.array( [ [0, 0, 0, 1, 1, 1, 1], [0, 0, 0, 1, 1, 1, 1], @@ -124,19 +123,18 @@ def gen_fixed_img(): [0, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1], ], - dtype=torch.float32, + dtype=np.float32, ) batch_size, channels = 10, 6 - img = img.repeat(batch_size, channels, 1, 1) - expected_output_for_img = torch.tensor( + img = array_type(np.tile(img, (batch_size, channels, 1, 1))) + expected_output_for_img = array_type( [ [0, 0, 0, 1, 1, 1, 1], [0, 0, 0, 1, 0, 0, 1], [0, 0, 1, 1, 0, 0, 1], [0, 1, 1, 0, 0, 0, 1], [1, 1, 1, 1, 1, 1, 1], - ], - dtype=torch.float32, + ] ) return img, expected_output_for_img @@ -145,33 +143,34 @@ class TestContour(unittest.TestCase): def test_contour(self): input_param = {"kernel_type": "Laplace"} - # check 5-dim input data - test_cube, expected_output = gen_fixed_cube() - for cube in test_cube: - test_result_cube = LabelToContour(**input_param)(cube) - self.assertEqual(test_result_cube.shape, cube.shape) + for p in TEST_NDARRAYS: + # check 5-dim input data + test_cube, expected_output = gen_fixed_cube(p) + for cube in test_cube: + test_result_cube = LabelToContour(**input_param)(cube) + self.assertEqual(test_result_cube.shape, cube.shape) - test_result_np = test_result_cube.cpu().numpy() - channels = cube.shape[0] - for channel in range(channels): - np.testing.assert_allclose(test_result_np[channel, ...], expected_output) + channels = cube.shape[0] + for channel in range(channels): + assert_allclose(test_result_cube[channel, ...], expected_output) - # check 4-dim input data - test_img, expected_output = gen_fixed_img() - for img in test_img: - channels 
= img.shape[0] - test_result_img = LabelToContour(**input_param)(img) - self.assertEqual(test_result_img.shape, img.shape) + # check 4-dim input data + test_img, expected_output = gen_fixed_img(p) + for img in test_img: + channels = img.shape[0] + test_result_img = LabelToContour(**input_param)(img) + self.assertEqual(test_result_img.shape, img.shape) - test_result_np = test_result_img.cpu().numpy() - for channel in range(channels): - np.testing.assert_allclose(test_result_np[channel, ...], expected_output) + for channel in range(channels): + assert_allclose(test_result_img[channel, ...], expected_output) # check invalid input data error_input = torch.rand(1, 2) self.assertRaises(ValueError, LabelToContour(**input_param), error_input) error_input = torch.rand(1, 2, 3, 4, 5) self.assertRaises(ValueError, LabelToContour(**input_param), error_input) + error_input = np.random.rand(1, 2, 3, 4, 5) + self.assertRaises(ValueError, LabelToContour(**input_param), error_input) if __name__ == "__main__": diff --git a/tests/test_label_to_contourd.py b/tests/test_label_to_contourd.py index d3795755c7..922362f1d9 100644 --- a/tests/test_label_to_contourd.py +++ b/tests/test_label_to_contourd.py @@ -15,108 +15,107 @@ import torch from monai.transforms import LabelToContourd +from tests.utils import TEST_NDARRAYS, assert_allclose -expected_output_for_cube = np.array( +expected_output_for_cube = [ [ - [ - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - ], - [ - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - ], - [ - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - ], - [ - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - ], - [ - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - ], - [ - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0], - ], - [ - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - ], - [ - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], - ], - ] -) - - -def gen_fixed_cube(): + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + ], + [ + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + ], + [ + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + ], + [ + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + ], + [ + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + ], + [ + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + ], + [ + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + ], + [ + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + ], +] + + +def gen_fixed_cube(array_type): scale, core_start, core_end = 8, 1, 7 - cube = torch.zeros(scale, scale, scale) + cube = np.zeros((scale, scale, scale)) cube[core_start:core_end, core_start:core_end, core_start:core_end] = torch.ones( core_end - core_start, core_end - core_start, core_end - core_start ) - cube = torch.unsqueeze(cube, 0) + cube = cube[None] batch_size, channels = 10, 6 - cube = cube.repeat(batch_size, channels, 1, 1, 1) - return cube, expected_output_for_cube + cube = np.tile(cube, (batch_size, channels, 1, 1, 1)) + return array_type(cube), array_type(expected_output_for_cube) -def gen_fixed_img(): - img = torch.tensor( +def gen_fixed_img(array_type): + img = np.array( [ [0, 0, 0, 1, 1, 1, 1], [0, 0, 0, 1, 1, 1, 1], @@ -124,19 +123,19 @@ def gen_fixed_img(): [0, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1], ], - dtype=torch.float32, + dtype=np.float32, ) batch_size, channels = 10, 6 - img = img.repeat(batch_size, channels, 1, 1) - expected_output_for_img = torch.tensor( + img = np.tile(img, (batch_size, channels, 1, 1)) + img = array_type(img) + expected_output_for_img = array_type( [ [0, 0, 0, 1, 1, 1, 1], [0, 0, 0, 1, 0, 0, 1], [0, 0, 1, 1, 0, 0, 1], [0, 1, 1, 0, 0, 0, 1], [1, 1, 1, 1, 1, 1, 1], - ], - dtype=torch.float32, + ] ) return img, expected_output_for_img @@ -145,31 +144,34 @@ class TestContourd(unittest.TestCase): def test_contour(self): input_param = {"keys": "img", "kernel_type": "Laplace"} - # check 5-dim input data - test_cube, expected_output = gen_fixed_cube() - for cube in test_cube: - test_result_cube = LabelToContourd(**input_param)({"img": cube}) - self.assertEqual(test_result_cube["img"].shape, cube.shape) - - test_result_np = test_result_cube["img"].cpu().numpy() - channels = cube.shape[0] - for channel in range(channels): - np.testing.assert_allclose(test_result_np[channel, ...], expected_output) - - # check 4-dim input data - test_img, expected_output = gen_fixed_img() - for img in test_img: - channels = img.shape[0] - test_result_img = LabelToContourd(**input_param)({"img": img}) - self.assertEqual(test_result_img["img"].shape, img.shape) - - test_result_np = test_result_img["img"].cpu().numpy() - for channel in range(channels): - np.testing.assert_allclose(test_result_np[channel, ...], expected_output) + for p in TEST_NDARRAYS: + # check 5-dim input data + test_cube, expected_output = gen_fixed_cube(p) + for cube in test_cube: + test_result_cube = LabelToContourd(**input_param)({"img": cube}) + self.assertEqual(test_result_cube["img"].shape, cube.shape) + + test_result_np = test_result_cube["img"] + channels = cube.shape[0] + for channel in range(channels): + assert_allclose(test_result_np[channel, ...], expected_output) + + # check 4-dim input data + test_img, expected_output = gen_fixed_img(p) + for img in test_img: + channels = img.shape[0] + test_result_img = LabelToContourd(**input_param)({"img": img}) + self.assertEqual(test_result_img["img"].shape, img.shape) + + test_result_np = test_result_img["img"] + for channel in range(channels): + assert_allclose(test_result_np[channel, ...], expected_output) # check invalid input data error_input = {"img": torch.rand(1, 2)} self.assertRaises(ValueError, LabelToContourd(**input_param), error_input) + error_input = {"img": np.random.rand(1, 2)} + self.assertRaises(ValueError, LabelToContourd(**input_param), error_input) error_input = {"img": torch.rand(1, 2, 3, 4, 5)} 
self.assertRaises(ValueError, LabelToContourd(**input_param), error_input) diff --git a/tests/test_separable_filter.py b/tests/test_separable_filter.py new file mode 100644 index 0000000000..0183ab9ef9 --- /dev/null +++ b/tests/test_separable_filter.py @@ -0,0 +1,85 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np +import torch + +from monai.networks.layers import separable_filtering + + +class SeparableFilterTestCase(unittest.TestCase): + def test_1d(self): + a = torch.tensor([[list(range(10))]], dtype=torch.float) + out = separable_filtering(a, torch.tensor([-1, 0, 1])) + expected = np.array([[[1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, -8.0]]]) + np.testing.assert_allclose(out.cpu().numpy(), expected, rtol=1e-4) + if torch.cuda.is_available(): + out = separable_filtering(a.cuda(), torch.tensor([-1, 0, 1]).cuda()) + np.testing.assert_allclose(out.cpu().numpy(), expected, rtol=1e-4) + + def test_2d(self): + a = torch.tensor([[[list(range(7)), list(range(7, 0, -1)), list(range(7))]]], dtype=torch.float) + expected = np.array( + [ + [28.0, 28.0, 28.0, 28.0, 28.0, 28.0], + [30.0, 34.0, 38.0, 42.0, 46.0, 50.0], + [28.0, 28.0, 28.0, 28.0, 28.0, 28.0], + ] + ) + expected = expected[None][None] + out = separable_filtering(a, [torch.tensor([1, 1, 1]), torch.tensor([2, 2])]) + np.testing.assert_allclose(out.cpu().numpy(), expected, rtol=1e-4) + if torch.cuda.is_available(): + out = separable_filtering(a.cuda(), [torch.tensor([1, 1, 1]).cuda(), torch.tensor([2, 2]).cuda()]) + np.testing.assert_allclose(out.cpu().numpy(), expected, rtol=1e-4) + + def test_3d(self): + a = torch.tensor( + [[list(range(7)), list(range(7)), list(range(7))], [list(range(7)), list(range(7)), list(range(7))]], + dtype=torch.float, + ) + a = a[None][None] + a = a.expand(2, 3, -1, -1, -1) + expected = np.array( + [ + [ + [4.0, 12.0, 24.0, 36.0, 48.0, 60.0, 44.0], + [6.0, 18.0, 36.0, 54.0, 72.0, 90.0, 66.0], + [4.0, 12.0, 24.0, 36.0, 48.0, 60.0, 44.0], + ], + [ + [4.0, 12.0, 24.0, 36.0, 48.0, 60.0, 44.0], + [6.0, 18.0, 36.0, 54.0, 72.0, 90.0, 66.0], + [4.0, 12.0, 24.0, 36.0, 48.0, 60.0, 44.0], + ], + ] + ) + expected = expected + # testing shapes + k = torch.tensor([1, 1, 1]) + for kernel in (k, [k] * 3): + out = separable_filtering(a, kernel) + np.testing.assert_allclose(out.cpu().numpy()[1][2], expected, rtol=1e-4) + if torch.cuda.is_available(): + out = separable_filtering( + a.cuda(), kernel.cuda() if isinstance(kernel, torch.Tensor) else [k.cuda() for k in kernel] + ) + np.testing.assert_allclose(out.cpu().numpy()[0][1], expected, rtol=1e-4) + + def test_wrong_args(self): + with self.assertRaisesRegex(TypeError, ""): + separable_filtering(((1, 1, 1, 2, 3, 2)), torch.ones((2,))) # type: ignore + + +if __name__ == "__main__": + unittest.main()
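As a sanity check for the ``LabelToContour`` refactoring above, here is a minimal sketch (not part of the patch; the channel count and image size are made up for illustration) showing that ``apply_filter`` with the 2D Laplacian kernel reproduces the previous explicit ``F.conv2d`` formulation:

.. code-block:: python

    import torch
    import torch.nn.functional as F

    from monai.networks.layers import apply_filter

    channels, h, w = 4, 16, 16  # made-up sizes for the demonstration
    img_ = (torch.rand(1, channels, h, w) > 0.5).float()  # binary image with a batch dim

    kernel = torch.tensor([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]], dtype=torch.float32)

    # new path: apply_filter broadcasts the 2D kernel over the batch and channel dims
    new_out = apply_filter(img_, kernel)

    # old path: repeat the kernel per channel and run a grouped conv2d
    old_out = F.conv2d(img_, kernel.repeat(channels, 1, 1, 1), bias=None, stride=1, padding=1, groups=channels)

    torch.testing.assert_allclose(new_out, old_out)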