From 03b096868e168f1b7fc93d0077f5f3a438378afd Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Wed, 28 Feb 2024 22:37:02 +0800 Subject: [PATCH 01/16] add flip transform Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> --- monai/transforms/spatial/array.py | 9 +++- monai/transforms/spatial/functional.py | 63 +++++++++++++++++++++++++- 2 files changed, 69 insertions(+), 3 deletions(-) diff --git a/monai/transforms/spatial/array.py b/monai/transforms/spatial/array.py index 094afdd3c4..56db276ff5 100644 --- a/monai/transforms/spatial/array.py +++ b/monai/transforms/spatial/array.py @@ -34,7 +34,8 @@ from monai.transforms.inverse import InvertibleTransform from monai.transforms.spatial.functional import ( affine_func, - flip, + flip_image, + flip_point, orientation, resize, rotate, @@ -684,6 +685,7 @@ class Flip(InvertibleTransform, LazyTransform): def __init__(self, spatial_axis: Sequence[int] | int | None = None, lazy: bool = False) -> None: LazyTransform.__init__(self, lazy=lazy) self.spatial_axis = spatial_axis + self.operators = [flip_point, flip_image] def __call__(self, img: torch.Tensor, lazy: bool | None = None) -> torch.Tensor: """ @@ -695,7 +697,10 @@ def __call__(self, img: torch.Tensor, lazy: bool | None = None) -> torch.Tensor: """ img = convert_to_tensor(img, track_meta=get_track_meta()) lazy_ = self.lazy if lazy is None else lazy - return flip(img, self.spatial_axis, lazy=lazy_, transform_info=self.get_transform_info()) # type: ignore + for operator in self.operators: + ret = operator(img, self.spatial_axis, lazy=lazy_, transform_info=self.get_transform_info()) + if ret is not None: + return ret def inverse(self, data: torch.Tensor) -> torch.Tensor: self.pop_transform(data) diff --git a/monai/transforms/spatial/functional.py b/monai/transforms/spatial/functional.py index add4e7f5ea..3a65df5d80 100644 --- a/monai/transforms/spatial/functional.py +++ b/monai/transforms/spatial/functional.py @@ -16,6 +16,7 @@ import math import warnings +from copy import deepcopy from enum import Enum import numpy as np @@ -24,6 +25,7 @@ import monai from monai.config import USE_COMPILED from monai.config.type_definitions import NdarrayOrTensor +from monai.data.box_utils import get_spatial_dims from monai.data.meta_obj import get_track_meta from monai.data.meta_tensor import MetaTensor from monai.data.utils import AFFINE_TOL, compute_shape_offset, to_affine_nd @@ -229,7 +231,7 @@ def orientation(img, original_affine, spatial_ornt, lazy, transform_info) -> tor return out.copy_meta_from(meta_info) if isinstance(out, MetaTensor) else out # type: ignore -def flip(img, sp_axes, lazy, transform_info): +def flip_image(img, sp_axes, lazy, transform_info): """ Functional implementation of flip. This function operates eagerly or lazily according to @@ -245,6 +247,9 @@ def flip(img, sp_axes, lazy, transform_info): lazy: a flag that indicates whether the operation should be performed lazily or not transform_info: a dictionary with the relevant information pertaining to an applied transform. 
""" + # TODO + if img.meta["kind"] != "pixel": + return None sp_size = img.peek_pending_shape() if isinstance(img, MetaTensor) else img.shape[1:] sp_size = convert_to_numpy(sp_size, wrap_sequence=True).tolist() extra_info = {"axes": sp_axes} # track the spatial axes @@ -265,6 +270,62 @@ def flip(img, sp_axes, lazy, transform_info): return out.copy_meta_from(meta_info) if isinstance(out, MetaTensor) else out +def flip_point(points, sp_axes, lazy, transform_info): + """ + Functional implementation of flip points. + This function operates eagerly or lazily according to + ``lazy`` (default ``False``). + Args: + points: point coordinates, Nx2 or Nx3 torch tensor or ndarray + sp_axes: spatial axes along which to flip over. Default is None. + The default `axis=None` will flip over all of the axes of the input array. + If axis is negative it counts from the last to the first axis. + If axis is a tuple of ints, flipping is performed on all of the axes + specified in the tuple. + lazy: a flag that indicates whether the operation should be performed lazily or not. + transform_info: a dictionary with the relevant information pertaining to an applied transform. + Returns: + flipped points, with same data type as ``points``, does not share memory with ``points`` + """ + # TODO + if points.meta["kind"] != "point": + return None + if points.meta.get("refer_meta", None) is not None: + spatial_size = points.meta["refer_meta"]["spatial_shape"] + else: + spatial_size = None + spatial_dims: int = get_spatial_dims(points=points[0]) + sp_size = ensure_tuple_rep(spatial_size, spatial_dims) if spatial_size is not None else None + sp_size = convert_to_numpy(sp_size, wrap_sequence=True).tolist() if spatial_size is not None else None + extra_info = {"axes": sp_axes} # track the spatial axes + if sp_axes is None: + sp_axes = tuple(range(0, spatial_dims)) + sp_axes = ensure_tuple(sp_axes) + sp_axes = monai.transforms.utils.map_spatial_axes(points.ndim, sp_axes) # use the axes with channel dim + # axes include the channel dim + xform = torch.eye(int(spatial_dims) + 1, dtype=torch.double) + for axis in sp_axes: + sp = axis - 1 + if sp_size is not None: + xform[sp, sp], xform[sp, -1] = xform[sp, sp] * -1, sp_size[sp] - 1 + else: + xform[sp, sp] *= -1 + meta_info = TraceableTransform.track_transform_meta(points, affine=xform, extra_info=extra_info, lazy=lazy, transform_info=transform_info) + + # flip box + out = deepcopy(_maybe_new_metatensor(points)) + if lazy: + return out.copy_meta_from(meta_info) if isinstance(out, MetaTensor) else meta_info + if sp_size is None: + warnings.warn("''spatial_size'' is None, will flip in the world coordinates.") + for _axes in sp_axes: + out[..., _axes-1] = -points[..., _axes-1] + else: + for _axes in sp_axes: + out[..., _axes-1] = sp_size[_axes-1] - points[..., _axes-1] + return out.copy_meta_from(meta_info) if isinstance(out, MetaTensor) else out + + def resize( img, out_size, mode, align_corners, dtype, input_ndim, anti_aliasing, anti_aliasing_sigma, lazy, transform_info ): From bb004570a707fad00d3014e43bd2a75ffd45d8d8 Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Thu, 29 Feb 2024 15:48:30 +0800 Subject: [PATCH 02/16] enable inverse Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> --- monai/transforms/spatial/functional.py | 34 ++++++++++++-------------- 1 file changed, 16 insertions(+), 18 deletions(-) diff --git a/monai/transforms/spatial/functional.py b/monai/transforms/spatial/functional.py index 3a65df5d80..6f8ff861c5 
100644 --- a/monai/transforms/spatial/functional.py +++ b/monai/transforms/spatial/functional.py @@ -287,42 +287,40 @@ def flip_point(points, sp_axes, lazy, transform_info): Returns: flipped points, with same data type as ``points``, does not share memory with ``points`` """ - # TODO + # TODO: update to use enum if points.meta["kind"] != "point": return None if points.meta.get("refer_meta", None) is not None: - spatial_size = points.meta["refer_meta"]["spatial_shape"] + sp_size = points.meta["refer_meta"]["spatial_shape"] else: - spatial_size = None - spatial_dims: int = get_spatial_dims(points=points[0]) - sp_size = ensure_tuple_rep(spatial_size, spatial_dims) if spatial_size is not None else None - sp_size = convert_to_numpy(sp_size, wrap_sequence=True).tolist() if spatial_size is not None else None + sp_size = None + sp_size = convert_to_numpy(sp_size, wrap_sequence=True).tolist() if sp_size is not None else None extra_info = {"axes": sp_axes} # track the spatial axes - if sp_axes is None: - sp_axes = tuple(range(0, spatial_dims)) - sp_axes = ensure_tuple(sp_axes) - sp_axes = monai.transforms.utils.map_spatial_axes(points.ndim, sp_axes) # use the axes with channel dim + axes = monai.transforms.utils.map_spatial_axes(points.ndim, sp_axes) # use the axes with channel dim + rank = points.peek_pending_rank() if isinstance(points, MetaTensor) else torch.tensor(3.0, dtype=torch.double) # axes include the channel dim - xform = torch.eye(int(spatial_dims) + 1, dtype=torch.double) - for axis in sp_axes: + xform = torch.eye(int(rank) + 1, dtype=torch.double) + for axis in axes: sp = axis - 1 if sp_size is not None: xform[sp, sp], xform[sp, -1] = xform[sp, sp] * -1, sp_size[sp] - 1 else: xform[sp, sp] *= -1 - meta_info = TraceableTransform.track_transform_meta(points, affine=xform, extra_info=extra_info, lazy=lazy, transform_info=transform_info) + meta_info = TraceableTransform.track_transform_meta( + points, affine=xform, extra_info=extra_info, lazy=lazy, transform_info=transform_info + ) # flip box out = deepcopy(_maybe_new_metatensor(points)) if lazy: return out.copy_meta_from(meta_info) if isinstance(out, MetaTensor) else meta_info if sp_size is None: - warnings.warn("''spatial_size'' is None, will flip in the world coordinates.") - for _axes in sp_axes: - out[..., _axes-1] = -points[..., _axes-1] + warnings.warn("''sp_size'' is None, will flip in the world coordinates.") + for _axes in axes: + out[..., _axes - 1] = - points[..., _axes - 1] else: - for _axes in sp_axes: - out[..., _axes-1] = sp_size[_axes-1] - points[..., _axes-1] + for _axes in axes: + out[..., _axes - 1] = sp_size[_axes - 1] - points[..., _axes - 1] return out.copy_meta_from(meta_info) if isinstance(out, MetaTensor) else out From f5a356298a0e151e378a92890dd466d4b91c9963 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 29 Feb 2024 16:22:09 +0000 Subject: [PATCH 03/16] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- monai/transforms/spatial/functional.py | 1 - 1 file changed, 1 deletion(-) diff --git a/monai/transforms/spatial/functional.py b/monai/transforms/spatial/functional.py index 6f8ff861c5..3a65263e8e 100644 --- a/monai/transforms/spatial/functional.py +++ b/monai/transforms/spatial/functional.py @@ -25,7 +25,6 @@ import monai from monai.config import USE_COMPILED from monai.config.type_definitions import NdarrayOrTensor -from monai.data.box_utils import get_spatial_dims from monai.data.meta_obj 
import get_track_meta from monai.data.meta_tensor import MetaTensor from monai.data.utils import AFFINE_TOL, compute_shape_offset, to_affine_nd From 2b114685308182f6f0221aa713cae50cc250f67a Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Fri, 1 Mar 2024 16:38:55 +0800 Subject: [PATCH 04/16] minor fix Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> --- monai/transforms/spatial/functional.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/monai/transforms/spatial/functional.py b/monai/transforms/spatial/functional.py index 6f8ff861c5..731fe0b542 100644 --- a/monai/transforms/spatial/functional.py +++ b/monai/transforms/spatial/functional.py @@ -52,7 +52,17 @@ cupy_ndi, _ = optional_import("cupyx.scipy.ndimage") np_ndi, _ = optional_import("scipy.ndimage") -__all__ = ["spatial_resample", "orientation", "flip", "resize", "rotate", "zoom", "rotate90", "affine_func"] +__all__ = [ + "spatial_resample", + "orientation", + "flip_image", + "flip_point", + "resize", + "rotate", + "zoom", + "rotate90", + "affine_func", +] def _maybe_new_metatensor(img, dtype=None, device=None): @@ -248,7 +258,7 @@ def flip_image(img, sp_axes, lazy, transform_info): transform_info: a dictionary with the relevant information pertaining to an applied transform. """ # TODO - if img.meta["kind"] != "pixel": + if img.meta.get("kind", "pixel") != "pixel": return None sp_size = img.peek_pending_shape() if isinstance(img, MetaTensor) else img.shape[1:] sp_size = convert_to_numpy(sp_size, wrap_sequence=True).tolist() @@ -288,7 +298,7 @@ def flip_point(points, sp_axes, lazy, transform_info): flipped points, with same data type as ``points``, does not share memory with ``points`` """ # TODO: update to use enum - if points.meta["kind"] != "point": + if points.meta.get("kind", "pixel") != "point": return None if points.meta.get("refer_meta", None) is not None: sp_size = points.meta["refer_meta"]["spatial_shape"] @@ -317,7 +327,7 @@ def flip_point(points, sp_axes, lazy, transform_info): if sp_size is None: warnings.warn("''sp_size'' is None, will flip in the world coordinates.") for _axes in axes: - out[..., _axes - 1] = - points[..., _axes - 1] + out[..., _axes - 1] = -points[..., _axes - 1] else: for _axes in axes: out[..., _axes - 1] = sp_size[_axes - 1] - points[..., _axes - 1] From 3b2619e23502cc75c5bf5e5c9573c7269f05e03a Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Fri, 1 Mar 2024 16:50:56 +0800 Subject: [PATCH 05/16] minor fix Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> --- monai/transforms/spatial/functional.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/monai/transforms/spatial/functional.py b/monai/transforms/spatial/functional.py index 4d961abcd0..1538821aeb 100644 --- a/monai/transforms/spatial/functional.py +++ b/monai/transforms/spatial/functional.py @@ -257,7 +257,8 @@ def flip_image(img, sp_axes, lazy, transform_info): transform_info: a dictionary with the relevant information pertaining to an applied transform. 
""" # TODO - if img.meta.get("kind", "pixel") != "pixel": + kind = img.meta.get("kind", "pixel") if isinstance(img, MetaTensor) else "pixel" + if kind != "pixel": return None sp_size = img.peek_pending_shape() if isinstance(img, MetaTensor) else img.shape[1:] sp_size = convert_to_numpy(sp_size, wrap_sequence=True).tolist() @@ -297,7 +298,8 @@ def flip_point(points, sp_axes, lazy, transform_info): flipped points, with same data type as ``points``, does not share memory with ``points`` """ # TODO: update to use enum - if points.meta.get("kind", "pixel") != "point": + kind = points.meta.get("kind", "pixel") if isinstance(points, MetaTensor) else "pixel" + if kind != "point": return None if points.meta.get("refer_meta", None) is not None: sp_size = points.meta["refer_meta"]["spatial_shape"] From ac55361ea2ac2d5ea0894e56ab77a7448160e5cf Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Wed, 6 Mar 2024 14:26:17 +0800 Subject: [PATCH 06/16] add unittest Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> --- monai/transforms/spatial/array.py | 4 ++-- monai/transforms/spatial/functional.py | 5 +++-- tests/test_flip.py | 26 ++++++++++++++++++++++++++ tests/test_flipd.py | 26 ++++++++++++++++++++++++++ 4 files changed, 57 insertions(+), 4 deletions(-) diff --git a/monai/transforms/spatial/array.py b/monai/transforms/spatial/array.py index 56db276ff5..fd624f917a 100644 --- a/monai/transforms/spatial/array.py +++ b/monai/transforms/spatial/array.py @@ -687,7 +687,7 @@ def __init__(self, spatial_axis: Sequence[int] | int | None = None, lazy: bool = self.spatial_axis = spatial_axis self.operators = [flip_point, flip_image] - def __call__(self, img: torch.Tensor, lazy: bool | None = None) -> torch.Tensor: + def __call__(self, img: torch.Tensor, lazy: bool | None = None) -> torch.Tensor: # type: ignore[return] """ Args: img: channel first array, must have shape: (num_channels, H[, W, ..., ]) @@ -698,7 +698,7 @@ def __call__(self, img: torch.Tensor, lazy: bool | None = None) -> torch.Tensor: img = convert_to_tensor(img, track_meta=get_track_meta()) lazy_ = self.lazy if lazy is None else lazy for operator in self.operators: - ret = operator(img, self.spatial_axis, lazy=lazy_, transform_info=self.get_transform_info()) + ret: torch.Tensor = operator(img, self.spatial_axis, lazy=lazy_, transform_info=self.get_transform_info()) if ret is not None: return ret diff --git a/monai/transforms/spatial/functional.py b/monai/transforms/spatial/functional.py index 1538821aeb..66a91514e7 100644 --- a/monai/transforms/spatial/functional.py +++ b/monai/transforms/spatial/functional.py @@ -286,7 +286,8 @@ def flip_point(points, sp_axes, lazy, transform_info): This function operates eagerly or lazily according to ``lazy`` (default ``False``). Args: - points: point coordinates, Nx2 or Nx3 torch tensor or ndarray + points: point coordinates, represented by a torch tensor or ndarray with dimensions of 1xNx2 or 1xNx3. + Here 1 represents the channel dimension. sp_axes: spatial axes along which to flip over. Default is None. The default `axis=None` will flip over all of the axes of the input array. If axis is negative it counts from the last to the first axis. 
@@ -320,10 +321,10 @@ def flip_point(points, sp_axes, lazy, transform_info): meta_info = TraceableTransform.track_transform_meta( points, affine=xform, extra_info=extra_info, lazy=lazy, transform_info=transform_info ) - # flip box out = deepcopy(_maybe_new_metatensor(points)) if lazy: + raise NotImplementedError return out.copy_meta_from(meta_info) if isinstance(out, MetaTensor) else meta_info if sp_size is None: warnings.warn("''sp_size'' is None, will flip in the world coordinates.") diff --git a/tests/test_flip.py b/tests/test_flip.py index 789ec86920..971da97dd2 100644 --- a/tests/test_flip.py +++ b/tests/test_flip.py @@ -12,6 +12,7 @@ from __future__ import annotations import unittest +from copy import deepcopy import numpy as np import torch @@ -32,6 +33,15 @@ for device in TEST_DEVICES: TORCH_CASES.append([[0, 1], torch.zeros((1, 3, 2)), track_meta, *device]) +POINT_2D_WITH_REFER = MetaTensor( + [[[3, 4], [5, 7], [6, 2], [7, 8]]], meta={"kind": "point", "refer_meta": {"spatial_shape": (10, 10)}} +) +POINT_3D = MetaTensor([[[3, 4, 5], [5, 7, 6], [6, 2, 7]]], meta={"kind": "point"}) +POINT_CASES = [] +for spatial_axis in [[0], [1], [0, 1]]: + for point in [POINT_2D_WITH_REFER, POINT_3D]: + POINT_CASES.append([spatial_axis, point]) + class TestFlip(NumpyImageTestCase2D): @@ -73,6 +83,22 @@ def test_torch(self, spatial_axis, img: torch.Tensor, track_meta: bool, device): with self.assertRaisesRegex(ValueError, "MetaTensor"): xform.inverse(res) + @parameterized.expand(POINT_CASES) + def test_points(self, spatial_axis, point): + init_param = {"spatial_axis": spatial_axis} + xform = Flip(**init_param) + res = xform(point) # type: ignore[arg-type] + self.assertEqual(point.shape, res.shape) + expected = deepcopy(point) + if point.meta.get("refer_meta", None) is not None: + for _axes in spatial_axis: + expected[..., _axes] = (10, 10)[_axes] - point[..., _axes] + else: + for _axes in spatial_axis: + expected[..., _axes] = -point[..., _axes] + assert_allclose(res, expected, type_test="tensor") + test_local_inversion(xform, res, point) + if __name__ == "__main__": unittest.main() diff --git a/tests/test_flipd.py b/tests/test_flipd.py index 277f387051..f91e896665 100644 --- a/tests/test_flipd.py +++ b/tests/test_flipd.py @@ -12,6 +12,7 @@ from __future__ import annotations import unittest +from copy import deepcopy import numpy as np import torch @@ -33,6 +34,15 @@ for device in TEST_DEVICES: TORCH_CASES.append([[0, 1], torch.zeros((1, 3, 2)), track_meta, *device]) +POINT_2D_WITH_REFER = MetaTensor( + [[[3, 4], [5, 7], [6, 2], [7, 8]]], meta={"kind": "point", "refer_meta": {"spatial_shape": (10, 10)}} +) +POINT_3D = MetaTensor([[[3, 4, 5], [5, 7, 6], [6, 2, 7]]], meta={"kind": "point"}) +POINT_CASES = [] +for spatial_axis in [[0], [1], [0, 1]]: + for point in [POINT_2D_WITH_REFER, POINT_3D]: + POINT_CASES.append([spatial_axis, point]) + class TestFlipd(NumpyImageTestCase2D): @@ -80,6 +90,22 @@ def test_meta_dict(self): res = xform({"image": torch.zeros(1, 3, 4)}) self.assertTrue(res["image"].applied_operations == res["image_transforms"]) + @parameterized.expand(POINT_CASES) + def test_points(self, spatial_axis, point): + init_param = {"keys": "point", "spatial_axis": spatial_axis} + xform = Flipd(**init_param) + res = xform({"point": point}) # type: ignore[arg-type] + self.assertEqual(point.shape, res["point"].shape) + expected = deepcopy(point) + if point.meta.get("refer_meta", None) is not None: + for _axes in spatial_axis: + expected[..., _axes] = (10, 10)[_axes] - point[..., _axes] + else: + 
for _axes in spatial_axis: + expected[..., _axes] = -point[..., _axes] + assert_allclose(res["point"], expected, type_test="tensor") + test_local_inversion(xform, {"point": res["point"]}, {"point": point}, "point") + if __name__ == "__main__": unittest.main() From 7e6f134b5e60513f85f4235a2c29bb91017b204f Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Wed, 6 Mar 2024 14:39:10 +0800 Subject: [PATCH 07/16] minor fix Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> --- monai/transforms/spatial/functional.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/monai/transforms/spatial/functional.py b/monai/transforms/spatial/functional.py index 66a91514e7..fa01952914 100644 --- a/monai/transforms/spatial/functional.py +++ b/monai/transforms/spatial/functional.py @@ -285,6 +285,7 @@ def flip_point(points, sp_axes, lazy, transform_info): Functional implementation of flip points. This function operates eagerly or lazily according to ``lazy`` (default ``False``). + Args: points: point coordinates, represented by a torch tensor or ndarray with dimensions of 1xNx2 or 1xNx3. Here 1 represents the channel dimension. @@ -295,8 +296,6 @@ def flip_point(points, sp_axes, lazy, transform_info): specified in the tuple. lazy: a flag that indicates whether the operation should be performed lazily or not. transform_info: a dictionary with the relevant information pertaining to an applied transform. - Returns: - flipped points, with same data type as ``points``, does not share memory with ``points`` """ # TODO: update to use enum kind = points.meta.get("kind", "pixel") if isinstance(points, MetaTensor) else "pixel" From c136fae055e5cf5e1d19768d1dc89a4dbf59e810 Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Thu, 30 May 2024 11:38:20 +0800 Subject: [PATCH 08/16] add flip helper Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> --- monai/transforms/spatial/functional.py | 47 +++++++++++--------------- 1 file changed, 20 insertions(+), 27 deletions(-) diff --git a/monai/transforms/spatial/functional.py b/monai/transforms/spatial/functional.py index fa01952914..3f40d03f5e 100644 --- a/monai/transforms/spatial/functional.py +++ b/monai/transforms/spatial/functional.py @@ -240,6 +240,24 @@ def orientation(img, original_affine, spatial_ornt, lazy, transform_info) -> tor return out.copy_meta_from(meta_info) if isinstance(out, MetaTensor) else out # type: ignore +def flip_helper(data, sp_size, sp_axes, lazy, transform_info): + sp_size = convert_to_numpy(sp_size, wrap_sequence=True).tolist() if sp_size is not None else None + extra_info = {"axes": sp_axes} # track the spatial axes + axes = monai.transforms.utils.map_spatial_axes(data.ndim, sp_axes) # use the axes with channel dim + rank = data.peek_pending_rank() if isinstance(data, MetaTensor) else torch.tensor(3.0, dtype=torch.double) + # axes include the channel dim + xform = torch.eye(int(rank) + 1, dtype=torch.double) + for axis in axes: + sp = axis - 1 + if sp_size is not None: + xform[sp, sp], xform[sp, -1] = xform[sp, sp] * -1, sp_size[sp] - 1 + else: + xform[sp, sp] *= -1 + meta_info = TraceableTransform.track_transform_meta( + data, affine=xform, extra_info=extra_info, lazy=lazy, transform_info=transform_info + ) + return axes, meta_info + def flip_image(img, sp_axes, lazy, transform_info): """ Functional implementation of flip. 
@@ -261,18 +279,7 @@ def flip_image(img, sp_axes, lazy, transform_info): if kind != "pixel": return None sp_size = img.peek_pending_shape() if isinstance(img, MetaTensor) else img.shape[1:] - sp_size = convert_to_numpy(sp_size, wrap_sequence=True).tolist() - extra_info = {"axes": sp_axes} # track the spatial axes - axes = monai.transforms.utils.map_spatial_axes(img.ndim, sp_axes) # use the axes with channel dim - rank = img.peek_pending_rank() if isinstance(img, MetaTensor) else torch.tensor(3.0, dtype=torch.double) - # axes include the channel dim - xform = torch.eye(int(rank) + 1, dtype=torch.double) - for axis in axes: - sp = axis - 1 - xform[sp, sp], xform[sp, -1] = xform[sp, sp] * -1, sp_size[sp] - 1 - meta_info = TraceableTransform.track_transform_meta( - img, sp_size=sp_size, affine=xform, extra_info=extra_info, transform_info=transform_info, lazy=lazy - ) + axes, meta_info = flip_helper(img, sp_size, sp_axes, lazy, transform_info) out = _maybe_new_metatensor(img) if lazy: return out.copy_meta_from(meta_info) if isinstance(out, MetaTensor) else meta_info @@ -305,21 +312,7 @@ def flip_point(points, sp_axes, lazy, transform_info): sp_size = points.meta["refer_meta"]["spatial_shape"] else: sp_size = None - sp_size = convert_to_numpy(sp_size, wrap_sequence=True).tolist() if sp_size is not None else None - extra_info = {"axes": sp_axes} # track the spatial axes - axes = monai.transforms.utils.map_spatial_axes(points.ndim, sp_axes) # use the axes with channel dim - rank = points.peek_pending_rank() if isinstance(points, MetaTensor) else torch.tensor(3.0, dtype=torch.double) - # axes include the channel dim - xform = torch.eye(int(rank) + 1, dtype=torch.double) - for axis in axes: - sp = axis - 1 - if sp_size is not None: - xform[sp, sp], xform[sp, -1] = xform[sp, sp] * -1, sp_size[sp] - 1 - else: - xform[sp, sp] *= -1 - meta_info = TraceableTransform.track_transform_meta( - points, affine=xform, extra_info=extra_info, lazy=lazy, transform_info=transform_info - ) + axes, meta_info = flip_helper(points, sp_size, sp_axes, lazy, transform_info) # flip box out = deepcopy(_maybe_new_metatensor(points)) if lazy: From ac139c983f725de90448ed8912f2b7d18220e4ef Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Thu, 30 May 2024 11:43:04 +0800 Subject: [PATCH 09/16] minor fix Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> --- monai/transforms/spatial/functional.py | 1 + 1 file changed, 1 insertion(+) diff --git a/monai/transforms/spatial/functional.py b/monai/transforms/spatial/functional.py index 3f40d03f5e..dadb364fa8 100644 --- a/monai/transforms/spatial/functional.py +++ b/monai/transforms/spatial/functional.py @@ -258,6 +258,7 @@ def flip_helper(data, sp_size, sp_axes, lazy, transform_info): ) return axes, meta_info + def flip_image(img, sp_axes, lazy, transform_info): """ Functional implementation of flip. From 4abaa8ee41d102e9e8efeed70431ab239011112f Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Wed, 24 Apr 2024 17:15:58 +0800 Subject: [PATCH 10/16] Update pycln version (#7704) Fixes #7703 ### Types of changes - [x] Non-breaking change (fix or new feature that would not break existing functionality). - [ ] Breaking change (fix or new feature that would cause existing functionality to change). - [ ] New tests added to cover the changes. - [ ] Integration tests passed locally by running `./runtests.sh -f -u --net --coverage`. 
- [ ] Quick tests passed locally by running `./runtests.sh --quick --unittests --disttests`. - [ ] In-line docstrings updated. - [ ] Documentation updated, tested `make html` command in the `docs/` folder. --------- Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> --- .github/workflows/pythonapp-min.yml | 2 ++ .github/workflows/pythonapp.yml | 2 ++ .pre-commit-config.yaml | 2 +- 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/pythonapp-min.yml b/.github/workflows/pythonapp-min.yml index bbe7579774..dffae10558 100644 --- a/.github/workflows/pythonapp-min.yml +++ b/.github/workflows/pythonapp-min.yml @@ -9,6 +9,8 @@ on: - main - releasing/* pull_request: + head_ref-ignore: + - dev concurrency: # automatically cancel the previously triggered workflows when there's a newer version diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml index b011e65cf1..4f77389991 100644 --- a/.github/workflows/pythonapp.yml +++ b/.github/workflows/pythonapp.yml @@ -9,6 +9,8 @@ on: - main - releasing/* pull_request: + head_ref-ignore: + - dev concurrency: # automatically cancel the previously triggered workflows when there's a newer version diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 14b41bbeb8..0087f87578 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -69,7 +69,7 @@ repos: )$ - repo: https://github.com/hadialqattan/pycln - rev: v2.1.3 + rev: v2.4.0 hooks: - id: pycln args: [--config=pyproject.toml] From 9456a8b2901349e4a7f17ad2f48dff9c522475ee Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Thu, 30 May 2024 13:06:36 +0800 Subject: [PATCH 11/16] Revert "Update pycln version (#7704)" This reverts commit 4abaa8ee41d102e9e8efeed70431ab239011112f. 
--- .github/workflows/pythonapp-min.yml | 2 -- .github/workflows/pythonapp.yml | 2 -- .pre-commit-config.yaml | 2 +- 3 files changed, 1 insertion(+), 5 deletions(-) diff --git a/.github/workflows/pythonapp-min.yml b/.github/workflows/pythonapp-min.yml index dffae10558..bbe7579774 100644 --- a/.github/workflows/pythonapp-min.yml +++ b/.github/workflows/pythonapp-min.yml @@ -9,8 +9,6 @@ on: - main - releasing/* pull_request: - head_ref-ignore: - - dev concurrency: # automatically cancel the previously triggered workflows when there's a newer version diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml index 4f77389991..b011e65cf1 100644 --- a/.github/workflows/pythonapp.yml +++ b/.github/workflows/pythonapp.yml @@ -9,8 +9,6 @@ on: - main - releasing/* pull_request: - head_ref-ignore: - - dev concurrency: # automatically cancel the previously triggered workflows when there's a newer version diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0087f87578..14b41bbeb8 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -69,7 +69,7 @@ repos: )$ - repo: https://github.com/hadialqattan/pycln - rev: v2.4.0 + rev: v2.1.3 hooks: - id: pycln args: [--config=pyproject.toml] From 4a94227977f7b2d190033dee8807d9e0e4807ead Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Thu, 30 May 2024 13:11:36 +0800 Subject: [PATCH 12/16] use enum Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> --- monai/transforms/spatial/functional.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/monai/transforms/spatial/functional.py b/monai/transforms/spatial/functional.py index dadb364fa8..0cdc890181 100644 --- a/monai/transforms/spatial/functional.py +++ b/monai/transforms/spatial/functional.py @@ -45,6 +45,7 @@ fall_back_tuple, optional_import, ) +from monai.utils.enums import MetaKeys, KindKeys nib, has_nib = optional_import("nibabel") cupy, _ = optional_import("cupy") @@ -275,9 +276,8 @@ def flip_image(img, sp_axes, lazy, transform_info): lazy: a flag that indicates whether the operation should be performed lazily or not transform_info: a dictionary with the relevant information pertaining to an applied transform. """ - # TODO - kind = img.meta.get("kind", "pixel") if isinstance(img, MetaTensor) else "pixel" - if kind != "pixel": + kind = img.meta.get(MetaKeys.KIND, KindKeys.PIXEL) if isinstance(img, MetaTensor) else KindKeys.PIXEL + if kind != KindKeys.PIXEL: return None sp_size = img.peek_pending_shape() if isinstance(img, MetaTensor) else img.shape[1:] axes, meta_info = flip_helper(img, sp_size, sp_axes, lazy, transform_info) @@ -305,9 +305,8 @@ def flip_point(points, sp_axes, lazy, transform_info): lazy: a flag that indicates whether the operation should be performed lazily or not. transform_info: a dictionary with the relevant information pertaining to an applied transform. 
""" - # TODO: update to use enum - kind = points.meta.get("kind", "pixel") if isinstance(points, MetaTensor) else "pixel" - if kind != "point": + kind = points.meta.get(MetaKeys.KIND, KindKeys.PIXEL) if isinstance(points, MetaTensor) else KindKeys.PIXEL + if kind != KindKeys.POINT: return None if points.meta.get("refer_meta", None) is not None: sp_size = points.meta["refer_meta"]["spatial_shape"] From 8bca179dede40c50b05fba6ca03869f046440270 Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Thu, 30 May 2024 15:56:47 +0800 Subject: [PATCH 13/16] add comments Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> --- monai/transforms/spatial/functional.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/monai/transforms/spatial/functional.py b/monai/transforms/spatial/functional.py index 0cdc890181..3d110b58ce 100644 --- a/monai/transforms/spatial/functional.py +++ b/monai/transforms/spatial/functional.py @@ -308,6 +308,7 @@ def flip_point(points, sp_axes, lazy, transform_info): kind = points.meta.get(MetaKeys.KIND, KindKeys.PIXEL) if isinstance(points, MetaTensor) else KindKeys.PIXEL if kind != KindKeys.POINT: return None + # TODO: use enum if points.meta.get("refer_meta", None) is not None: sp_size = points.meta["refer_meta"]["spatial_shape"] else: @@ -316,6 +317,7 @@ def flip_point(points, sp_axes, lazy, transform_info): # flip box out = deepcopy(_maybe_new_metatensor(points)) if lazy: + # TODO: add lazy support raise NotImplementedError return out.copy_meta_from(meta_info) if isinstance(out, MetaTensor) else meta_info if sp_size is None: From 21d7fc3376c66663cbb7a126581509d907b12b09 Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Tue, 30 Jul 2024 23:03:02 +0800 Subject: [PATCH 14/16] add apply_affine_to_points Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> --- monai/transforms/spatial/functional.py | 51 +++++++++++++------------- 1 file changed, 26 insertions(+), 25 deletions(-) diff --git a/monai/transforms/spatial/functional.py b/monai/transforms/spatial/functional.py index 3d110b58ce..d4b1b5762c 100644 --- a/monai/transforms/spatial/functional.py +++ b/monai/transforms/spatial/functional.py @@ -33,7 +33,7 @@ from monai.transforms.intensity.array import GaussianSmooth from monai.transforms.inverse import TraceableTransform from monai.transforms.utils import create_rotate, create_translate, resolves_modes, scale_affine -from monai.transforms.utils_pytorch_numpy_unification import allclose +from monai.transforms.utils_pytorch_numpy_unification import allclose, concatenate from monai.utils import ( LazyAttr, TraceKeys, @@ -241,8 +241,9 @@ def orientation(img, original_affine, spatial_ornt, lazy, transform_info) -> tor return out.copy_meta_from(meta_info) if isinstance(out, MetaTensor) else out # type: ignore -def flip_helper(data, sp_size, sp_axes, lazy, transform_info): - sp_size = convert_to_numpy(sp_size, wrap_sequence=True).tolist() if sp_size is not None else None +def flip_helper(data, sp_axes, lazy, transform_info): + sp_size = data.peek_pending_shape() if isinstance(data, MetaTensor) else data.shape[1:] + sp_size = convert_to_numpy(sp_size, wrap_sequence=True).tolist() extra_info = {"axes": sp_axes} # track the spatial axes axes = monai.transforms.utils.map_spatial_axes(data.ndim, sp_axes) # use the axes with channel dim rank = data.peek_pending_rank() if isinstance(data, MetaTensor) else torch.tensor(3.0, dtype=torch.double) @@ -250,14 +251,13 @@ def flip_helper(data, 
sp_size, sp_axes, lazy, transform_info): xform = torch.eye(int(rank) + 1, dtype=torch.double) for axis in axes: sp = axis - 1 - if sp_size is not None: - xform[sp, sp], xform[sp, -1] = xform[sp, sp] * -1, sp_size[sp] - 1 - else: - xform[sp, sp] *= -1 + if data.kind == KindKeys.PIXEL: + xform[sp, -1] = sp_size[sp] - 1 + xform[sp, sp] = xform[sp, sp] * -1 meta_info = TraceableTransform.track_transform_meta( data, affine=xform, extra_info=extra_info, lazy=lazy, transform_info=transform_info ) - return axes, meta_info + return axes, meta_info, xform def flip_image(img, sp_axes, lazy, transform_info): @@ -279,8 +279,7 @@ def flip_image(img, sp_axes, lazy, transform_info): kind = img.meta.get(MetaKeys.KIND, KindKeys.PIXEL) if isinstance(img, MetaTensor) else KindKeys.PIXEL if kind != KindKeys.PIXEL: return None - sp_size = img.peek_pending_shape() if isinstance(img, MetaTensor) else img.shape[1:] - axes, meta_info = flip_helper(img, sp_size, sp_axes, lazy, transform_info) + axes, meta_info, _ = flip_helper(img, sp_axes, lazy, transform_info) out = _maybe_new_metatensor(img) if lazy: return out.copy_meta_from(meta_info) if isinstance(out, MetaTensor) else meta_info @@ -308,25 +307,14 @@ def flip_point(points, sp_axes, lazy, transform_info): kind = points.meta.get(MetaKeys.KIND, KindKeys.PIXEL) if isinstance(points, MetaTensor) else KindKeys.PIXEL if kind != KindKeys.POINT: return None - # TODO: use enum - if points.meta.get("refer_meta", None) is not None: - sp_size = points.meta["refer_meta"]["spatial_shape"] - else: - sp_size = None - axes, meta_info = flip_helper(points, sp_size, sp_axes, lazy, transform_info) - # flip box - out = deepcopy(_maybe_new_metatensor(points)) + _, meta_info, xform = flip_helper(points, sp_axes, lazy, transform_info) + + out = _maybe_new_metatensor(points) if lazy: # TODO: add lazy support raise NotImplementedError return out.copy_meta_from(meta_info) if isinstance(out, MetaTensor) else meta_info - if sp_size is None: - warnings.warn("''sp_size'' is None, will flip in the world coordinates.") - for _axes in axes: - out[..., _axes - 1] = -points[..., _axes - 1] - else: - for _axes in axes: - out[..., _axes - 1] = sp_size[_axes - 1] - points[..., _axes - 1] + out = apply_affine_to_points(out[0], xform, dtype=torch.float64).unsqueeze(0) return out.copy_meta_from(meta_info) if isinstance(out, MetaTensor) else out @@ -675,3 +663,16 @@ def affine_func( out = _maybe_new_metatensor(img, dtype=torch.float32, device=resampler.device) out = out.copy_meta_from(meta_info) if isinstance(out, MetaTensor) else out return out if image_only else (out, affine) + + +def apply_affine_to_points(data, affine, dtype): + data = convert_to_tensor(data, track_meta=get_track_meta()) + data_: torch.Tensor = convert_to_tensor(data, track_meta=False, dtype=dtype) + + homogeneous = concatenate((data_, torch.ones((data_.shape[0], 1))), axis=1) + transformed_homogeneous = torch.matmul(affine, homogeneous.T) + transformed_coordinates = transformed_homogeneous[:-1].T + out, *_ = convert_to_dst_type(transformed_coordinates, data, dtype=dtype) + + return out + From a77b8c6f4e14ec0f842758e104eb2558c7fabbc9 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 30 Jul 2024 15:03:45 +0000 Subject: [PATCH 15/16] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- monai/transforms/spatial/functional.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/monai/transforms/spatial/functional.py 
b/monai/transforms/spatial/functional.py index d4b1b5762c..b6168a3bcb 100644 --- a/monai/transforms/spatial/functional.py +++ b/monai/transforms/spatial/functional.py @@ -16,7 +16,6 @@ import math import warnings -from copy import deepcopy from enum import Enum import numpy as np @@ -675,4 +674,3 @@ def apply_affine_to_points(data, affine, dtype): out, *_ = convert_to_dst_type(transformed_coordinates, data, dtype=dtype) return out - From 812820a225a90fdda961c641f02003d733d305f0 Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Thu, 1 Aug 2024 18:54:29 +0800 Subject: [PATCH 16/16] update Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> --- monai/transforms/spatial/functional.py | 1 + 1 file changed, 1 insertion(+) diff --git a/monai/transforms/spatial/functional.py b/monai/transforms/spatial/functional.py index b6168a3bcb..e5712ee239 100644 --- a/monai/transforms/spatial/functional.py +++ b/monai/transforms/spatial/functional.py @@ -313,6 +313,7 @@ def flip_point(points, sp_axes, lazy, transform_info): # TODO: add lazy support raise NotImplementedError return out.copy_meta_from(meta_info) if isinstance(out, MetaTensor) else meta_info + # TODO: use CoordinateTransformd.apply_affine_to_points instead out = apply_affine_to_points(out[0], xform, dtype=torch.float64).unsqueeze(0) return out.copy_meta_from(meta_info) if isinstance(out, MetaTensor) else out
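
For reference, a minimal usage sketch of the point-flipping path introduced by this series, modelled on the POINT_CASES added to tests/test_flip.py in patch 06/16. The "kind"/"refer_meta" metadata keys are taken from those tests; the MetaKeys.KIND/KindKeys enums and the MetaTensor.kind accessor referenced in patches 12-14/16 are assumed to exist on this branch, so treat this as an illustration of the intended API rather than a guaranteed-runnable snippet.

from monai.data.meta_tensor import MetaTensor
from monai.transforms import Flip

# Channel-first point coordinates (1 x N x 2); "refer_meta" records the spatial shape
# of the image the points refer to, and "kind" marks the tensor as point data so that
# Flip dispatches to flip_point instead of flip_image.
points = MetaTensor(
    [[[3.0, 4.0], [5.0, 7.0], [6.0, 2.0]]],
    meta={"kind": "point", "refer_meta": {"spatial_shape": (10, 10)}},
)

flip = Flip(spatial_axis=[0])  # Flip.__call__ tries flip_point first, then flip_image
flipped = flip(points)

# Per the unit tests in patch 06/16: with a reference spatial shape S, a coordinate x
# along a flipped axis becomes S - x; without "refer_meta" the point is flipped in
# world coordinates (x becomes -x), which the eager implementation warns about.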