From ac3c60dff835a101bc4425a1e370ff8d62ecdb7f Mon Sep 17 00:00:00 2001
From: Saurav Maheshkar
Date: Thu, 27 Jul 2023 15:32:34 +0530
Subject: [PATCH 1/6] add `channel_wise` in `RandShiftIntensity`

Signed-off-by: KumoLiu
---
 monai/networks/nets/vnet.py              | 36 +++++++++++-----
 monai/transforms/intensity/array.py      | 53 +++++++++++++++++++-----
 monai/transforms/intensity/dictionary.py | 19 +++++++--
 3 files changed, 85 insertions(+), 23 deletions(-)

diff --git a/monai/networks/nets/vnet.py b/monai/networks/nets/vnet.py
index 697547093a..d89eb8ae03 100644
--- a/monai/networks/nets/vnet.py
+++ b/monai/networks/nets/vnet.py
@@ -16,6 +16,7 @@
 
 from monai.networks.blocks.convolutions import Convolution
 from monai.networks.layers.factories import Act, Conv, Dropout, Norm, split_args
+from monai.utils import deprecated_arg
 
 __all__ = ["VNet"]
 
@@ -133,7 +134,7 @@ def __init__(
         out_channels: int,
         nconvs: int,
         act: tuple[str, dict] | str,
-        dropout_prob: float | None = None,
+        dropout_prob: tuple[float | None, float] = (None, 0.5),
         dropout_dim: int = 3,
     ):
         super().__init__()
@@ -144,8 +145,8 @@ def __init__(
 
         self.up_conv = conv_trans_type(in_channels, out_channels // 2, kernel_size=2, stride=2)
         self.bn1 = norm_type(out_channels // 2)
-        self.dropout = dropout_type(dropout_prob) if dropout_prob is not None else None
-        self.dropout2 = dropout_type(0.5)
+        self.dropout = dropout_type(dropout_prob[0]) if dropout_prob[0] is not None else None
+        self.dropout2 = dropout_type(dropout_prob[1])
         self.act_function1 = get_acti_layer(act, out_channels // 2)
         self.act_function2 = get_acti_layer(act, out_channels)
         self.ops = _make_nconv(spatial_dims, out_channels, nconvs, act)
@@ -206,8 +207,9 @@ class VNet(nn.Module):
             The value should meet the condition that ``16 % in_channels == 0``.
         out_channels: number of output channels for the network. Defaults to 1.
         act: activation type in the network. Defaults to ``("elu", {"inplace": True})``.
-        dropout_prob: dropout ratio. Defaults to 0.5.
-        dropout_dim: determine the dimensions of dropout. Defaults to 3.
+        dropout_prob_down: dropout ratio for DownTransition blocks. Defaults to 0.5.
+        dropout_prob_up: dropout ratio for UpTransition blocks. Defaults to (0.5, 0.5).
+        dropout_dim: determine the dimensions of dropout. Defaults to 3.
 
             - ``dropout_dim = 1``, randomly zeroes some of the elements for each channel.
             - ``dropout_dim = 2``, Randomly zeroes out entire channels (a channel is a 2D feature map).
@@ -216,15 +218,29 @@ class VNet(nn.Module):
             According to `Performance Tuning Guide <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html>`_,
             if a conv layer is directly followed by a batch norm layer, bias should be False.
 
+    .. deprecated:: 1.2
+        ``dropout_prob`` is deprecated in favor of ``dropout_prob_down`` and ``dropout_prob_up``.
+
     """
 
+    @deprecated_arg(
+        name="dropout_prob",
+        since="1.2",
+        new_name="dropout_prob_down",
+        msg_suffix="please use `dropout_prob_down` instead.",
+    )
+    @deprecated_arg(
+        name="dropout_prob", since="1.2", new_name="dropout_prob_up", msg_suffix="please use `dropout_prob_up` instead."
+    )
     def __init__(
         self,
         spatial_dims: int = 3,
         in_channels: int = 1,
         out_channels: int = 1,
         act: tuple[str, dict] | str = ("elu", {"inplace": True}),
-        dropout_prob: float = 0.5,
+        dropout_prob: float | None = 0.5,  # deprecated
+        dropout_prob_down: float | None = 0.5,
+        dropout_prob_up: tuple[float | None, float] = (0.5, 0.5),
         dropout_dim: int = 3,
         bias: bool = False,
     ):
@@ -236,10 +252,10 @@ def __init__(
         self.in_tr = InputTransition(spatial_dims, in_channels, 16, act, bias=bias)
         self.down_tr32 = DownTransition(spatial_dims, 16, 1, act, bias=bias)
         self.down_tr64 = DownTransition(spatial_dims, 32, 2, act, bias=bias)
-        self.down_tr128 = DownTransition(spatial_dims, 64, 3, act, dropout_prob=dropout_prob, bias=bias)
-        self.down_tr256 = DownTransition(spatial_dims, 128, 2, act, dropout_prob=dropout_prob, bias=bias)
-        self.up_tr256 = UpTransition(spatial_dims, 256, 256, 2, act, dropout_prob=dropout_prob)
-        self.up_tr128 = UpTransition(spatial_dims, 256, 128, 2, act, dropout_prob=dropout_prob)
+        self.down_tr128 = DownTransition(spatial_dims, 64, 3, act, dropout_prob=dropout_prob_down, bias=bias)
+        self.down_tr256 = DownTransition(spatial_dims, 128, 2, act, dropout_prob=dropout_prob_down, bias=bias)
+        self.up_tr256 = UpTransition(spatial_dims, 256, 256, 2, act, dropout_prob=dropout_prob_up)
+        self.up_tr128 = UpTransition(spatial_dims, 256, 128, 2, act, dropout_prob=dropout_prob_up)
         self.up_tr64 = UpTransition(spatial_dims, 128, 64, 1, act)
         self.up_tr32 = UpTransition(spatial_dims, 64, 32, 1, act)
         self.out_tr = OutputTransition(spatial_dims, 32, out_channels, act, bias=bias)
diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py
index f8eadcfb1b..374ac62688 100644
--- a/monai/transforms/intensity/array.py
+++ b/monai/transforms/intensity/array.py
@@ -255,13 +255,17 @@ class RandShiftIntensity(RandomizableTransform):
 
     backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
 
-    def __init__(self, offsets: tuple[float, float] | float, safe: bool = False, prob: float = 0.1) -> None:
+    def __init__(
+        self, offsets: tuple[float, float] | float, safe: bool = False, channel_wise: bool = False, prob: float = 0.1
+    ) -> None:
         """
         Args:
            offsets: offset range to randomly shift.
                if single number, offset value is picked from (-offsets, offsets).
            safe: if `True`, then do safe dtype convert when intensity overflow. default to `False`.
                E.g., `[256, -12]` -> `[array(0), array(244)]`. If `True`, then `[256, -12]` -> `[array(255), array(0)]`.
+           channel_wise: if True, calculate on each channel separately. Please ensure
+               that the first dimension represents the channel of the image if True.
            prob: probability of shift.
""" RandomizableTransform.__init__(self, prob) @@ -272,13 +276,17 @@ def __init__(self, offsets: tuple[float, float] | float, safe: bool = False, pro else: self.offsets = (min(offsets), max(offsets)) self._offset = self.offsets[0] + self.channel_wise = channel_wise self._shifter = ShiftIntensity(self._offset, safe) def randomize(self, data: Any | None = None) -> None: super().randomize(None) if not self._do_transform: return None - self._offset = self.R.uniform(low=self.offsets[0], high=self.offsets[1]) + if self.channel_wise: + self._offset = [self.R.uniform(low=self.offsets[0], high=self.offsets[1]) for _ in range(data.shape[0])] + else: + self._offset = self.R.uniform(low=self.offsets[0], high=self.offsets[1]) def __call__(self, img: NdarrayOrTensor, factor: float | None = None, randomize: bool = True) -> NdarrayOrTensor: """ @@ -292,12 +300,17 @@ def __call__(self, img: NdarrayOrTensor, factor: float | None = None, randomize: """ img = convert_to_tensor(img, track_meta=get_track_meta()) if randomize: - self.randomize() + self.randomize(img) if not self._do_transform: return img - return self._shifter(img, self._offset if factor is None else self._offset * factor) + if self.channel_wise: + for i, d in enumerate(img): + img[i] = self._shifter(d, self._offset[i] if factor is None else self._offset * factor) + else: + img = self._shifter(img, self._offset if factor is None else self._offset * factor) + return img class StdShiftIntensity(Transform): @@ -493,8 +506,8 @@ def __init__( fixed_mean: subtract the mean intensity before scaling with `factor`, then add the same value after scaling to ensure that the output has the same mean as the input. channel_wise: if True, scale on each channel separately. `preserve_range` and `fixed_mean` are also applied - on each channel separately if `channel_wise` is True. Please ensure that the first dimension represents the - channel of the image if True. + on each channel separately if `channel_wise` is True. Please ensure that the first dimension represents the + channel of the image if True. dtype: output data type, if None, same as input image. defaults to float32. """ self.factor = factor @@ -633,12 +646,20 @@ class RandScaleIntensity(RandomizableTransform): backend = ScaleIntensity.backend - def __init__(self, factors: tuple[float, float] | float, prob: float = 0.1, dtype: DtypeLike = np.float32) -> None: + def __init__( + self, + factors: tuple[float, float] | float, + prob: float = 0.1, + channel_wise: bool = False, + dtype: DtypeLike = np.float32, + ) -> None: """ Args: factors: factor range to randomly scale by ``v = v * (1 + factor)``. if single number, factor value is picked from (-factors, factors). prob: probability of scale. + channel_wise: if True, scale on each channel separately. Please ensure + that the first dimension represents the channel of the image if True. dtype: output data type, if None, same as input image. defaults to float32. 
""" @@ -650,13 +671,17 @@ def __init__(self, factors: tuple[float, float] | float, prob: float = 0.1, dtyp else: self.factors = (min(factors), max(factors)) self.factor = self.factors[0] + self.channel_wise = channel_wise self.dtype = dtype def randomize(self, data: Any | None = None) -> None: super().randomize(None) if not self._do_transform: return None - self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1]) + if self.channel_wise: + self.factor = [self.R.uniform(low=self.factors[0], high=self.factors[1]) for _ in range(data.shape[0])] + else: + self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1]) def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor: """ @@ -664,12 +689,20 @@ def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTen """ img = convert_to_tensor(img, track_meta=get_track_meta()) if randomize: - self.randomize() + self.randomize(img) if not self._do_transform: return convert_data_type(img, dtype=self.dtype)[0] - return ScaleIntensity(minv=None, maxv=None, factor=self.factor, dtype=self.dtype)(img) + if self.channel_wise: + out = [] + for i, d in enumerate(img): + out_channel = ScaleIntensity(minv=None, maxv=None, factor=self.factor[i], dtype=self.dtype)(d) + out.append(out_channel) + ret = torch.stack(out) + else: + ret = ScaleIntensity(minv=None, maxv=None, factor=self.factor, dtype=self.dtype)(img) + return ret class RandBiasField(RandomizableTransform): diff --git a/monai/transforms/intensity/dictionary.py b/monai/transforms/intensity/dictionary.py index 91acff0c3d..4b856a4cd9 100644 --- a/monai/transforms/intensity/dictionary.py +++ b/monai/transforms/intensity/dictionary.py @@ -369,6 +369,7 @@ def __init__( keys: KeysCollection, offsets: tuple[float, float] | float, safe: bool = False, + channel_wise: bool = False, factor_key: str | None = None, meta_keys: KeysCollection | None = None, meta_key_postfix: str = DEFAULT_POST_FIX, @@ -383,6 +384,8 @@ def __init__( if single number, offset value is picked from (-offsets, offsets). safe: if `True`, then do safe dtype convert when intensity overflow. default to `False`. E.g., `[256, -12]` -> `[array(0), array(244)]`. If `True`, then `[256, -12]` -> `[array(255), array(0)]`. + channel_wise: if True, calculate on each channel separately. Please ensure + that the first dimension represents the channel of the image if True. factor_key: if not None, use it as the key to extract a value from the corresponding metadata dictionary of `key` at runtime, and multiply the random `offset` to shift intensity. Usually, `IntensityStatsd` transform can pre-compute statistics of intensity values @@ -409,7 +412,7 @@ def __init__( if len(self.keys) != len(self.meta_keys): raise ValueError("meta_keys should have the same length as keys.") self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys)) - self.shifter = RandShiftIntensity(offsets=offsets, safe=safe, prob=1.0) + self.shifter = RandShiftIntensity(offsets=offsets, safe=safe, channel_wise=channel_wise, prob=1.0) def set_random_state( self, seed: int | None = None, state: np.random.RandomState | None = None @@ -586,6 +589,7 @@ def __init__( keys: KeysCollection, factors: tuple[float, float] | float, prob: float = 0.1, + channel_wise: bool = False, dtype: DtypeLike = np.float32, allow_missing_keys: bool = False, ) -> None: @@ -597,13 +601,15 @@ def __init__( if single number, factor value is picked from (-factors, factors). prob: probability of scale. 
            (Default 0.1, with 10% probability it returns a scaled array.)
+        channel_wise: if True, scale on each channel separately. Please ensure
+            that the first dimension represents the channel of the image if True.
         dtype: output data type, if None, same as input image. defaults to float32.
         allow_missing_keys: don't raise exception if key is missing.
         """
         MapTransform.__init__(self, keys, allow_missing_keys)
         RandomizableTransform.__init__(self, prob)
-        self.scaler = RandScaleIntensity(factors=factors, dtype=dtype, prob=1.0)
+        self.scaler = RandScaleIntensity(factors=factors, dtype=dtype, prob=1.0, channel_wise=channel_wise)
 
     def set_random_state(
         self, seed: int | None = None, state: np.random.RandomState | None = None
@@ -620,8 +626,15 @@ def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> dict[Hashable, N
             d[key] = convert_to_tensor(d[key], track_meta=get_track_meta())
             return d
 
+        # expect all the specified keys have same spatial shape and share same random scale factor
+        first_key: Hashable = self.first_key(d)
+        if first_key == ():
+            for key in self.key_iterator(d):
+                d[key] = convert_to_tensor(d[key], track_meta=get_track_meta())
+            return d
+
         # all the keys share the same random scale factor
-        self.scaler.randomize(None)
+        self.scaler.randomize(d[first_key])
         for key in self.key_iterator(d):
             d[key] = self.scaler(d[key], randomize=False)
         return d
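For illustration only, not part of the applied diff: a minimal sketch of what the new flag does once this
patch is in place, assuming a channel-first NumPy array and prob=1.0 so the transform always fires.

    import numpy as np
    from monai.transforms import RandScaleIntensity

    img = np.stack([np.ones((4, 4), dtype=np.float32), np.ones((4, 4), dtype=np.float32)])  # (2, 4, 4)
    scaler = RandScaleIntensity(factors=0.5, channel_wise=True, prob=1.0)
    scaler.set_random_state(seed=0)
    out = scaler(img)
    # with channel_wise=True each channel draws its own factor from (-0.5, 0.5),
    # so the two channel means are no longer equal
    print(float(out[0].mean()), float(out[1].mean()))

The same behaviour is reachable through the dictionary wrapper, e.g. RandScaleIntensityd(keys="img", factors=0.5, channel_wise=True).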
From 718adabd0e66a49b9fb910b95ad224a486791f39 Mon Sep 17 00:00:00 2001
From: KumoLiu
Date: Fri, 28 Jul 2023 15:19:28 +0800
Subject: [PATCH 2/6] add unittests

Signed-off-by: KumoLiu
---
 tests/test_rand_scale_intensity.py  | 16 ++++++++++++++++
 tests/test_rand_scale_intensityd.py | 16 ++++++++++++++++
 2 files changed, 32 insertions(+)

diff --git a/tests/test_rand_scale_intensity.py b/tests/test_rand_scale_intensity.py
index 5f5ca076a8..a857c0cefb 100644
--- a/tests/test_rand_scale_intensity.py
+++ b/tests/test_rand_scale_intensity.py
@@ -33,6 +33,22 @@ def test_value(self, p):
         expected = p((self.imt * (1 + np.random.uniform(low=-0.5, high=0.5))).astype(np.float32))
         assert_allclose(result, p(expected), rtol=1e-7, atol=0, type_test="tensor")
 
+    @parameterized.expand([[p] for p in TEST_NDARRAYS])
+    def test_channel_wise(self, p):
+        scaler = RandScaleIntensity(factors=0.5, channel_wise=True, prob=1.0)
+        scaler.set_random_state(seed=0)
+        im = p(self.imt)
+        result = scaler(im)
+        np.random.seed(0)
+        # simulate the randomize() of transform
+        np.random.random()
+        channel_num = self.imt.shape[0]
+        factor = [np.random.uniform(low=-0.5, high=0.5) for _ in range(channel_num)]
+        expected = p(
+            np.stack([np.asarray((self.imt[i]) * (1 + factor[i])) for i in range(channel_num)]).astype(np.float32)
+        )
+        assert_allclose(result, expected, atol=0, rtol=1e-5, type_test=False)
+
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/tests/test_rand_scale_intensityd.py b/tests/test_rand_scale_intensityd.py
index 6b5a04a8f3..8d928ac157 100644
--- a/tests/test_rand_scale_intensityd.py
+++ b/tests/test_rand_scale_intensityd.py
@@ -32,6 +32,22 @@ def test_value(self):
             expected = (self.imt * (1 + np.random.uniform(low=-0.5, high=0.5))).astype(np.float32)
             assert_allclose(result[key], p(expected), type_test="tensor")
 
+    def test_channel_wise(self):
+        key = "img"
+        for p in TEST_NDARRAYS:
+            scaler = RandScaleIntensityd(keys=[key], factors=0.5, prob=1.0, channel_wise=True)
+            scaler.set_random_state(seed=0)
+            result = scaler({key: p(self.imt)})
+            np.random.seed(0)
+            # simulate the randomize function of transform
+            np.random.random()
+            channel_num = self.imt.shape[0]
+            factor = [np.random.uniform(low=-0.5, high=0.5) for _ in range(channel_num)]
+            expected = p(
+                np.stack([np.asarray((self.imt[i]) * (1 + factor[i])) for i in range(channel_num)]).astype(np.float32)
+            )
+            assert_allclose(result[key], p(expected), type_test="tensor")
+
 
 if __name__ == "__main__":
     unittest.main()
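An aside on the lone `np.random.random()` call in both new tests: `RandomizableTransform.randomize` consumes
one uniform draw for the probability check before the per-channel factors are sampled, so the expected values
are rebuilt by discarding one draw from an identically seeded generator. A minimal sketch of that bookkeeping
(the channel count of 3 is arbitrary):

    import numpy as np

    rng = np.random.RandomState(0)  # what set_random_state(seed=0) creates internally
    _ = rng.rand()                  # the `prob` check consumes this first draw
    factors = [rng.uniform(-0.5, 0.5) for _ in range(3)]  # then one factor per channel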
From b18b31b74a6dec97fa0dcd61e9de710560b8fc99 Mon Sep 17 00:00:00 2001
From: YunLiu <55491388+KumoLiu@users.noreply.github.com>
Date: Thu, 27 Jul 2023 19:34:49 +0800
Subject: [PATCH 3/6] Update GDSDataset (#6787)

Fixes #6786 .

### Description
- Update rst
- Update the type of dtype to str

### Types of changes
- [x] Non-breaking change (fix or new feature that would not break existing functionality).
- [ ] Breaking change (fix or new feature that would cause existing functionality to change).
- [ ] New tests added to cover the changes.
- [ ] Integration tests passed locally by running `./runtests.sh -f -u --net --coverage`.
- [ ] Quick tests passed locally by running `./runtests.sh --quick --unittests --disttests`.
- [ ] In-line docstrings updated.
- [ ] Documentation updated, tested `make html` command in the `docs/` folder.

---------

Signed-off-by: KumoLiu
---
 .github/workflows/docker.yml |  2 +-
 docs/source/data.rst         |  7 ++++++
 monai/data/dataset.py        | 15 ++++++-----
 tests/test_gdsdataset.py     | 42 +++++++++++++++++++++++++-------
 4 files changed, 52 insertions(+), 14 deletions(-)

diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 2c809b9817..1702a1211c 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -89,7 +89,7 @@ jobs:
     steps:
       - name: Import
         run: |
-          export CUDA_VISIBLE_DEVICES=  # cpu-only
+          export OMP_NUM_THREADS=4 MKL_NUM_THREADS=4 CUDA_VISIBLE_DEVICES=  # cpu-only
           python -c 'import monai; monai.config.print_debug_info()'
           cd /opt/monai
           ls -al
diff --git a/docs/source/data.rst b/docs/source/data.rst
index b789102b81..63d5e0e23d 100644
--- a/docs/source/data.rst
+++ b/docs/source/data.rst
@@ -45,6 +45,13 @@ Generic Interfaces
   :members:
   :special-members: __getitem__
 
+`GDSDataset`
+~~~~~~~~~~~~~~~~~~~
+.. autoclass:: GDSDataset
+  :members:
+  :special-members: __getitem__
+
+
 `CacheNTransDataset`
 ~~~~~~~~~~~~~~~~~~~~
 .. autoclass:: CacheNTransDataset
diff --git a/monai/data/dataset.py b/monai/data/dataset.py
index 6aebe47ed7..a20511267b 100644
--- a/monai/data/dataset.py
+++ b/monai/data/dataset.py
@@ -1521,6 +1521,8 @@ class GDSDataset(PersistentDataset):
     bandwidth while decreasing latency and utilization load on the CPU and GPU.
 
     A tutorial is available: https://github.com/Project-MONAI/tutorials/blob/main/modules/GDS_dataset.ipynb.
+
+    See also: https://github.com/rapidsai/kvikio
     """
 
     def __init__(
@@ -1607,9 +1609,10 @@ def _cachecheck(self, item_transformed):
             return item
         elif isinstance(item_transformed, (np.ndarray, torch.Tensor)):
             _meta = self._load_meta_cache(meta_hash_file_name=f"{hashfile.name}-meta")
-            _data = kvikio_numpy.fromfile(f"{hashfile}", dtype=_meta.pop("dtype"), like=cp.empty(()))
-            _data = convert_to_tensor(_data.reshape(_meta.pop("shape")), device=f"cuda:{self.device}")
-            if bool(_meta):
+            _data = kvikio_numpy.fromfile(f"{hashfile}", dtype=_meta["dtype"], like=cp.empty(()))
+            _data = convert_to_tensor(_data.reshape(_meta["shape"]), device=f"cuda:{self.device}")
+            filtered_keys = list(filter(lambda key: key not in ["dtype", "shape"], _meta.keys()))
+            if bool(filtered_keys):
                 return (_data, _meta)
             return _data
         else:
@@ -1617,7 +1620,9 @@ def _cachecheck(self, item_transformed):
             for i, _item in enumerate(item_transformed):
                 for k in _item:
                     meta_i_k = self._load_meta_cache(meta_hash_file_name=f"{hashfile.name}-{k}-meta-{i}")
-                    item_k = kvikio_numpy.fromfile(f"{hashfile}-{k}-{i}", dtype=np.float32, like=cp.empty(()))
+                    item_k = kvikio_numpy.fromfile(
+                        f"{hashfile}-{k}-{i}", dtype=meta_i_k["dtype"], like=cp.empty(())
+                    )
                     item_k = convert_to_tensor(item[i].reshape(meta_i_k["shape"]), device=f"cuda:{self.device}")
                     item[i].update({k: item_k, f"{k}_meta_dict": meta_i_k})
             return item
@@ -1653,7 +1658,7 @@ def _create_new_cache(self, data, data_hashfile, meta_hash_file_name):
             if isinstance(_item_transformed_data, torch.Tensor):
                 _item_transformed_data = _item_transformed_data.numpy()
         self._meta_cache[meta_hash_file_name]["shape"] = _item_transformed_data.shape
-        self._meta_cache[meta_hash_file_name]["dtype"] = _item_transformed_data.dtype
+        self._meta_cache[meta_hash_file_name]["dtype"] = str(_item_transformed_data.dtype)
         kvikio_numpy.tofile(_item_transformed_data, data_hashfile)
         try:
             # NOTE: Writing to a temporary directory and then using a nearly atomic rename operation
diff --git a/tests/test_gdsdataset.py b/tests/test_gdsdataset.py
index 2971b34fe7..29f2d0096b 100644
--- a/tests/test_gdsdataset.py
+++ b/tests/test_gdsdataset.py
@@ -17,6 +17,7 @@
 import unittest
 
 import numpy as np
+import torch
 from parameterized import parameterized
 
 from monai.data import GDSDataset, json_hashing
@@ -48,6 +49,19 @@
 
 TEST_CASE_3 = [None, (128, 128, 128)]
 
+DTYPES = {
+    np.dtype(np.uint8): torch.uint8,
+    np.dtype(np.int8): torch.int8,
+    np.dtype(np.int16): torch.int16,
+    np.dtype(np.int32): torch.int32,
+    np.dtype(np.int64): torch.int64,
+    np.dtype(np.float16): torch.float16,
+    np.dtype(np.float32): torch.float32,
+    np.dtype(np.float64): torch.float64,
+    np.dtype(np.complex64): torch.complex64,
+    np.dtype(np.complex128): torch.complex128,
+}
+
 
 class _InplaceXform(Transform):
     def __call__(self, data):
@@ -93,16 +107,28 @@ def test_metatensor(self):
         shape = (1, 10, 9, 8)
         items = [TEST_NDARRAYS[-1](np.arange(0, np.prod(shape)).reshape(shape))]
         with tempfile.TemporaryDirectory() as tempdir:
-            ds = GDSDataset(
-                data=items,
-                transform=_InplaceXform(),
-                cache_dir=tempdir,
-                device=0,
-                pickle_module="pickle",
-                pickle_protocol=pickle.HIGHEST_PROTOCOL,
-            )
+            ds = GDSDataset(data=items, transform=_InplaceXform(), cache_dir=tempdir, device=0)
             assert_allclose(ds[0], ds[0][0], type_test=False)
 
+    def test_dtype(self):
+        shape = (1, 10, 9, 8)
+        data = np.arange(0, np.prod(shape)).reshape(shape)
+        for _dtype in DTYPES.keys():
+            items = [np.array(data).astype(_dtype)]
+            with tempfile.TemporaryDirectory() as tempdir:
+                ds = GDSDataset(data=items, transform=_InplaceXform(), cache_dir=tempdir, device=0)
+                ds1 = GDSDataset(data=items, transform=_InplaceXform(), cache_dir=tempdir, device=0)
+                self.assertEqual(ds[0].dtype, _dtype)
+                self.assertEqual(ds1[0].dtype, DTYPES[_dtype])
+
+        for _dtype in DTYPES.keys():
+            items = [torch.tensor(data, dtype=DTYPES[_dtype])]
+            with tempfile.TemporaryDirectory() as tempdir:
+                ds = GDSDataset(data=items, transform=_InplaceXform(), cache_dir=tempdir, device=0)
+                ds1 = GDSDataset(data=items, transform=_InplaceXform(), cache_dir=tempdir, device=0)
+                self.assertEqual(ds[0].dtype, DTYPES[_dtype])
+                self.assertEqual(ds1[0].dtype, DTYPES[_dtype])
+
     @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3])
     def test_shape(self, transform, expected_shape):
         test_image = nib.Nifti1Image(np.random.randint(0, 2, size=[128, 128, 128]).astype(float), np.eye(4))
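For illustration only, not part of the applied diff: with `dtype` cached as a string, a round trip through the
GDS cache preserves the array's dtype instead of reading everything back as float32. A minimal sketch, assuming
a CUDA device plus the `cupy`/`kvikio` extras that GDSDataset requires:

    import tempfile

    import numpy as np

    from monai.data import GDSDataset
    from monai.transforms import Identity

    data = [np.arange(720, dtype=np.int16).reshape(1, 10, 9, 8)]
    with tempfile.TemporaryDirectory() as tmp:
        first = GDSDataset(data=data, transform=Identity(), cache_dir=tmp, device=0)[0]   # computes, writes cache
        cached = GDSDataset(data=data, transform=Identity(), cache_dir=tmp, device=0)[0]  # reads it back via kvikio
        print(first.dtype, cached.dtype)  # int16 both times (numpy and torch dtypes respectively)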
From c976790b26a8e8e4bd8c43b4d3aef95c520e983e Mon Sep 17 00:00:00 2001
From: Saurav Maheshkar
Date: Thu, 27 Jul 2023 19:33:06 +0530
Subject: [PATCH 4/6] feat: set `data_range` as a property (#6788)

Fixes #6441

### Description
Creates a setter method for `data_range` by setting it as a property using the **`@property`** decorator.

### Types of changes
- [x] Non-breaking change (fix or new feature that would not break existing functionality).
- [ ] Breaking change (fix or new feature that would cause existing functionality to change).
- [ ] New tests added to cover the changes.
- [ ] Integration tests passed locally by running `./runtests.sh -f -u --net --coverage`.
- [ ] Quick tests passed locally by running `./runtests.sh --quick --unittests --disttests`.
- [ ] In-line docstrings updated.
- [ ] Documentation updated, tested `make html` command in the `docs/` folder.

Signed-off-by: Saurav Maheshkar
Signed-off-by: KumoLiu
---
 monai/losses/ssim_loss.py | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/monai/losses/ssim_loss.py b/monai/losses/ssim_loss.py
index 8ea3eb116b..8ee1da7267 100644
--- a/monai/losses/ssim_loss.py
+++ b/monai/losses/ssim_loss.py
@@ -61,7 +61,7 @@ def __init__(
         """
         super().__init__(reduction=LossReduction(reduction).value)
         self.spatial_dims = spatial_dims
-        self.data_range = data_range
+        self._data_range = data_range
         self.kernel_type = kernel_type
 
         if not isinstance(win_size, Sequence):
@@ -77,7 +77,7 @@ def __init__(
 
         self.ssim_metric = SSIMMetric(
             spatial_dims=self.spatial_dims,
-            data_range=self.data_range,
+            data_range=self._data_range,
             kernel_type=self.kernel_type,
             win_size=self.kernel_size,
             kernel_sigma=self.kernel_sigma,
@@ -85,7 +85,16 @@ def __init__(
             k1=self.k1,
             k2=self.k2,
         )
 
+    @property
+    def data_range(self) -> float:
+        return self._data_range
+
+    @data_range.setter
+    def data_range(self, value: float) -> None:
+        self._data_range = value
+        self.ssim_metric.data_range = value
+
     def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
         """
         Args:
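For illustration only, not part of the applied diff: the point of the property is that assigning to
`data_range` after construction now propagates to the wrapped `SSIMMetric` instead of silently leaving
it stale. A minimal sketch:

    import torch

    from monai.losses import SSIMLoss

    loss = SSIMLoss(spatial_dims=2, data_range=1.0)
    loss.data_range = 255.0          # the new setter also updates loss.ssim_metric.data_range
    x = torch.rand(1, 1, 32, 32) * 255
    print(loss(x, x.clone()))        # identical inputs, so the loss is ~0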
From bea398c523ca7c98589a1279361abcbb3a8d380c Mon Sep 17 00:00:00 2001
From: KumoLiu
Date: Fri, 28 Jul 2023 15:24:48 +0800
Subject: [PATCH 5/6] revert `RandShiftIntensity`

Signed-off-by: KumoLiu
---
 monai/transforms/intensity/array.py | 21 ++++-----------------
 1 file changed, 4 insertions(+), 17 deletions(-)

diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py
index 374ac62688..4b791f26e5 100644
--- a/monai/transforms/intensity/array.py
+++ b/monai/transforms/intensity/array.py
@@ -255,17 +255,13 @@ class RandShiftIntensity(RandomizableTransform):
 
     backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
 
-    def __init__(
-        self, offsets: tuple[float, float] | float, safe: bool = False, channel_wise: bool = False, prob: float = 0.1
-    ) -> None:
+    def __init__(self, offsets: tuple[float, float] | float, safe: bool = False, prob: float = 0.1) -> None:
         """
         Args:
             offsets: offset range to randomly shift.
                 if single number, offset value is picked from (-offsets, offsets).
             safe: if `True`, then do safe dtype convert when intensity overflow. default to `False`.
                 E.g., `[256, -12]` -> `[array(0), array(244)]`. If `True`, then `[256, -12]` -> `[array(255), array(0)]`.
-            channel_wise: if True, calculate on each channel separately. Please ensure
-                that the first dimension represents the channel of the image if True.
             prob: probability of shift.
         """
         RandomizableTransform.__init__(self, prob)
@@ -276,17 +272,13 @@ def __init__(
         else:
             self.offsets = (min(offsets), max(offsets))
         self._offset = self.offsets[0]
-        self.channel_wise = channel_wise
         self._shifter = ShiftIntensity(self._offset, safe)
 
     def randomize(self, data: Any | None = None) -> None:
         super().randomize(None)
         if not self._do_transform:
             return None
-        if self.channel_wise:
-            self._offset = [self.R.uniform(low=self.offsets[0], high=self.offsets[1]) for _ in range(data.shape[0])]
-        else:
-            self._offset = self.R.uniform(low=self.offsets[0], high=self.offsets[1])
+        self._offset = self.R.uniform(low=self.offsets[0], high=self.offsets[1])
 
     def __call__(self, img: NdarrayOrTensor, factor: float | None = None, randomize: bool = True) -> NdarrayOrTensor:
         """
@@ -300,17 +292,12 @@ def __call__(self, img: NdarrayOrTensor, factor: float | None = None, randomize:
         """
         img = convert_to_tensor(img, track_meta=get_track_meta())
         if randomize:
-            self.randomize(img)
+            self.randomize()
 
         if not self._do_transform:
             return img
 
-        if self.channel_wise:
-            for i, d in enumerate(img):
-                img[i] = self._shifter(d, self._offset[i] if factor is None else self._offset * factor)
-        else:
-            img = self._shifter(img, self._offset if factor is None else self._offset * factor)
-        return img
+        return self._shifter(img, self._offset if factor is None else self._offset * factor)
 
 
 class StdShiftIntensity(Transform):
From f480b59dad35db2c012db3e6fd7c3612fa10e704 Mon Sep 17 00:00:00 2001
From: KumoLiu
Date: Fri, 28 Jul 2023 15:32:38 +0800
Subject: [PATCH 6/6] fix mypy

Signed-off-by: KumoLiu
---
 monai/transforms/intensity/array.py      | 7 ++++---
 monai/transforms/intensity/dictionary.py | 5 +----
 2 files changed, 5 insertions(+), 7 deletions(-)

diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py
index 4b791f26e5..8cd15083c9 100644
--- a/monai/transforms/intensity/array.py
+++ b/monai/transforms/intensity/array.py
@@ -666,7 +666,7 @@ def randomize(self, data: Any | None = None) -> None:
         if not self._do_transform:
             return None
         if self.channel_wise:
-            self.factor = [self.R.uniform(low=self.factors[0], high=self.factors[1]) for _ in range(data.shape[0])]
+            self.factor = [self.R.uniform(low=self.factors[0], high=self.factors[1]) for _ in range(data.shape[0])]  # type: ignore
         else:
             self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1])
 
@@ -681,12 +681,13 @@ def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTen
         if not self._do_transform:
             return convert_data_type(img, dtype=self.dtype)[0]
 
+        ret: NdarrayOrTensor
         if self.channel_wise:
             out = []
             for i, d in enumerate(img):
-                out_channel = ScaleIntensity(minv=None, maxv=None, factor=self.factor[i], dtype=self.dtype)(d)
+                out_channel = ScaleIntensity(minv=None, maxv=None, factor=self.factor[i], dtype=self.dtype)(d)  # type: ignore
                 out.append(out_channel)
-            ret = torch.stack(out)
+            ret = torch.stack(out)  # type: ignore
         else:
             ret = ScaleIntensity(minv=None, maxv=None, factor=self.factor, dtype=self.dtype)(img)
         return ret
diff --git a/monai/transforms/intensity/dictionary.py b/monai/transforms/intensity/dictionary.py
index 4b856a4cd9..32052ad406 100644
--- a/monai/transforms/intensity/dictionary.py
+++ b/monai/transforms/intensity/dictionary.py
@@ -369,7 +369,6 @@ def __init__(
         keys: KeysCollection,
         offsets: tuple[float, float] | float,
         safe: bool = False,
-        channel_wise: bool = False,
         factor_key: str | None = None,
         meta_keys: KeysCollection | None = None,
         meta_key_postfix: str = DEFAULT_POST_FIX,
@@ -384,8 +383,6 @@ def __init__(
             if single number, offset value is picked from (-offsets, offsets).
         safe: if `True`, then do safe dtype convert when intensity overflow. default to `False`.
             E.g., `[256, -12]` -> `[array(0), array(244)]`. If `True`, then `[256, -12]` -> `[array(255), array(0)]`.
-        channel_wise: if True, calculate on each channel separately. Please ensure
-            that the first dimension represents the channel of the image if True.
         factor_key: if not None, use it as the key to extract a value from the corresponding
             metadata dictionary of `key` at runtime, and multiply the random `offset` to shift intensity.
             Usually, `IntensityStatsd` transform can pre-compute statistics of intensity values
@@ -412,7 +409,7 @@ def __init__(
         if len(self.keys) != len(self.meta_keys):
             raise ValueError("meta_keys should have the same length as keys.")
         self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))
-        self.shifter = RandShiftIntensity(offsets=offsets, safe=safe, channel_wise=channel_wise, prob=1.0)
+        self.shifter = RandShiftIntensity(offsets=offsets, safe=safe, prob=1.0)
 
     def set_random_state(
         self, seed: int | None = None, state: np.random.RandomState | None = None