diff --git a/monai/data/png_writer.py b/monai/data/png_writer.py
index 2baec3b872..52163e40ac 100644
--- a/monai/data/png_writer.py
+++ b/monai/data/png_writer.py
@@ -48,7 +48,7 @@ def write_png(
     """
     if not isinstance(data, np.ndarray):
-        raise AssertionError("input data must be numpy array.")
+        raise ValueError("input data must be numpy array.")
     if len(data.shape) == 3 and data.shape[2] == 1:  # PIL Image can't save image with 1 channel
         data = data.squeeze(2)
     if output_spatial_shape is not None:
@@ -59,11 +59,11 @@ def write_png(
         _min, _max = np.min(data), np.max(data)
         if len(data.shape) == 3:
             data = np.moveaxis(data, -1, 0)  # to channel first
-            data = xform(data)
+            data = xform(data)  # type: ignore
             data = np.moveaxis(data, 0, -1)
         else:  # (H, W)
             data = np.expand_dims(data, 0)  # make a channel
-            data = xform(data)[0]  # first channel
+            data = xform(data)[0]  # type: ignore
         if mode != InterpolateMode.NEAREST:
             data = np.clip(data, _min, _max)  # type: ignore

diff --git a/monai/transforms/croppad/array.py b/monai/transforms/croppad/array.py
index e1c915cc93..276ba6104d 100644
--- a/monai/transforms/croppad/array.py
+++ b/monai/transforms/croppad/array.py
@@ -128,10 +128,7 @@ def __call__(
             # all zeros, skip padding
             return img
         mode = convert_pad_mode(dst=img, mode=mode or self.mode).value
-        if isinstance(img, torch.Tensor):
-            pad = self._pt_pad
-        else:
-            pad = self._np_pad  # type: ignore
+        pad = self._pt_pad if isinstance(img, torch.Tensor) else self._np_pad
         return pad(img, self.to_pad, mode, **self.kwargs)  # type: ignore

@@ -449,15 +446,16 @@ class CenterSpatialCrop(Transform):
         the spatial size of output data will be [32, 40, 40].
     """

+    backend = SpatialCrop.backend
+
     def __init__(self, roi_size: Union[Sequence[int], int]) -> None:
         self.roi_size = roi_size

-    def __call__(self, img: np.ndarray):
+    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
         """
         Apply the transform to `img`, assuming `img` is channel-first and
         slicing doesn't apply to the channel dim.
         """
-        img, *_ = convert_data_type(img, np.ndarray)  # type: ignore
         roi_size = fall_back_tuple(self.roi_size, img.shape[1:])
         center = [i // 2 for i in img.shape[1:]]
         cropper = SpatialCrop(roi_center=center, roi_size=roi_size)
@@ -474,11 +472,12 @@ class CenterScaleCrop(Transform):

     """

+    backend = CenterSpatialCrop.backend
+
     def __init__(self, roi_scale: Union[Sequence[float], float]):
         self.roi_scale = roi_scale

-    def __call__(self, img: np.ndarray):
-        img, *_ = convert_data_type(img, np.ndarray)  # type: ignore
+    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
         img_size = img.shape[1:]
         ndim = len(img_size)
         roi_size = [ceil(r * s) for r, s in zip(ensure_tuple_rep(self.roi_scale, ndim), img_size)]
@@ -510,6 +509,8 @@ class RandSpatialCrop(Randomizable, Transform):
             if True, the actual size is sampled from `randint(roi_size, max_roi_size + 1)`.
     """

+    backend = CenterSpatialCrop.backend
+
     def __init__(
         self,
         roi_size: Union[Sequence[int], int],
@@ -535,15 +536,14 @@ def randomize(self, img_size: Sequence[int]) -> None:
             valid_size = get_valid_patch_size(img_size, self._size)
             self._slices = (slice(None),) + get_random_patch(img_size, valid_size, self.R)

-    def __call__(self, img: np.ndarray):
+    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
         """
         Apply the transform to `img`, assuming `img` is channel-first and
         slicing doesn't apply to the channel dim.
""" - img, *_ = convert_data_type(img, np.ndarray) # type: ignore self.randomize(img.shape[1:]) if self._size is None: - raise AssertionError + raise RuntimeError("self._size not specified.") if self.random_center: return img[self._slices] cropper = CenterSpatialCrop(self._size) @@ -582,12 +582,11 @@ def __init__( self.roi_scale = roi_scale self.max_roi_scale = max_roi_scale - def __call__(self, img: np.ndarray): + def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: """ Apply the transform to `img`, assuming `img` is channel-first and slicing doesn't apply to the channel dim. """ - img, *_ = convert_data_type(img, np.ndarray) # type: ignore img_size = img.shape[1:] ndim = len(img_size) self.roi_size = [ceil(r * s) for r, s in zip(ensure_tuple_rep(self.roi_scale, ndim), img_size)] @@ -629,6 +628,8 @@ class RandSpatialCropSamples(Randomizable, Transform): """ + backend = RandScaleCrop.backend + def __init__( self, roi_size: Union[Sequence[int], int], @@ -652,12 +653,11 @@ def set_random_state( def randomize(self, data: Optional[Any] = None) -> None: pass - def __call__(self, img: np.ndarray) -> List[np.ndarray]: + def __call__(self, img: NdarrayOrTensor) -> List[NdarrayOrTensor]: """ Apply the transform to `img`, assuming `img` is channel-first and cropping doesn't change the channel dim. """ - img, *_ = convert_data_type(img, np.ndarray) # type: ignore return [self.cropper(img) for _ in range(self.num_samples)] @@ -1128,6 +1128,8 @@ class ResizeWithPadOrCrop(Transform): """ + backend = list(set(SpatialPad.backend) & set(CenterSpatialCrop.backend)) + def __init__( self, spatial_size: Union[Sequence[int], int], @@ -1138,7 +1140,7 @@ def __init__( self.padder = SpatialPad(spatial_size=spatial_size, method=method, mode=mode, **np_kwargs) self.cropper = CenterSpatialCrop(roi_size=spatial_size) - def __call__(self, img: np.ndarray, mode: Optional[Union[NumpyPadMode, str]] = None) -> np.ndarray: + def __call__(self, img: NdarrayOrTensor, mode: Optional[Union[NumpyPadMode, str]] = None) -> NdarrayOrTensor: """ Args: img: data to pad or crop, assuming `img` is channel-first and @@ -1149,7 +1151,6 @@ def __call__(self, img: np.ndarray, mode: Optional[Union[NumpyPadMode, str]] = N If None, defaults to the ``mode`` in construction. See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html """ - img, *_ = convert_data_type(img, np.ndarray) # type: ignore return self.padder(self.cropper(img), mode=mode) # type: ignore diff --git a/monai/transforms/croppad/dictionary.py b/monai/transforms/croppad/dictionary.py index 488b832450..2590bf2e77 100644 --- a/monai/transforms/croppad/dictionary.py +++ b/monai/transforms/croppad/dictionary.py @@ -416,13 +416,15 @@ class CenterSpatialCropd(MapTransform, InvertibleTransform): allow_missing_keys: don't raise exception if key is missing. """ + backend = CenterSpatialCrop.backend + def __init__( self, keys: KeysCollection, roi_size: Union[Sequence[int], int], allow_missing_keys: bool = False ) -> None: super().__init__(keys, allow_missing_keys) self.cropper = CenterSpatialCrop(roi_size) - def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) for key in self.key_iterator(d): orig_size = d[key].shape[1:] @@ -466,13 +468,15 @@ class CenterScaleCropd(MapTransform, InvertibleTransform): allow_missing_keys: don't raise exception if key is missing. 
""" + backend = CenterSpatialCrop.backend + def __init__( self, keys: KeysCollection, roi_scale: Union[Sequence[float], float], allow_missing_keys: bool = False ) -> None: super().__init__(keys, allow_missing_keys=allow_missing_keys) self.roi_scale = roi_scale - def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) # use the spatial size of first image to scale, expect all images have the same spatial size img_size = data[self.keys[0]].shape[1:] @@ -537,6 +541,8 @@ class RandSpatialCropd(Randomizable, MapTransform, InvertibleTransform): allow_missing_keys: don't raise exception if key is missing. """ + backend = CenterSpatialCrop.backend + def __init__( self, keys: KeysCollection, @@ -565,11 +571,11 @@ def randomize(self, img_size: Sequence[int]) -> None: valid_size = get_valid_patch_size(img_size, self._size) self._slices = (slice(None),) + get_random_patch(img_size, valid_size, self.R) - def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) self.randomize(d[self.keys[0]].shape[1:]) # image shape from the first data key if self._size is None: - raise AssertionError + raise RuntimeError("self._size not specified.") for key in self.key_iterator(d): if self.random_center: self.push_transform(d, key, {"slices": [(i.start, i.stop) for i in self._slices[1:]]}) # type: ignore @@ -638,6 +644,8 @@ class RandScaleCropd(RandSpatialCropd): allow_missing_keys: don't raise exception if key is missing. """ + backend = RandSpatialCropd.backend + def __init__( self, keys: KeysCollection, @@ -659,7 +667,7 @@ def __init__( self.roi_scale = roi_scale self.max_roi_scale = max_roi_scale - def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: img_size = data[self.keys[0]].shape[1:] ndim = len(img_size) self.roi_size = [ceil(r * s) for r, s in zip(ensure_tuple_rep(self.roi_scale, ndim), img_size)] @@ -723,6 +731,8 @@ class RandSpatialCropSamplesd(Randomizable, MapTransform, InvertibleTransform): """ + backend = RandSpatialCropd.backend + def __init__( self, keys: KeysCollection, @@ -755,7 +765,7 @@ def set_random_state( def randomize(self, data: Optional[Any] = None) -> None: pass - def __call__(self, data: Mapping[Hashable, np.ndarray]) -> List[Dict[Hashable, np.ndarray]]: + def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> List[Dict[Hashable, NdarrayOrTensor]]: ret = [] for i in range(self.num_samples): d = dict(data) @@ -765,14 +775,14 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> List[Dict[Hashable, n cropped = self.cropper(d) # self.cropper will have added RandSpatialCropd to the list. 
Change to RandSpatialCropSamplesd for key in self.key_iterator(cropped): - cropped[str(key) + InverseKeys.KEY_SUFFIX][-1][InverseKeys.CLASS_NAME] = self.__class__.__name__ - cropped[str(key) + InverseKeys.KEY_SUFFIX][-1][InverseKeys.ID] = id(self) + cropped[str(key) + InverseKeys.KEY_SUFFIX][-1][InverseKeys.CLASS_NAME] = self.__class__.__name__ # type: ignore + cropped[str(key) + InverseKeys.KEY_SUFFIX][-1][InverseKeys.ID] = id(self) # type: ignore # add `patch_index` to the meta data for key, meta_key, meta_key_postfix in self.key_iterator(d, self.meta_keys, self.meta_key_postfix): meta_key = meta_key or f"{key}_{meta_key_postfix}" if meta_key not in cropped: cropped[meta_key] = {} # type: ignore - cropped[meta_key][Key.PATCH_INDEX] = i + cropped[meta_key][Key.PATCH_INDEX] = i # type: ignore ret.append(cropped) return ret @@ -1377,6 +1387,8 @@ class ResizeWithPadOrCropd(MapTransform, InvertibleTransform): """ + backend = ResizeWithPadOrCrop.backend + def __init__( self, keys: KeysCollection, @@ -1390,7 +1402,7 @@ def __init__( self.mode = ensure_tuple_rep(mode, len(self.keys)) self.padcropper = ResizeWithPadOrCrop(spatial_size=spatial_size, method=method, **np_kwargs) - def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) for key, m in self.key_iterator(d, self.mode): orig_size = d[key].shape[1:] diff --git a/monai/transforms/spatial/array.py b/monai/transforms/spatial/array.py index 1b03d39a15..6f662f2dce 100644 --- a/monai/transforms/spatial/array.py +++ b/monai/transforms/spatial/array.py @@ -362,6 +362,8 @@ class Resize(Transform): See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate """ + backend = [TransformBackends.TORCH] + def __init__( self, spatial_size: Union[Sequence[int], int], @@ -376,10 +378,10 @@ def __init__( def __call__( self, - img: np.ndarray, + img: NdarrayOrTensor, mode: Optional[Union[InterpolateMode, str]] = None, align_corners: Optional[bool] = None, - ) -> np.ndarray: + ) -> NdarrayOrTensor: """ Args: img: channel first array, must have shape: (num_channels, H[, W, ..., ]). @@ -394,33 +396,33 @@ def __call__( ValueError: When ``self.spatial_size`` length is less than ``img`` spatial dimensions. """ - img, *_ = convert_data_type(img, np.ndarray) # type: ignore + img_, *_ = convert_data_type(img, torch.Tensor, dtype=torch.float) # type: ignore if self.size_mode == "all": - input_ndim = img.ndim - 1 # spatial ndim + input_ndim = img_.ndim - 1 # spatial ndim output_ndim = len(ensure_tuple(self.spatial_size)) if output_ndim > input_ndim: - input_shape = ensure_tuple_size(img.shape, output_ndim + 1, 1) - img = img.reshape(input_shape) + input_shape = ensure_tuple_size(img_.shape, output_ndim + 1, 1) + img_ = img_.reshape(input_shape) elif output_ndim < input_ndim: raise ValueError( "len(spatial_size) must be greater or equal to img spatial dimensions, " f"got spatial_size={output_ndim} img={input_ndim}." 
                )
-            spatial_size_ = fall_back_tuple(self.spatial_size, img.shape[1:])
+            spatial_size_ = fall_back_tuple(self.spatial_size, img_.shape[1:])
         else:  # for the "longest" mode
-            img_size = img.shape[1:]
+            img_size = img_.shape[1:]
             if not isinstance(self.spatial_size, int):
                 raise ValueError("spatial_size must be an int number if size_mode is 'longest'.")
             scale = self.spatial_size / max(img_size)
             spatial_size_ = tuple(int(round(s * scale)) for s in img_size)
         resized = torch.nn.functional.interpolate(  # type: ignore
-            input=torch.as_tensor(np.ascontiguousarray(img), dtype=torch.float).unsqueeze(0),
+            input=img_.unsqueeze(0),  # type: ignore
             size=spatial_size_,
             mode=look_up_option(self.mode if mode is None else mode, InterpolateMode).value,
             align_corners=self.align_corners if align_corners is None else align_corners,
         )
-        resized = resized.squeeze(0).detach().cpu().numpy()
-        return np.asarray(resized)
+        out, *_ = convert_to_dst_type(resized.squeeze(0), img)
+        return out


 class Rotate(Transform, ThreadUnsafe):
@@ -462,7 +464,7 @@ def __init__(
         self.padding_mode: GridSamplePadMode = look_up_option(padding_mode, GridSamplePadMode)
         self.align_corners = align_corners
         self.dtype = dtype
-        self._rotation_matrix: Optional[np.ndarray] = None
+        self._rotation_matrix: Optional[NdarrayOrTensor] = None

     def __call__(
         self,
@@ -511,7 +513,7 @@ def __call__(
         corners = np.asarray(np.meshgrid(*[(0, dim) for dim in im_shape], indexing="ij")).reshape(
             (len(im_shape), -1)
         )
-        corners = transform[:-1, :-1] @ corners
+        corners = transform[:-1, :-1] @ corners  # type: ignore
         output_shape = np.asarray(corners.ptp(axis=1) + 0.5, dtype=int)
         shift_1 = create_translate(input_ndim, (-(output_shape - 1) / 2).tolist())
         transform = shift @ transform @ shift_1
@@ -532,7 +534,7 @@ def __call__(
         out, *_ = convert_to_dst_type(output, dst=img, dtype=output.dtype)
         return out

-    def get_rotation_matrix(self) -> Optional[np.ndarray]:
+    def get_rotation_matrix(self) -> Optional[NdarrayOrTensor]:
         """
         Get the most recently applied rotation matrix
         This is not thread-safe.
@@ -1055,6 +1057,10 @@ def __call__(
         grid: Optional[NdarrayOrTensor] = None,
     ) -> Tuple[NdarrayOrTensor, NdarrayOrTensor]:
         """
+        The grid can be initialized with a `spatial_size` parameter, or provided directly as `grid`.
+        Therefore, either `spatial_size` or `grid` must be provided.
+        When initialising from `spatial_size`, the backend "torch" will be used.
+
         Args:
             spatial_size: output grid size.
             grid: grid to be transformed. Shape must be (3, H, W) for 2D or (4, H, W, D) for 3D.
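Note on the two initialisation paths documented above: a minimal usage sketch, assuming this branch's `AffineGrid` (whose `__call__` returns the transformed grid together with the applied affine, per the `Tuple[NdarrayOrTensor, NdarrayOrTensor]` annotation; the rotation value is an arbitrary example)::

    import torch
    from monai.transforms import AffineGrid

    affine_grid = AffineGrid(rotate_params=[0.1], device=torch.device("cpu"))
    # path 1: initialise from spatial_size; the identity grid is now created
    # with the torch backend on the requested device
    grid, affine = affine_grid(spatial_size=(4, 4))
    # path 2: pass an existing grid in; its container type and device decide
    # the backend (_b) and device (_device) used for the affine factors
    grid_2, _ = affine_grid(grid=grid)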
@@ -1065,26 +1071,32 @@ def __call__(
         """
         if grid is None:
             if spatial_size is not None:
-                grid = create_grid(spatial_size, dtype=float)
+                grid = create_grid(spatial_size, device=self.device, backend="torch")
             else:
                 raise ValueError("Incompatible values: grid=None and spatial_size=None.")

+        _b = TransformBackends.TORCH if isinstance(grid, torch.Tensor) else TransformBackends.NUMPY
+        _device = grid.device if isinstance(grid, torch.Tensor) else self.device
         affine: NdarrayOrTensor
         if self.affine is None:
             spatial_dims = len(grid.shape) - 1
-            affine = np.eye(spatial_dims + 1)
+            affine = (
+                torch.eye(spatial_dims + 1, device=_device)
+                if _b == TransformBackends.TORCH
+                else np.eye(spatial_dims + 1)
+            )
             if self.rotate_params:
-                affine = affine @ create_rotate(spatial_dims, self.rotate_params)
+                affine = affine @ create_rotate(spatial_dims, self.rotate_params, device=_device, backend=_b)
             if self.shear_params:
-                affine = affine @ create_shear(spatial_dims, self.shear_params)
+                affine = affine @ create_shear(spatial_dims, self.shear_params, device=_device, backend=_b)
             if self.translate_params:
-                affine = affine @ create_translate(spatial_dims, self.translate_params)
+                affine = affine @ create_translate(spatial_dims, self.translate_params, device=_device, backend=_b)
             if self.scale_params:
-                affine = affine @ create_scale(spatial_dims, self.scale_params)
+                affine = affine @ create_scale(spatial_dims, self.scale_params, device=_device, backend=_b)
         else:
             affine = self.affine

-        grid, *_ = convert_data_type(grid, torch.Tensor, device=self.device, dtype=float)
+        grid, *_ = convert_data_type(grid, torch.Tensor, device=_device, dtype=float)
         affine, *_ = convert_to_dst_type(affine, grid)

         grid = (affine @ grid.reshape((grid.shape[0], -1))).reshape([-1] + list(grid.shape[1:]))
@@ -1206,6 +1218,8 @@ class RandDeformGrid(Randomizable, Transform):
     Generate random deformation grid.
     """

+    backend = [TransformBackends.TORCH]
+
     def __init__(
         self,
         spacing: Union[Sequence[float], float],
@@ -1243,11 +1257,12 @@ def __call__(self, spatial_size: Sequence[int]):
             spatial_size: spatial size of the grid.
         """
         self.spacing = fall_back_tuple(self.spacing, (1.0,) * len(spatial_size))
-        control_grid = create_control_grid(spatial_size, self.spacing)
+        control_grid = create_control_grid(spatial_size, self.spacing, device=self.device, backend="torch")
         self.randomize(control_grid.shape[1:])
-        control_grid[: len(spatial_size)] += self.rand_mag * self.random_offset
-        if self.as_tensor_output:
-            control_grid = torch.as_tensor(np.ascontiguousarray(control_grid), device=self.device)
+        _offset, *_ = convert_to_dst_type(self.rand_mag * self.random_offset, control_grid)
+        control_grid[: len(spatial_size)] += _offset
+        if not self.as_tensor_output:
+            control_grid, *_ = convert_data_type(control_grid, output_type=np.ndarray, dtype=np.float32)
         return control_grid

@@ -1300,8 +1315,9 @@ def __call__(
         """
         if grid is None:
             raise ValueError("Unknown grid.")
+        _device = img.device if isinstance(img, torch.Tensor) else self.device
         img_t: torch.Tensor
-        img_t, *_ = convert_data_type(img, torch.Tensor, device=self.device, dtype=torch.float32)  # type: ignore
+        img_t, *_ = convert_data_type(img, torch.Tensor, device=_device, dtype=torch.float32)  # type: ignore
         grid, *_ = convert_to_dst_type(grid, img_t)

         if USE_COMPILED:
@@ -1553,7 +1569,7 @@ def _init_identity_cache(self):
                    f"'spatial_size={self.spatial_size}', please specify 'spatial_size'."
                )
            return None
-        return torch.tensor(create_grid(spatial_size=_sp_size)).to(self.rand_affine_grid.device)
+        return create_grid(spatial_size=_sp_size, device=self.rand_affine_grid.device, backend="torch")

     def get_identity_grid(self, spatial_size: Sequence[int]):
         """
@@ -1567,7 +1583,11 @@ def get_identity_grid(self, spatial_size: Sequence[int]):
             spatial_size, [2] * ndim
         ):
             raise RuntimeError(f"spatial_size should not be dynamic, got {spatial_size}.")
-        return create_grid(spatial_size=spatial_size) if self._cached_grid is None else self._cached_grid
+        return (
+            create_grid(spatial_size=spatial_size, device=self.rand_affine_grid.device, backend="torch")
+            if self._cached_grid is None
+            else self._cached_grid
+        )

     def set_random_state(
         self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None
@@ -1701,6 +1721,7 @@ def __init__(
         )
         self.resampler = Resample(device=device)

+        self.device = device
         self.spatial_size = spatial_size
         self.mode: GridSampleMode = look_up_option(mode, GridSampleMode)
         self.padding_mode: GridSamplePadMode = look_up_option(padding_mode, GridSamplePadMode)
@@ -1745,14 +1766,15 @@ def __call__(
             grid = self.rand_affine_grid(grid=grid)
             grid = torch.nn.functional.interpolate(  # type: ignore
                 recompute_scale_factor=True,
-                input=torch.as_tensor(grid).unsqueeze(0),
+                input=grid.unsqueeze(0),
                 scale_factor=list(ensure_tuple(self.deform_grid.spacing)),
                 mode=InterpolateMode.BICUBIC.value,
                 align_corners=False,
             )
             grid = CenterSpatialCrop(roi_size=sp_size)(grid[0])
         else:
-            grid = create_grid(spatial_size=sp_size)
+            _device = img.device if isinstance(img, torch.Tensor) else self.device
+            grid = create_grid(spatial_size=sp_size, device=_device, backend="torch")
         out: NdarrayOrTensor = self.resampler(
             img, grid, mode=mode or self.mode, padding_mode=padding_mode or self.padding_mode
         )
@@ -1890,13 +1912,13 @@ def __call__(
         """
         sp_size = fall_back_tuple(spatial_size or self.spatial_size, img.shape[1:])
         self.randomize(grid_size=sp_size)
-        grid = create_grid(spatial_size=sp_size)
+        _device = img.device if isinstance(img, torch.Tensor) else self.device
+        grid = create_grid(spatial_size=sp_size, device=_device, backend="torch")
         if self._do_transform:
             if self.rand_offset is None:
-                raise AssertionError
-            grid = torch.as_tensor(np.ascontiguousarray(grid), device=self.device)
-            gaussian = GaussianFilter(3, self.sigma, 3.0).to(device=self.device)
-            offset = torch.as_tensor(self.rand_offset, device=self.device).unsqueeze(0)
+                raise RuntimeError("rand_offset is not initialized.")
+            gaussian = GaussianFilter(3, self.sigma, 3.0).to(device=_device)
+            offset = torch.as_tensor(self.rand_offset, device=_device).unsqueeze(0)
             grid[:3] += gaussian(offset)[0] * self.magnitude
             grid = self.rand_affine_grid(grid=grid)
         out: NdarrayOrTensor = self.resampler(

diff --git a/monai/transforms/spatial/dictionary.py b/monai/transforms/spatial/dictionary.py
index 487225cb60..f36300dea6 100644
--- a/monai/transforms/spatial/dictionary.py
+++ b/monai/transforms/spatial/dictionary.py
@@ -521,6 +521,8 @@ class Resized(MapTransform, InvertibleTransform):
         allow_missing_keys: don't raise exception if key is missing.
""" + backend = Resize.backend + def __init__( self, keys: KeysCollection, @@ -535,7 +537,7 @@ def __init__( self.align_corners = ensure_tuple_rep(align_corners, len(self.keys)) self.resizer = Resize(spatial_size=spatial_size, size_mode=size_mode) - def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) for key, mode, align_corners in self.key_iterator(d, self.mode, self.align_corners): self.push_transform( @@ -549,7 +551,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda d[key] = self.resizer(d[key], mode=mode, align_corners=align_corners) return d - def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + def inverse(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = deepcopy(dict(data)) for key in self.key_iterator(d): transform = self.get_most_recent_transform(d, key) @@ -826,9 +828,7 @@ def inverse(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, Nd for key in self.key_iterator(d): transform = self.get_most_recent_transform(d, key) # if transform was not performed and spatial size is None, nothing to do. - if not transform[InverseKeys.DO_TRANSFORM] and self.rand_affine.spatial_size is None: - out: NdarrayOrTensor = d[key] - else: + if transform[InverseKeys.DO_TRANSFORM] or self.rand_affine.spatial_size is not None: orig_size = transform[InverseKeys.ORIG_SIZE] # Create inverse transform fwd_affine = transform[InverseKeys.EXTRA_INFO]["affine"] @@ -968,7 +968,8 @@ def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, N ) grid = CenterSpatialCrop(roi_size=sp_size)(grid[0]) else: - grid = create_grid(spatial_size=sp_size) + _device = self.rand_2d_elastic.deform_grid.device + grid = create_grid(spatial_size=sp_size, device=_device, backend="torch") for key, mode, padding_mode in self.key_iterator(d, self.mode, self.padding_mode): d[key] = self.rand_2d_elastic.resampler(d[key], grid, mode=mode, padding_mode=padding_mode) @@ -1084,12 +1085,12 @@ def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, N sp_size = fall_back_tuple(self.rand_3d_elastic.spatial_size, data[self.keys[0]].shape[1:]) self.randomize(grid_size=sp_size) - grid = create_grid(spatial_size=sp_size) + _device = self.rand_3d_elastic.device + grid = create_grid(spatial_size=sp_size, device=_device, backend="torch") if self._do_transform: device = self.rand_3d_elastic.device - grid = torch.tensor(grid).to(device) gaussian = GaussianFilter(spatial_dims=3, sigma=self.rand_3d_elastic.sigma, truncated=3.0).to(device) - offset = torch.tensor(self.rand_3d_elastic.rand_offset, device=device).unsqueeze(0) + offset = torch.as_tensor(self.rand_3d_elastic.rand_offset, device=device).unsqueeze(0) grid[:3] += gaussian(offset)[0] * self.rand_3d_elastic.magnitude grid = self.rand_3d_elastic.rand_affine_grid(grid=grid) diff --git a/monai/transforms/utils.py b/monai/transforms/utils.py index 05e45bf26f..a627a7544a 100644 --- a/monai/transforms/utils.py +++ b/monai/transforms/utils.py @@ -541,7 +541,9 @@ def create_grid( spatial_size: Sequence[int], spacing: Optional[Sequence[float]] = None, homogeneous: bool = True, - dtype: DtypeLike = float, + dtype=float, + device: Optional[torch.device] = None, + backend=TransformBackends.NUMPY, ): """ compute a `spatial_size` mesh. 
@@ -551,6 +553,26 @@ def create_grid(
         spacing: same len as ``spatial_size``, defaults to 1.0 (dense grid).
         homogeneous: whether to make homogeneous coordinates.
         dtype: output grid data type.
+        device: device to compute and store the output (when the backend is "torch").
+        backend: APIs to use, ``numpy`` or ``torch``.
+
+    """
+    _backend = look_up_option(backend, TransformBackends)
+    if _backend == TransformBackends.NUMPY:
+        return _create_grid_numpy(spatial_size, spacing, homogeneous, dtype)
+    if _backend == TransformBackends.TORCH:
+        return _create_grid_torch(spatial_size, spacing, homogeneous, dtype, device)
+    raise ValueError("backend {} is not supported".format(backend))
+
+
+def _create_grid_numpy(
+    spatial_size: Sequence[int],
+    spacing: Optional[Sequence[float]] = None,
+    homogeneous: bool = True,
+    dtype: DtypeLike = float,
+):
+    """
+    compute a `spatial_size` mesh with the numpy API.
     """
     spacing = spacing or tuple(1.0 for _ in spatial_size)
     ranges = [np.linspace(-(d - 1.0) / 2.0 * s, (d - 1.0) / 2.0 * s, int(d)) for d, s in zip(spatial_size, spacing)]
@@ -560,23 +582,58 @@ def create_grid(
     return np.concatenate([coords, np.ones_like(coords[:1])])


+def _create_grid_torch(
+    spatial_size: Sequence[int],
+    spacing: Optional[Sequence[float]] = None,
+    homogeneous: bool = True,
+    dtype=torch.float32,
+    device: Optional[torch.device] = None,
+):
+    """
+    compute a `spatial_size` mesh with the torch API.
+    """
+    spacing = spacing or tuple(1.0 for _ in spatial_size)
+    ranges = [
+        torch.linspace(-(d - 1.0) / 2.0 * s, (d - 1.0) / 2.0 * s, int(d), device=device, dtype=dtype)
+        for d, s in zip(spatial_size, spacing)
+    ]
+    coords = torch.meshgrid(*ranges)
+    if not homogeneous:
+        return torch.stack(coords)
+    return torch.stack([*coords, torch.ones_like(coords[0])])
+
+
 def create_control_grid(
-    spatial_shape: Sequence[int], spacing: Sequence[float], homogeneous: bool = True, dtype: DtypeLike = float
+    spatial_shape: Sequence[int],
+    spacing: Sequence[float],
+    homogeneous: bool = True,
+    dtype: DtypeLike = float,
+    device: Optional[torch.device] = None,
+    backend=TransformBackends.NUMPY,
 ):
     """
     control grid with two additional points in each direction
     """
+    torch_backend = look_up_option(backend, TransformBackends) == TransformBackends.TORCH
+    ceil_func: Callable = torch.ceil if torch_backend else np.ceil  # type: ignore
     grid_shape = []
     for d, s in zip(spatial_shape, spacing):
-        d = int(d)
+        d = torch.as_tensor(d, device=device) if torch_backend else int(d)  # type: ignore
         if d % 2 == 0:
-            grid_shape.append(np.ceil((d - 1.0) / (2.0 * s) + 0.5) * 2.0 + 2.0)
+            grid_shape.append(ceil_func((d - 1.0) / (2.0 * s) + 0.5) * 2.0 + 2.0)
         else:
-            grid_shape.append(np.ceil((d - 1.0) / (2.0 * s)) * 2.0 + 3.0)
-    return create_grid(grid_shape, spacing, homogeneous, dtype)
+            grid_shape.append(ceil_func((d - 1.0) / (2.0 * s)) * 2.0 + 3.0)
+    return create_grid(
+        spatial_size=grid_shape, spacing=spacing, homogeneous=homogeneous, dtype=dtype, device=device, backend=backend
+    )


-def create_rotate(spatial_dims: int, radians: Union[Sequence[float], float]) -> np.ndarray:
+def create_rotate(
+    spatial_dims: int,
+    radians: Union[Sequence[float], float],
+    device: Optional[torch.device] = None,
+    backend=TransformBackends.NUMPY,
+) -> NdarrayOrTensor:
     """
     create a 2D or 3D rotation matrix

@@ -585,48 +642,83 @@ def create_rotate(spatial_dims: int, radians: Union[Sequence[float], float]) -> np.ndarray:
         radians: rotation radians
             when spatial_dims == 3, the `radians` sequence corresponds to
             rotation in the 1st, 2nd, and 3rd dim respectively.
+        device: device to compute and store the output (when the backend is "torch").
+        backend: APIs to use, ``numpy`` or ``torch``.

     Raises:
         ValueError: When ``radians`` is empty.
         ValueError: When ``spatial_dims`` is not one of [2, 3].

     """
+    _backend = look_up_option(backend, TransformBackends)
+    if _backend == TransformBackends.NUMPY:
+        return _create_rotate(
+            spatial_dims=spatial_dims, radians=radians, sin_func=np.sin, cos_func=np.cos, eye_func=np.eye
+        )
+    if _backend == TransformBackends.TORCH:
+        return _create_rotate(
+            spatial_dims=spatial_dims,
+            radians=radians,
+            sin_func=lambda th: torch.sin(torch.as_tensor(th, dtype=torch.float32, device=device)),
+            cos_func=lambda th: torch.cos(torch.as_tensor(th, dtype=torch.float32, device=device)),
+            eye_func=lambda rank: torch.eye(rank, device=device),
+        )
+    raise ValueError("backend {} is not supported".format(backend))
+
+
+def _create_rotate(
+    spatial_dims: int,
+    radians: Union[Sequence[float], float],
+    sin_func: Callable = np.sin,
+    cos_func: Callable = np.cos,
+    eye_func: Callable = np.eye,
+) -> NdarrayOrTensor:
     radians = ensure_tuple(radians)
     if spatial_dims == 2:
         if len(radians) >= 1:
-            sin_, cos_ = np.sin(radians[0]), np.cos(radians[0])
-            return np.array([[cos_, -sin_, 0.0], [sin_, cos_, 0.0], [0.0, 0.0, 1.0]])
+            sin_, cos_ = sin_func(radians[0]), cos_func(radians[0])
+            out = eye_func(3)
+            out[0, 0], out[0, 1] = cos_, -sin_
+            out[1, 0], out[1, 1] = sin_, cos_
+            return out  # type: ignore
         raise ValueError("radians must be non empty.")

     if spatial_dims == 3:
         affine = None
         if len(radians) >= 1:
-            sin_, cos_ = np.sin(radians[0]), np.cos(radians[0])
-            affine = np.array(
-                [[1.0, 0.0, 0.0, 0.0], [0.0, cos_, -sin_, 0.0], [0.0, sin_, cos_, 0.0], [0.0, 0.0, 0.0, 1.0]]
-            )
+            sin_, cos_ = sin_func(radians[0]), cos_func(radians[0])
+            affine = eye_func(4)
+            affine[1, 1], affine[1, 2] = cos_, -sin_
+            affine[2, 1], affine[2, 2] = sin_, cos_
         if len(radians) >= 2:
-            sin_, cos_ = np.sin(radians[1]), np.cos(radians[1])
+            sin_, cos_ = sin_func(radians[1]), cos_func(radians[1])
             if affine is None:
                 raise ValueError("Affine should be a matrix.")
-            affine = affine @ np.array(
-                [[cos_, 0.0, sin_, 0.0], [0.0, 1.0, 0.0, 0.0], [-sin_, 0.0, cos_, 0.0], [0.0, 0.0, 0.0, 1.0]]
-            )
+            _affine = eye_func(4)
+            _affine[0, 0], _affine[0, 2] = cos_, sin_
+            _affine[2, 0], _affine[2, 2] = -sin_, cos_
+            affine = affine @ _affine
         if len(radians) >= 3:
-            sin_, cos_ = np.sin(radians[2]), np.cos(radians[2])
+            sin_, cos_ = sin_func(radians[2]), cos_func(radians[2])
             if affine is None:
                 raise ValueError("Affine should be a matrix.")
-            affine = affine @ np.array(
-                [[cos_, -sin_, 0.0, 0.0], [sin_, cos_, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]
-            )
+            _affine = eye_func(4)
+            _affine[0, 0], _affine[0, 1] = cos_, -sin_
+            _affine[1, 0], _affine[1, 1] = sin_, cos_
+            affine = affine @ _affine
         if affine is None:
             raise ValueError("radians must be non empty.")
-        return affine
+        return affine  # type: ignore

     raise ValueError(f"Unsupported spatial_dims: {spatial_dims}, available options are [2, 3].")


-def create_shear(spatial_dims: int, coefs: Union[Sequence[float], float]) -> np.ndarray:
+def create_shear(
+    spatial_dims: int,
+    coefs: Union[Sequence[float], float],
+    device: Optional[torch.device] = None,
+    backend=TransformBackends.NUMPY,
+) -> NdarrayOrTensor:
     """
     create a shearing matrix

     Args:
         spatial_dims: spatial rank
         coefs: shearing factors, a tuple of 2 floats for 2D, a tuple of 6 floats for 3D),
             take a 3D affine as example::

@@ -642,51 +734,109 @@ def create_shear(spatial_dims: int, coefs: Union[Sequence[float], float]) -> np.ndarray:
                 [0.0, 0.0, 0.0, 1.0],
             ]

+        device: device to compute and store the output (when the backend is "torch").
+        backend: APIs to use, ``numpy`` or ``torch``.
+
     Raises:
         NotImplementedError: When ``spatial_dims`` is not one of [2, 3].

     """
+    _backend = look_up_option(backend, TransformBackends)
+    if _backend == TransformBackends.NUMPY:
+        return _create_shear(spatial_dims=spatial_dims, coefs=coefs, eye_func=np.eye)
+    if _backend == TransformBackends.TORCH:
+        return _create_shear(
+            spatial_dims=spatial_dims, coefs=coefs, eye_func=lambda rank: torch.eye(rank, device=device)
+        )
+    raise ValueError("backend {} is not supported".format(backend))
+
+
+def _create_shear(spatial_dims: int, coefs: Union[Sequence[float], float], eye_func=np.eye) -> NdarrayOrTensor:
     if spatial_dims == 2:
         coefs = ensure_tuple_size(coefs, dim=2, pad_val=0.0)
-        return np.array([[1, coefs[0], 0.0], [coefs[1], 1.0, 0.0], [0.0, 0.0, 1.0]])
+        out = eye_func(3)
+        out[0, 1], out[1, 0] = coefs[0], coefs[1]
+        return out  # type: ignore
     if spatial_dims == 3:
         coefs = ensure_tuple_size(coefs, dim=6, pad_val=0.0)
-        return np.array(
-            [
-                [1.0, coefs[0], coefs[1], 0.0],
-                [coefs[2], 1.0, coefs[3], 0.0],
-                [coefs[4], coefs[5], 1.0, 0.0],
-                [0.0, 0.0, 0.0, 1.0],
-            ]
-        )
+        out = eye_func(4)
+        out[0, 1], out[0, 2] = coefs[0], coefs[1]
+        out[1, 0], out[1, 2] = coefs[2], coefs[3]
+        out[2, 0], out[2, 1] = coefs[4], coefs[5]
+        return out  # type: ignore
     raise NotImplementedError("Currently only spatial_dims in [2, 3] are supported.")


-def create_scale(spatial_dims: int, scaling_factor: Union[Sequence[float], float]):
+def create_scale(
+    spatial_dims: int,
+    scaling_factor: Union[Sequence[float], float],
+    device: Optional[torch.device] = None,
+    backend=TransformBackends.NUMPY,
+) -> NdarrayOrTensor:
     """
     create a scaling matrix

     Args:
         spatial_dims: spatial rank
         scaling_factor: scaling factors for every spatial dim, defaults to 1.
-    """
+        device: device to compute and store the output (when the backend is "torch").
+        backend: APIs to use, ``numpy`` or ``torch``.
+    """
+    _backend = look_up_option(backend, TransformBackends)
+    if _backend == TransformBackends.NUMPY:
+        return _create_scale(spatial_dims=spatial_dims, scaling_factor=scaling_factor, array_func=np.diag)
+    if _backend == TransformBackends.TORCH:
+        return _create_scale(
+            spatial_dims=spatial_dims,
+            scaling_factor=scaling_factor,
+            array_func=lambda x: torch.diag(torch.as_tensor(x, device=device)),
+        )
+    raise ValueError("backend {} is not supported".format(backend))
+
+
+def _create_scale(
+    spatial_dims: int, scaling_factor: Union[Sequence[float], float], array_func=np.diag
+) -> NdarrayOrTensor:
     scaling_factor = ensure_tuple_size(scaling_factor, dim=spatial_dims, pad_val=1.0)
-    return np.diag(scaling_factor[:spatial_dims] + (1.0,))
+    return array_func(scaling_factor[:spatial_dims] + (1.0,))  # type: ignore


-def create_translate(spatial_dims: int, shift: Union[Sequence[float], float]) -> np.ndarray:
+def create_translate(
+    spatial_dims: int,
+    shift: Union[Sequence[float], float],
+    device: Optional[torch.device] = None,
+    backend=TransformBackends.NUMPY,
+) -> NdarrayOrTensor:
     """
     create a translation matrix

     Args:
         spatial_dims: spatial rank
         shift: translate pixel/voxel for every spatial dim, defaults to 0.
-    """
+        device: device to compute and store the output (when the backend is "torch").
+        backend: APIs to use, ``numpy`` or ``torch``.
+ """ + _backend = look_up_option(backend, TransformBackends) + if _backend == TransformBackends.NUMPY: + return _create_translate(spatial_dims=spatial_dims, shift=shift, eye_func=np.eye, array_func=np.asarray) + if _backend == TransformBackends.TORCH: + return _create_translate( + spatial_dims=spatial_dims, + shift=shift, + eye_func=lambda x: torch.eye(torch.as_tensor(x), device=device), # type: ignore + array_func=lambda x: torch.as_tensor(x, device=device), # type: ignore + ) + raise ValueError("backend {} is not supported".format(backend)) + + +def _create_translate( + spatial_dims: int, shift: Union[Sequence[float], float], eye_func=np.eye, array_func=np.asarray +) -> NdarrayOrTensor: shift = ensure_tuple(shift) - affine = np.eye(spatial_dims + 1) + affine = eye_func(spatial_dims + 1) for i, a in enumerate(shift[:spatial_dims]): affine[i, spatial_dims] = a - return np.asarray(affine) + return array_func(affine) # type: ignore def generate_spatial_bounding_box( diff --git a/monai/utils/type_conversion.py b/monai/utils/type_conversion.py index 3636dbc6c0..87095fef99 100644 --- a/monai/utils/type_conversion.py +++ b/monai/utils/type_conversion.py @@ -6,6 +6,7 @@ from monai.config.type_definitions import DtypeLike, NdarrayOrTensor from monai.utils import optional_import +from monai.utils.module import look_up_option cp, has_cp = optional_import("cupy") cp_ndarray, _ = optional_import("cupy", name="ndarray") @@ -41,33 +42,34 @@ def dtype_torch_to_numpy(dtype): """Convert a torch dtype to its numpy equivalent.""" - if dtype not in _torch_to_np_dtype: - raise ValueError(f"Unsupported torch to numpy dtype '{dtype}'.") - return _torch_to_np_dtype[dtype] + return look_up_option(dtype, _torch_to_np_dtype) def dtype_numpy_to_torch(dtype): """Convert a numpy dtype to its torch equivalent.""" # np dtypes can be given as np.float32 and np.dtype(np.float32) so unify them dtype = np.dtype(dtype) if type(dtype) is type else dtype - if dtype not in _np_to_torch_dtype: - raise ValueError(f"Unsupported numpy to torch dtype '{dtype}'.") - return _np_to_torch_dtype[dtype] + return look_up_option(dtype, _np_to_torch_dtype) def get_equivalent_dtype(dtype, data_type): """Convert to the `dtype` that corresponds to `data_type`. - Example: + + Example:: + im = torch.tensor(1) dtype = get_equivalent_dtype(np.float32, type(im)) + """ if dtype is None: return None if data_type is torch.Tensor: if type(dtype) is torch.dtype: + # already a torch dtype and target `data_type` is torch.Tensor return dtype return dtype_numpy_to_torch(dtype) if type(dtype) is not torch.dtype: + # assuming the dtype is ok if it is not a torch dtype and target `data_type` is not torch.Tensor return dtype return dtype_torch_to_numpy(dtype) @@ -193,12 +195,11 @@ def convert_to_cupy(data, dtype, wrap_sequence: bool = True): elif isinstance(data, dict): return {k: convert_to_cupy(v, dtype) for k, v in data.items()} # make it contiguous - if isinstance(data, cp.ndarray): - if data.ndim > 0: - data = cp.ascontiguousarray(data) - else: + if not isinstance(data, cp.ndarray): raise ValueError(f"The input data type [{type(data)}] cannot be converted into cupy arrays!") + if data.ndim > 0: + data = cp.ascontiguousarray(data) return data @@ -220,6 +221,15 @@ def convert_data_type( If left blank, it remains unchanged. 

     Returns:
         modified data, orig_type, orig_device
+
+    Note:
+        When both `output_type` and `dtype` are specified with different backend
+        (e.g., `torch.Tensor` and `np.float32`), the `output_type` will be used as the primary type,
+        for example::
+
+            >>> convert_data_type(1, torch.Tensor, dtype=np.float32)
+            (1.0, <class 'int'>, None)
+
     """
     orig_type: Any
     if isinstance(data, torch.Tensor):

diff --git a/tests/test_center_scale_crop.py b/tests/test_center_scale_crop.py
index e28849ce90..4c5bfc4fac 100644
--- a/tests/test_center_scale_crop.py
+++ b/tests/test_center_scale_crop.py
@@ -38,11 +38,13 @@ class TestCenterScaleCrop(unittest.TestCase):
     @parameterized.expand([TEST_CASE_0, TEST_CASE_1, TEST_CASE_3])
     def test_shape(self, input_param, input_data, expected_shape):
         result = CenterScaleCrop(**input_param)(input_data)
+        self.assertEqual(isinstance(result, torch.Tensor), isinstance(input_data, torch.Tensor))
         np.testing.assert_allclose(result.shape, expected_shape)

     @parameterized.expand([TEST_CASE_2])
     def test_value(self, input_param, input_data, expected_value):
         result = CenterScaleCrop(**input_param)(input_data)
+        self.assertEqual(isinstance(result, torch.Tensor), isinstance(input_data, torch.Tensor))
         np.testing.assert_allclose(result, expected_value)

diff --git a/tests/test_center_spatial_crop.py b/tests/test_center_spatial_crop.py
index 3e828176a5..d6a7edb305 100644
--- a/tests/test_center_spatial_crop.py
+++ b/tests/test_center_spatial_crop.py
@@ -38,11 +38,13 @@ class TestCenterSpatialCrop(unittest.TestCase):
     @parameterized.expand([TEST_CASE_0, TEST_CASE_1, TEST_CASE_3])
     def test_shape(self, input_param, input_data, expected_shape):
         result = CenterSpatialCrop(**input_param)(input_data)
+        self.assertEqual(isinstance(result, torch.Tensor), isinstance(input_data, torch.Tensor))
         np.testing.assert_allclose(result.shape, expected_shape)

     @parameterized.expand([TEST_CASE_2])
     def test_value(self, input_param, input_data, expected_value):
         result = CenterSpatialCrop(**input_param)(input_data)
+        self.assertEqual(isinstance(result, torch.Tensor), isinstance(input_data, torch.Tensor))
         np.testing.assert_allclose(result, expected_value)

diff --git a/tests/test_center_spatial_cropd.py b/tests/test_center_spatial_cropd.py
index 349253ab56..8ffcdf4387 100644
--- a/tests/test_center_spatial_cropd.py
+++ b/tests/test_center_spatial_cropd.py
@@ -15,36 +15,51 @@
 from parameterized import parameterized

 from monai.transforms import CenterSpatialCropd
+from tests.utils import TEST_NDARRAYS, assert_allclose

-TEST_CASE_0 = [
-    {"keys": "img", "roi_size": [2, -1, -1]},
-    {"img": np.random.randint(0, 2, size=[3, 3, 3, 3])},
-    (3, 2, 3, 3),
-]
+TEST_SHAPES = []
+for p in TEST_NDARRAYS:
+    TEST_SHAPES.append(
+        [
+            {"keys": "img", "roi_size": [2, -1, -1]},
+            {"img": p(np.random.randint(0, 2, size=[3, 3, 3, 3]))},
+            (3, 2, 3, 3),
+        ]
+    )

-TEST_CASE_1 = [
-    {"keys": "img", "roi_size": [2, 2, 2]},
-    {"img": np.random.randint(0, 2, size=[3, 3, 3, 3])},
-    (3, 2, 2, 2),
-]
+    TEST_SHAPES.append(
+        [
+            {"keys": "img", "roi_size": [2, 2, 2]},
+            {"img": p(np.random.randint(0, 2, size=[3, 3, 3, 3]))},
+            (3, 2, 2, 2),
+        ]
+    )

-TEST_CASE_2 = [
-    {"keys": "img", "roi_size": [2, 2]},
-    {"img": np.array([[[0, 0, 0, 0, 0], [0, 1, 2, 1, 0], [0, 2, 3, 2, 0], [0, 1, 2, 1, 0], [0, 0, 0, 0, 0]]])},
-    np.array([[[1, 2], [2, 3]]]),
-]
+TEST_CASES = []
+for p in TEST_NDARRAYS:
+    TEST_CASES.append(
+        [
+            {"keys": "img", "roi_size": [2, 2]},
+            {
+                "img": p(
+                    np.array([[[0, 0, 0, 0, 0], [0, 1, 2, 1, 0], [0, 2, 3, 2, 0], [0, 1, 2, 1, 0], [0, 0, 0, 0, 0]]])
+                )
}, + p(np.array([[[1, 2], [2, 3]]])), + ] + ) class TestCenterSpatialCropd(unittest.TestCase): - @parameterized.expand([TEST_CASE_0, TEST_CASE_1]) + @parameterized.expand(TEST_SHAPES) def test_shape(self, input_param, input_data, expected_shape): result = CenterSpatialCropd(**input_param)(input_data) self.assertTupleEqual(result["img"].shape, expected_shape) - @parameterized.expand([TEST_CASE_2]) + @parameterized.expand(TEST_CASES) def test_value(self, input_param, input_data, expected_value): result = CenterSpatialCropd(**input_param)(input_data) - np.testing.assert_allclose(result["img"], expected_value) + assert_allclose(result["img"], expected_value, type_test=False) if __name__ == "__main__": diff --git a/tests/test_create_grid_and_affine.py b/tests/test_create_grid_and_affine.py index 0c0e52e04a..b53eaa5b9d 100644 --- a/tests/test_create_grid_and_affine.py +++ b/tests/test_create_grid_and_affine.py @@ -12,6 +12,7 @@ import unittest import numpy as np +import torch from monai.transforms import ( create_control_grid, @@ -21,6 +22,7 @@ create_shear, create_translate, ) +from tests.utils import assert_allclose class TestCreateGrid(unittest.TestCase): @@ -32,50 +34,47 @@ def test_create_grid(self): with self.assertRaisesRegex(TypeError, ""): create_grid((1, 1), spacing=2.0) - g = create_grid((1, 1)) - expected = np.array([[[0.0]], [[0.0]], [[1.0]]]) - np.testing.assert_allclose(g, expected) + test_assert(create_grid, ((1, 1),), np.array([[[0.0]], [[0.0]], [[1.0]]])) - g = create_grid((1, 1), homogeneous=False) - expected = np.array([[[0.0]], [[0.0]]]) - np.testing.assert_allclose(g, expected) + test_assert(create_grid, ((1, 1), None, False), np.array([[[0.0]], [[0.0]]])) - g = create_grid((1, 1), spacing=(1.2, 1.3)) - expected = np.array([[[0.0]], [[0.0]], [[1.0]]]) - np.testing.assert_allclose(g, expected) + test_assert(create_grid, ((1, 1), (1.2, 1.3)), np.array([[[0.0]], [[0.0]], [[1.0]]])) - g = create_grid((1, 1, 1), spacing=(1.2, 1.3, 1.0)) - expected = np.array([[[[0.0]]], [[[0.0]]], [[[0.0]]], [[[1.0]]]]) - np.testing.assert_allclose(g, expected) + test_assert(create_grid, ((1, 1, 1), (1.2, 1.3, 1.0)), np.array([[[[0.0]]], [[[0.0]]], [[[0.0]]], [[[1.0]]]])) - g = create_grid((1, 1, 1), spacing=(1.2, 1.3, 1.0), homogeneous=False) - expected = np.array([[[[0.0]]], [[[0.0]]], [[[0.0]]]]) - np.testing.assert_allclose(g, expected) + test_assert(create_grid, ((1, 1, 1), (1.2, 1.3, 1.0), False), np.array([[[[0.0]]], [[[0.0]]], [[[0.0]]]])) g = create_grid((1, 1, 1), spacing=(1.2, 1.3, 1.0), dtype=np.int32) np.testing.assert_equal(g.dtype, np.int32) - g = create_grid((2, 2, 2)) - expected = np.array( - [ - [[[-0.5, -0.5], [-0.5, -0.5]], [[0.5, 0.5], [0.5, 0.5]]], - [[[-0.5, -0.5], [0.5, 0.5]], [[-0.5, -0.5], [0.5, 0.5]]], - [[[-0.5, 0.5], [-0.5, 0.5]], [[-0.5, 0.5], [-0.5, 0.5]]], - [[[1.0, 1.0], [1.0, 1.0]], [[1.0, 1.0], [1.0, 1.0]]], - ] + g = create_grid((1, 1, 1), spacing=(1.2, 1.3, 1.0), dtype=torch.float64, backend="torch") + np.testing.assert_equal(g.dtype, torch.float64) + + test_assert( + create_grid, + ((2, 2, 2),), + np.array( + [ + [[[-0.5, -0.5], [-0.5, -0.5]], [[0.5, 0.5], [0.5, 0.5]]], + [[[-0.5, -0.5], [0.5, 0.5]], [[-0.5, -0.5], [0.5, 0.5]]], + [[[-0.5, 0.5], [-0.5, 0.5]], [[-0.5, 0.5], [-0.5, 0.5]]], + [[[1.0, 1.0], [1.0, 1.0]], [[1.0, 1.0], [1.0, 1.0]]], + ] + ), ) - np.testing.assert_allclose(g, expected) - g = create_grid((2, 2, 2), spacing=(1.2, 1.3, 1.0)) - expected = np.array( - [ - [[[-0.6, -0.6], [-0.6, -0.6]], [[0.6, 0.6], [0.6, 0.6]]], - [[[-0.65, -0.65], 
[0.65, 0.65]], [[-0.65, -0.65], [0.65, 0.65]]], - [[[-0.5, 0.5], [-0.5, 0.5]], [[-0.5, 0.5], [-0.5, 0.5]]], - [[[1.0, 1.0], [1.0, 1.0]], [[1.0, 1.0], [1.0, 1.0]]], - ] + test_assert( + create_grid, + ((2, 2, 2), (1.2, 1.3, 1.0)), + np.array( + [ + [[[-0.6, -0.6], [-0.6, -0.6]], [[0.6, 0.6], [0.6, 0.6]]], + [[[-0.65, -0.65], [0.65, 0.65]], [[-0.65, -0.65], [0.65, 0.65]]], + [[[-0.5, 0.5], [-0.5, 0.5]], [[-0.5, 0.5], [-0.5, 0.5]]], + [[[1.0, 1.0], [1.0, 1.0]], [[1.0, 1.0], [1.0, 1.0]]], + ] + ), ) - np.testing.assert_allclose(g, expected) def test_create_control_grid(self): with self.assertRaisesRegex(TypeError, ""): @@ -83,72 +82,87 @@ def test_create_control_grid(self): with self.assertRaisesRegex(TypeError, ""): create_control_grid((1, 1), 2.0) - g = create_control_grid((1.0, 1.0), (1.0, 1.0)) - expected = np.array( - [ - [[-1.0, -1.0, -1.0], [0.0, 0.0, 0.0], [1.0, 1.0, 1.0]], - [[-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0]], - [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], - ] + test_assert( + create_control_grid, + ((1.0, 1.0), (1.0, 1.0)), + np.array( + [ + [[-1.0, -1.0, -1.0], [0.0, 0.0, 0.0], [1.0, 1.0, 1.0]], + [[-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0]], + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + ] + ), ) - np.testing.assert_allclose(g, expected) - g = create_control_grid((1.0, 1.0), (2.0, 2.0)) - expected = np.array( - [ - [[-2.0, -2.0, -2.0], [0.0, 0.0, 0.0], [2.0, 2.0, 2.0]], - [[-2.0, 0.0, 2.0], [-2.0, 0.0, 2.0], [-2.0, 0.0, 2.0]], - [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], - ] + test_assert( + create_control_grid, + ((1.0, 1.0), (2.0, 2.0)), + np.array( + [ + [[-2.0, -2.0, -2.0], [0.0, 0.0, 0.0], [2.0, 2.0, 2.0]], + [[-2.0, 0.0, 2.0], [-2.0, 0.0, 2.0], [-2.0, 0.0, 2.0]], + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + ] + ), ) - np.testing.assert_allclose(g, expected) - g = create_control_grid((2.0, 2.0), (1.0, 1.0)) - expected = np.array( - [ - [[-1.5, -1.5, -1.5, -1.5], [-0.5, -0.5, -0.5, -0.5], [0.5, 0.5, 0.5, 0.5], [1.5, 1.5, 1.5, 1.5]], - [[-1.5, -0.5, 0.5, 1.5], [-1.5, -0.5, 0.5, 1.5], [-1.5, -0.5, 0.5, 1.5], [-1.5, -0.5, 0.5, 1.5]], - [[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], - ] + test_assert( + create_control_grid, + ((2.0, 2.0), (1.0, 1.0)), + np.array( + [ + [[-1.5, -1.5, -1.5, -1.5], [-0.5, -0.5, -0.5, -0.5], [0.5, 0.5, 0.5, 0.5], [1.5, 1.5, 1.5, 1.5]], + [[-1.5, -0.5, 0.5, 1.5], [-1.5, -0.5, 0.5, 1.5], [-1.5, -0.5, 0.5, 1.5], [-1.5, -0.5, 0.5, 1.5]], + [[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], + ] + ), ) - np.testing.assert_allclose(g, expected) - g = create_control_grid((2.0, 2.0), (2.0, 2.0)) - expected = np.array( - [ - [[-3.0, -3.0, -3.0, -3.0], [-1.0, -1.0, -1.0, -1.0], [1.0, 1.0, 1.0, 1.0], [3.0, 3.0, 3.0, 3.0]], - [[-3.0, -1.0, 1.0, 3.0], [-3.0, -1.0, 1.0, 3.0], [-3.0, -1.0, 1.0, 3.0], [-3.0, -1.0, 1.0, 3.0]], - [[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], - ] + test_assert( + create_control_grid, + ((2.0, 2.0), (2.0, 2.0)), + np.array( + [ + [[-3.0, -3.0, -3.0, -3.0], [-1.0, -1.0, -1.0, -1.0], [1.0, 1.0, 1.0, 1.0], [3.0, 3.0, 3.0, 3.0]], + [[-3.0, -1.0, 1.0, 3.0], [-3.0, -1.0, 1.0, 3.0], [-3.0, -1.0, 1.0, 3.0], [-3.0, -1.0, 1.0, 3.0]], + [[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], + ] + ), ) - np.testing.assert_allclose(g, expected) - g = create_control_grid((1.0, 1.0, 1.0), (2.0, 2.0, 2.0), homogeneous=False) - 
expected = np.array( - [ - [ - [[-2.0, -2.0, -2.0], [-2.0, -2.0, -2.0], [-2.0, -2.0, -2.0]], - [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], - [[2.0, 2.0, 2.0], [2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], - ], - [ - [[-2.0, -2.0, -2.0], [0.0, 0.0, 0.0], [2.0, 2.0, 2.0]], - [[-2.0, -2.0, -2.0], [0.0, 0.0, 0.0], [2.0, 2.0, 2.0]], - [[-2.0, -2.0, -2.0], [0.0, 0.0, 0.0], [2.0, 2.0, 2.0]], - ], + test_assert( + create_control_grid, + ((1.0, 1.0, 1.0), (2.0, 2.0, 2.0), False), + np.array( [ - [[-2.0, 0.0, 2.0], [-2.0, 0.0, 2.0], [-2.0, 0.0, 2.0]], - [[-2.0, 0.0, 2.0], [-2.0, 0.0, 2.0], [-2.0, 0.0, 2.0]], - [[-2.0, 0.0, 2.0], [-2.0, 0.0, 2.0], [-2.0, 0.0, 2.0]], - ], - ] + [ + [[-2.0, -2.0, -2.0], [-2.0, -2.0, -2.0], [-2.0, -2.0, -2.0]], + [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], + [[2.0, 2.0, 2.0], [2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], + ], + [ + [[-2.0, -2.0, -2.0], [0.0, 0.0, 0.0], [2.0, 2.0, 2.0]], + [[-2.0, -2.0, -2.0], [0.0, 0.0, 0.0], [2.0, 2.0, 2.0]], + [[-2.0, -2.0, -2.0], [0.0, 0.0, 0.0], [2.0, 2.0, 2.0]], + ], + [ + [[-2.0, 0.0, 2.0], [-2.0, 0.0, 2.0], [-2.0, 0.0, 2.0]], + [[-2.0, 0.0, 2.0], [-2.0, 0.0, 2.0], [-2.0, 0.0, 2.0]], + [[-2.0, 0.0, 2.0], [-2.0, 0.0, 2.0], [-2.0, 0.0, 2.0]], + ], + ] + ), ) - np.testing.assert_allclose(g, expected) def test_assert(func, params, expected): - m = func(*params) - np.testing.assert_allclose(m, expected, atol=1e-7) + gpu_test = ("torch_gpu",) if torch.cuda.is_available() else () + for b in ("torch", "numpy") + gpu_test: + if b == "torch_gpu": + m = func(*params, device="cuda:0", backend="torch") + else: + m = func(*params, backend=b) + assert_allclose(m, expected, type_test=False, atol=1e-7) class TestCreateAffine(unittest.TestCase): diff --git a/tests/test_get_equivalent_dtype.py b/tests/test_get_equivalent_dtype.py index 04ba5ae5fb..de2379b15b 100644 --- a/tests/test_get_equivalent_dtype.py +++ b/tests/test_get_equivalent_dtype.py @@ -32,6 +32,14 @@ def test_get_equivalent_dtype(self, im, input_dtype): out_dtype = get_equivalent_dtype(input_dtype, type(im)) self.assertEqual(out_dtype, im.dtype) + def test_native_type(self): + """the get_equivalent_dtype currently doesn't change the build-in type""" + n_type = [float, int, bool] + for n in n_type: + for im_dtype in DTYPES: + out_dtype = get_equivalent_dtype(n, type(im_dtype)) + self.assertEqual(out_dtype, n) + if __name__ == "__main__": unittest.main() diff --git a/tests/test_rand_deform_grid.py b/tests/test_rand_deform_grid.py index 7c12c263d2..4725e28339 100644 --- a/tests/test_rand_deform_grid.py +++ b/tests/test_rand_deform_grid.py @@ -12,10 +12,10 @@ import unittest import numpy as np -import torch from parameterized import parameterized from monai.transforms import RandDeformGrid +from tests.utils import assert_allclose TEST_CASES = [ [ @@ -129,11 +129,7 @@ def test_rand_deform_grid(self, input_param, input_data, expected_val): g = RandDeformGrid(**input_param) g.set_random_state(123) result = g(**input_data) - self.assertEqual(isinstance(result, torch.Tensor), isinstance(expected_val, torch.Tensor)) - if isinstance(result, torch.Tensor): - np.testing.assert_allclose(result.cpu().numpy(), expected_val.cpu().numpy(), rtol=1e-4, atol=1e-4) - else: - np.testing.assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4) + assert_allclose(result, expected_val, type_test=False, rtol=1e-3, atol=1e-3) if __name__ == "__main__": diff --git a/tests/test_rand_scale_crop.py b/tests/test_rand_scale_crop.py index db5487ebff..a0c5471ffb 100644 --- a/tests/test_rand_scale_crop.py +++ 
b/tests/test_rand_scale_crop.py @@ -15,6 +15,7 @@ from parameterized import parameterized from monai.transforms import RandScaleCrop +from tests.utils import TEST_NDARRAYS, assert_allclose TEST_CASE_1 = [ {"roi_scale": [1.0, 1.0, -1.0], "random_center": True}, @@ -55,22 +56,25 @@ class TestRandScaleCrop(unittest.TestCase): @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) def test_shape(self, input_param, input_data, expected_shape): - result = RandScaleCrop(**input_param)(input_data) - self.assertTupleEqual(result.shape, expected_shape) + for p in TEST_NDARRAYS: + result = RandScaleCrop(**input_param)(p(input_data)) + self.assertTupleEqual(result.shape, expected_shape) @parameterized.expand([TEST_CASE_3]) def test_value(self, input_param, input_data): - cropper = RandScaleCrop(**input_param) - result = cropper(input_data) - roi = [(2 - i // 2, 2 + i - i // 2) for i in cropper._size] - np.testing.assert_allclose(result, input_data[:, roi[0][0] : roi[0][1], roi[1][0] : roi[1][1]]) + for p in TEST_NDARRAYS: + cropper = RandScaleCrop(**input_param) + result = cropper(p(input_data)) + roi = [(2 - i // 2, 2 + i - i // 2) for i in cropper._size] + assert_allclose(result, input_data[:, roi[0][0] : roi[0][1], roi[1][0] : roi[1][1]], type_test=False) @parameterized.expand([TEST_CASE_4, TEST_CASE_5, TEST_CASE_6]) def test_random_shape(self, input_param, input_data, expected_shape): - cropper = RandScaleCrop(**input_param) - cropper.set_random_state(seed=123) - result = cropper(input_data) - self.assertTupleEqual(result.shape, expected_shape) + for p in TEST_NDARRAYS: + cropper = RandScaleCrop(**input_param) + cropper.set_random_state(seed=123) + result = cropper(p(input_data)) + self.assertTupleEqual(result.shape, expected_shape) if __name__ == "__main__": diff --git a/tests/test_rand_scale_cropd.py b/tests/test_rand_scale_cropd.py index 265c6c467d..f78a81d339 100644 --- a/tests/test_rand_scale_cropd.py +++ b/tests/test_rand_scale_cropd.py @@ -15,6 +15,7 @@ from parameterized import parameterized from monai.transforms import RandScaleCropd +from tests.utils import TEST_NDARRAYS, assert_allclose TEST_CASE_1 = [ {"keys": "img", "roi_scale": [1.0, 1.0, -1.0], "random_center": True}, @@ -66,10 +67,14 @@ def test_shape(self, input_param, input_data, expected_shape): @parameterized.expand([TEST_CASE_3]) def test_value(self, input_param, input_data): - cropper = RandScaleCropd(**input_param) - result = cropper(input_data) - roi = [(2 - i // 2, 2 + i - i // 2) for i in cropper._size] - np.testing.assert_allclose(result["img"], input_data["img"][:, roi[0][0] : roi[0][1], roi[1][0] : roi[1][1]]) + for p in TEST_NDARRAYS: + cropper = RandScaleCropd(**input_param) + input_data["img"] = p(input_data["img"]) + result = cropper(input_data) + roi = [(2 - i // 2, 2 + i - i // 2) for i in cropper._size] + assert_allclose( + result["img"], input_data["img"][:, roi[0][0] : roi[0][1], roi[1][0] : roi[1][1]], type_test=False + ) @parameterized.expand([TEST_CASE_4, TEST_CASE_5, TEST_CASE_6]) def test_random_shape(self, input_param, input_data, expected_shape): diff --git a/tests/test_rand_spatial_crop.py b/tests/test_rand_spatial_crop.py index 01e057e589..19b1841c6d 100644 --- a/tests/test_rand_spatial_crop.py +++ b/tests/test_rand_spatial_crop.py @@ -15,6 +15,7 @@ from parameterized import parameterized from monai.transforms import RandSpatialCrop +from tests.utils import TEST_NDARRAYS, assert_allclose TEST_CASE_0 = [ {"roi_size": [3, 3, -1], "random_center": True}, @@ -56,10 +57,11 @@ def test_shape(self, input_param, 
diff --git a/tests/test_rand_spatial_crop.py b/tests/test_rand_spatial_crop.py
index 01e057e589..19b1841c6d 100644
--- a/tests/test_rand_spatial_crop.py
+++ b/tests/test_rand_spatial_crop.py
@@ -15,6 +15,7 @@
 from parameterized import parameterized
 
 from monai.transforms import RandSpatialCrop
+from tests.utils import TEST_NDARRAYS, assert_allclose
 
 TEST_CASE_0 = [
     {"roi_size": [3, 3, -1], "random_center": True},
@@ -56,10 +57,11 @@ def test_shape(self, input_param, input_data, expected_shape):
 
     @parameterized.expand([TEST_CASE_3])
     def test_value(self, input_param, input_data):
-        cropper = RandSpatialCrop(**input_param)
-        result = cropper(input_data)
-        roi = [(2 - i // 2, 2 + i - i // 2) for i in cropper._size]
-        np.testing.assert_allclose(result, input_data[:, roi[0][0] : roi[0][1], roi[1][0] : roi[1][1]])
+        for p in TEST_NDARRAYS:
+            cropper = RandSpatialCrop(**input_param)
+            result = cropper(p(input_data))
+            roi = [(2 - i // 2, 2 + i - i // 2) for i in cropper._size]
+            assert_allclose(result, input_data[:, roi[0][0] : roi[0][1], roi[1][0] : roi[1][1]], type_test=False)
 
     @parameterized.expand([TEST_CASE_4, TEST_CASE_5])
     def test_random_shape(self, input_param, input_data, expected_shape):
diff --git a/tests/test_rand_spatial_crop_samples.py b/tests/test_rand_spatial_crop_samples.py
index 0ade9bbbba..eefe7d0e0a 100644
--- a/tests/test_rand_spatial_crop_samples.py
+++ b/tests/test_rand_spatial_crop_samples.py
@@ -15,6 +15,7 @@
 from parameterized import parameterized
 
 from monai.transforms import RandSpatialCropSamples
+from tests.utils import TEST_NDARRAYS, assert_allclose
 
 TEST_CASE_1 = [
     {"roi_size": [3, 3, 3], "num_samples": 4, "random_center": True, "random_size": False},
@@ -70,14 +71,15 @@ class TestRandSpatialCropSamples(unittest.TestCase):
 
     @parameterized.expand([TEST_CASE_1, TEST_CASE_2])
     def test_shape(self, input_param, input_data, expected_shape, expected_last_item):
-        xform = RandSpatialCropSamples(**input_param)
-        xform.set_random_state(1234)
-        result = xform(input_data)
+        for p in TEST_NDARRAYS:
+            xform = RandSpatialCropSamples(**input_param)
+            xform.set_random_state(1234)
+            result = xform(p(input_data))
 
-        np.testing.assert_equal(len(result), input_param["num_samples"])
-        for item, expected in zip(result, expected_shape):
-            self.assertTupleEqual(item.shape, expected)
-        np.testing.assert_allclose(result[-1], expected_last_item)
+            np.testing.assert_equal(len(result), input_param["num_samples"])
+            for item, expected in zip(result, expected_shape):
+                self.assertTupleEqual(item.shape, expected)
+            assert_allclose(result[-1], expected_last_item, type_test=False)
 
 
 if __name__ == "__main__":
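The test_value checks above rebuild the crop window from the sampled cropper._size around the fixed center index 2: for a size i, the slice is [2 - i // 2, 2 + i - i // 2), whose length is exactly i for odd and even sizes alike. The arithmetic in isolation:

def center_roi(center, size):
    # size // 2 elements fall before the center and size - size // 2 at/after it,
    # so the window length is always exactly `size`
    return center - size // 2, center + size - size // 2


assert center_roi(2, 3) == (1, 4)  # odd size: 3 elements
assert center_roi(2, 2) == (1, 3)  # even size: 2 elements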
diff --git a/tests/test_rand_spatial_crop_samplesd.py b/tests/test_rand_spatial_crop_samplesd.py
index 3f5eee7b27..4b41ce3344 100644
--- a/tests/test_rand_spatial_crop_samplesd.py
+++ b/tests/test_rand_spatial_crop_samplesd.py
@@ -15,6 +15,7 @@
 from parameterized import parameterized
 
 from monai.transforms import Compose, RandSpatialCropSamplesd, ToTensord
+from tests.utils import TEST_NDARRAYS, assert_allclose
 
 TEST_CASE_1 = [
     {"keys": ["img", "seg"], "num_samples": 4, "roi_size": [2, 2, 2], "random_center": True},
@@ -38,31 +39,48 @@
     },
 ]
 
-TEST_CASE_2 = [
-    {"keys": ["img", "seg"], "num_samples": 8, "roi_size": [2, 2, 3], "random_center": False},
-    {"img": np.arange(81).reshape(3, 3, 3, 3), "seg": np.arange(81, 0, -1).reshape(3, 3, 3, 3)},
-    [(3, 3, 3, 3), (3, 2, 3, 3), (3, 2, 2, 3), (3, 2, 3, 3), (3, 3, 3, 3), (3, 3, 3, 3), (3, 2, 2, 3), (3, 3, 2, 3)],
-    {
-        "img": np.array(
-            [
-                [[[0, 1, 2], [3, 4, 5]], [[9, 10, 11], [12, 13, 14]], [[18, 19, 20], [21, 22, 23]]],
-                [[[27, 28, 29], [30, 31, 32]], [[36, 37, 38], [39, 40, 41]], [[45, 46, 47], [48, 49, 50]]],
-                [[[54, 55, 56], [57, 58, 59]], [[63, 64, 65], [66, 67, 68]], [[72, 73, 74], [75, 76, 77]]],
-            ]
-        ),
-        "seg": np.array(
-            [
-                [[[81, 80, 79], [78, 77, 76]], [[72, 71, 70], [69, 68, 67]], [[63, 62, 61], [60, 59, 58]]],
-                [[[54, 53, 52], [51, 50, 49]], [[45, 44, 43], [42, 41, 40]], [[36, 35, 34], [33, 32, 31]]],
-                [[[27, 26, 25], [24, 23, 22]], [[18, 17, 16], [15, 14, 13]], [[9, 8, 7], [6, 5, 4]]],
-            ]
-        ),
-    },
-]
+TEST_CASE_2 = []
+for p in TEST_NDARRAYS:
+    TEST_CASE_2.append(
+        [
+            {"keys": ["img", "seg"], "num_samples": 8, "roi_size": [2, 2, 3], "random_center": False},
+            {"img": p(np.arange(81).reshape(3, 3, 3, 3)), "seg": p(np.arange(81, 0, -1).reshape(3, 3, 3, 3))},
+            [
+                (3, 3, 3, 3),
+                (3, 2, 3, 3),
+                (3, 2, 2, 3),
+                (3, 2, 3, 3),
+                (3, 3, 3, 3),
+                (3, 3, 3, 3),
+                (3, 2, 2, 3),
+                (3, 3, 2, 3),
+            ],
+            {
+                "img": p(
+                    np.array(
+                        [
+                            [[[0, 1, 2], [3, 4, 5]], [[9, 10, 11], [12, 13, 14]], [[18, 19, 20], [21, 22, 23]]],
+                            [[[27, 28, 29], [30, 31, 32]], [[36, 37, 38], [39, 40, 41]], [[45, 46, 47], [48, 49, 50]]],
+                            [[[54, 55, 56], [57, 58, 59]], [[63, 64, 65], [66, 67, 68]], [[72, 73, 74], [75, 76, 77]]],
+                        ]
+                    )
+                ),
+                "seg": p(
+                    np.array(
+                        [
+                            [[[81, 80, 79], [78, 77, 76]], [[72, 71, 70], [69, 68, 67]], [[63, 62, 61], [60, 59, 58]]],
+                            [[[54, 53, 52], [51, 50, 49]], [[45, 44, 43], [42, 41, 40]], [[36, 35, 34], [33, 32, 31]]],
+                            [[[27, 26, 25], [24, 23, 22]], [[18, 17, 16], [15, 14, 13]], [[9, 8, 7], [6, 5, 4]]],
+                        ]
+                    )
+                ),
+            },
+        ]
+    )
 
 
 class TestRandSpatialCropSamplesd(unittest.TestCase):
-    @parameterized.expand([TEST_CASE_1, TEST_CASE_2])
+    @parameterized.expand([TEST_CASE_1, *TEST_CASE_2])
     def test_shape(self, input_param, input_data, expected_shape, expected_last):
         xform = RandSpatialCropSamplesd(**input_param)
         xform.set_random_state(1234)
@@ -73,8 +91,8 @@ def test_shape(self, input_param, input_data, expected_shape, expected_last):
         for i, item in enumerate(result):
             self.assertEqual(item["img_meta_dict"]["patch_index"], i)
             self.assertEqual(item["seg_meta_dict"]["patch_index"], i)
-            np.testing.assert_allclose(item["img"], expected_last["img"])
-            np.testing.assert_allclose(item["seg"], expected_last["seg"])
+            assert_allclose(item["img"], expected_last["img"], type_test=True)
+            assert_allclose(item["seg"], expected_last["seg"], type_test=True)
 
     def test_deep_copy(self):
         data = {"img": np.ones((1, 10, 11, 12))}
diff --git a/tests/test_rand_spatial_cropd.py b/tests/test_rand_spatial_cropd.py
index 610c1974aa..edcb61dc99 100644
--- a/tests/test_rand_spatial_cropd.py
+++ b/tests/test_rand_spatial_cropd.py
@@ -15,6 +15,7 @@
 from parameterized import parameterized
 
 from monai.transforms import RandSpatialCropd
+from tests.utils import TEST_NDARRAYS
 
 TEST_CASE_0 = [
     {"keys": "img", "roi_size": [3, 3, -1], "random_center": True},
@@ -67,10 +68,12 @@ def test_value(self, input_param, input_data):
 
     @parameterized.expand([TEST_CASE_4, TEST_CASE_5])
     def test_random_shape(self, input_param, input_data, expected_shape):
-        cropper = RandSpatialCropd(**input_param)
-        cropper.set_random_state(seed=123)
-        result = cropper(input_data)
-        self.assertTupleEqual(result["img"].shape, expected_shape)
+        for p in TEST_NDARRAYS:
+            cropper = RandSpatialCropd(**input_param)
+            cropper.set_random_state(seed=123)
+            input_data["img"] = p(input_data["img"])
+            result = cropper(input_data)
+            self.assertTupleEqual(result["img"].shape, expected_shape)
 
 
 if __name__ == "__main__":
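Note that test_rand_spatial_crop_samplesd.py takes the other of the two parametrization styles in this patch: instead of looping over TEST_NDARRAYS inside the test body, it bakes one case per container into the case list at import time and splats it with expand([TEST_CASE_1, *TEST_CASE_2]). A compact sketch of that style (converter tuple and case data are illustrative):

import unittest

import numpy as np
import torch
from parameterized import parameterized

CONVERTERS = (np.array, torch.as_tensor)  # stand-in for tests.utils.TEST_NDARRAYS

# one parameterized case per container type, built at import time
CASES = [[p(np.zeros((1, 4, 4))), (1, 4, 4)] for p in CONVERTERS]


class TestShapes(unittest.TestCase):
    @parameterized.expand(CASES)
    def test_shape(self, img, expected_shape):
        self.assertTupleEqual(tuple(img.shape), expected_shape)


if __name__ == "__main__":
    unittest.main()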
diff --git a/tests/test_resize.py b/tests/test_resize.py
index e5ec5dd1a9..f6c4a8b14b 100644
--- a/tests/test_resize.py
+++ b/tests/test_resize.py
@@ -16,7 +16,7 @@
 from parameterized import parameterized
 
 from monai.transforms import Resize
-from tests.utils import NumpyImageTestCase2D
+from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose
 
 TEST_CASE_0 = [{"spatial_size": 15}, (6, 10, 15)]
 
@@ -45,16 +45,22 @@ def test_correct_results(self, spatial_size, mode):
         _order = 1
         if spatial_size == (32, -1):
             spatial_size = (32, 64)
-        expected = []
-        for channel in self.imt[0]:
-            expected.append(
-                skimage.transform.resize(
-                    channel, spatial_size, order=_order, clip=False, preserve_range=False, anti_aliasing=False
-                )
+        expected = [
+            skimage.transform.resize(
+                channel,
+                spatial_size,
+                order=_order,
+                clip=False,
+                preserve_range=False,
+                anti_aliasing=False,
             )
+            for channel in self.imt[0]
+        ]
+
         expected = np.stack(expected).astype(np.float32)
-        out = resize(self.imt[0])
-        np.testing.assert_allclose(out, expected, atol=0.9)
+        for p in TEST_NDARRAYS:
+            out = resize(p(self.imt[0]))
+            assert_allclose(out, expected, type_test=False, atol=0.9)
 
     @parameterized.expand([TEST_CASE_0, TEST_CASE_1, TEST_CASE_2])
     def test_longest_shape(self, input_param, expected_shape):
diff --git a/tests/test_resize_with_pad_or_crop.py b/tests/test_resize_with_pad_or_crop.py
index 46f1fc86cc..2162a0bb1b 100644
--- a/tests/test_resize_with_pad_or_crop.py
+++ b/tests/test_resize_with_pad_or_crop.py
@@ -12,9 +12,11 @@
 import unittest
 
 import numpy as np
+import torch
 from parameterized import parameterized
 
 from monai.transforms import ResizeWithPadOrCrop
+from tests.utils import TEST_NDARRAYS
 
 TEST_CASES = [
     [
@@ -48,11 +50,16 @@ class TestResizeWithPadOrCrop(unittest.TestCase):
 
     @parameterized.expand(TEST_CASES)
     def test_pad_shape(self, input_param, input_shape, expected_shape):
-        paddcroper = ResizeWithPadOrCrop(**input_param)
-        result = paddcroper(np.zeros(input_shape))
-        np.testing.assert_allclose(result.shape, expected_shape)
-        result = paddcroper(np.zeros(input_shape), mode="constant")
-        np.testing.assert_allclose(result.shape, expected_shape)
+        for p in TEST_NDARRAYS:
+            if isinstance(p(0), torch.Tensor) and (
+                "constant_values" in input_param or input_param["mode"] == "reflect"
+            ):
+                continue
+            paddcroper = ResizeWithPadOrCrop(**input_param)
+            result = paddcroper(p(np.zeros(input_shape)))
+            np.testing.assert_allclose(result.shape, expected_shape)
+            result = paddcroper(p(np.zeros(input_shape)), mode="constant")
+            np.testing.assert_allclose(result.shape, expected_shape)
 
 
 if __name__ == "__main__":
diff --git a/tests/test_resize_with_pad_or_cropd.py b/tests/test_resize_with_pad_or_cropd.py
index 32a62a9e16..58f6c92a8f 100644
--- a/tests/test_resize_with_pad_or_cropd.py
+++ b/tests/test_resize_with_pad_or_cropd.py
@@ -12,9 +12,11 @@
 import unittest
 
 import numpy as np
+import torch
 from parameterized import parameterized
 
 from monai.transforms import ResizeWithPadOrCropd
+from tests.utils import TEST_NDARRAYS
 
 TEST_CASES = [
     [
@@ -48,9 +50,15 @@ class TestResizeWithPadOrCropd(unittest.TestCase):
 
     @parameterized.expand(TEST_CASES)
     def test_pad_shape(self, input_param, input_data, expected_val):
-        paddcroper = ResizeWithPadOrCropd(**input_param)
-        result = paddcroper(input_data)
-        np.testing.assert_allclose(result["img"].shape, expected_val)
+        for p in TEST_NDARRAYS:
+            if isinstance(p(0), torch.Tensor) and (
+                "constant_values" in input_param or input_param["mode"] == "reflect"
+            ):
+                continue
+            paddcroper = ResizeWithPadOrCropd(**input_param)
+            input_data["img"] = p(input_data["img"])
+            result = paddcroper(input_data)
+            np.testing.assert_allclose(result["img"].shape, expected_val)
 
 
 if __name__ == "__main__":
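The continue guard in the two ResizeWithPadOrCrop tests reflects a real backend gap rather than a shortcut: torch.nn.functional.pad takes value= instead of numpy's constant_values=, and its "reflect" mode supports fewer layouts, so those parameter sets are skipped for tensor inputs instead of being reported as failures. The guard as a standalone predicate (a sketch; skip_for_tensor is not a name from the patch):

import numpy as np
import torch


def skip_for_tensor(converter, input_param):
    # torch pad has `value=` rather than numpy's `constant_values=`, and its
    # "reflect" mode covers fewer layouts, so skip those cases for tensors
    is_tensor = isinstance(converter(0), torch.Tensor)
    return is_tensor and ("constant_values" in input_param or input_param.get("mode") == "reflect")


assert skip_for_tensor(torch.as_tensor, {"mode": "reflect"})
assert not skip_for_tensor(np.array, {"mode": "reflect"})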
diff --git a/tests/test_resized.py b/tests/test_resized.py
index 930faf00eb..47b8e8a704 100644
--- a/tests/test_resized.py
+++ b/tests/test_resized.py
@@ -16,7 +16,7 @@
 from parameterized import parameterized
 
 from monai.transforms import Resized
-from tests.utils import NumpyImageTestCase2D
+from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose
 
 TEST_CASE_0 = [{"keys": "img", "spatial_size": 15}, (6, 10, 15)]
 
@@ -48,16 +48,22 @@ def test_correct_results(self, spatial_size, mode):
         _order = 1
         if spatial_size == (32, -1):
             spatial_size = (32, 64)
-        expected = []
-        for channel in self.imt[0]:
-            expected.append(
-                skimage.transform.resize(
-                    channel, spatial_size, order=_order, clip=False, preserve_range=False, anti_aliasing=False
-                )
+        expected = [
+            skimage.transform.resize(
+                channel,
+                spatial_size,
+                order=_order,
+                clip=False,
+                preserve_range=False,
+                anti_aliasing=False,
            )
+            for channel in self.imt[0]
+        ]
+
         expected = np.stack(expected).astype(np.float32)
-        out = resize({"img": self.imt[0]})["img"]
-        np.testing.assert_allclose(out, expected, atol=0.9)
+        for p in TEST_NDARRAYS:
+            out = resize({"img": p(self.imt[0])})["img"]
+            assert_allclose(out, expected, type_test=False, atol=0.9)
 
     @parameterized.expand([TEST_CASE_0, TEST_CASE_1, TEST_CASE_2, TEST_CASE_3])
     def test_longest_shape(self, input_param, expected_shape):
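All of the comparisons in this patch funnel through tests.utils.assert_allclose, with type_test controlling whether container types must match in addition to values. A sketch of the contract these tests rely on, not the actual implementation in tests/utils.py:

import numpy as np
import torch


def assert_allclose(actual, desired, type_test=True, **kwargs):
    if type_test:
        # a numpy result compared against a torch expectation (or vice versa)
        # fails here before any values are looked at
        assert isinstance(actual, torch.Tensor) == isinstance(desired, torch.Tensor)
    if isinstance(actual, torch.Tensor):
        actual = actual.cpu().numpy()
    if isinstance(desired, torch.Tensor):
        desired = desired.cpu().numpy()
    np.testing.assert_allclose(actual, desired, **kwargs)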