diff --git a/CHANGELOG.md b/CHANGELOG.md
index 55a0ca11e9..bdbd23e7dd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
 and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
 
 ## [Unreleased]
+* renamed the models' `n_classes` argument to `num_classes`
+
 ## [0.6.0] - 2021-07-08
 ### Added
 * 10 new transforms, a masked loss wrapper, and a `NetAdapter` for transfer learning
diff --git a/monai/losses/tversky.py b/monai/losses/tversky.py
index 1d75b9e8cc..1cc0e1d8d7 100644
--- a/monai/losses/tversky.py
+++ b/monai/losses/tversky.py
@@ -155,7 +155,7 @@ def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
         if self.reduction == LossReduction.SUM.value:
             return torch.sum(score)  # sum over the batch and channel dims
         if self.reduction == LossReduction.NONE.value:
-            return score  # returns [N, n_classes] losses
+            return score  # returns [N, num_classes] losses
         if self.reduction == LossReduction.MEAN.value:
             return torch.mean(score)
         raise ValueError(f'Unsupported reduction: {self.reduction}, available options are ["mean", "sum", "none"].')
diff --git a/monai/metrics/meandice.py b/monai/metrics/meandice.py
index 1bfd85a83e..226c106f7e 100644
--- a/monai/metrics/meandice.py
+++ b/monai/metrics/meandice.py
@@ -114,7 +114,7 @@ def compute_meandice(
             the predicted output. Defaults to True.
 
     Returns:
-        Dice scores per batch and per class, (shape [batch_size, n_classes]).
+        Dice scores per batch and per class, (shape [batch_size, num_classes]).
 
     Raises:
         ValueError: when `y_pred` and `y` have different shapes.
diff --git a/monai/metrics/rocauc.py b/monai/metrics/rocauc.py
index 3bd6c0d69c..c2679cc2ea 100644
--- a/monai/metrics/rocauc.py
+++ b/monai/metrics/rocauc.py
@@ -131,9 +131,9 @@ def compute_roc_auc(
     y_pred_ndim = y_pred.ndimension()
     y_ndim = y.ndimension()
    if y_pred_ndim not in (1, 2):
-        raise ValueError("Predictions should be of shape (batch_size, n_classes) or (batch_size, ).")
+        raise ValueError("Predictions should be of shape (batch_size, num_classes) or (batch_size, ).")
     if y_ndim not in (1, 2):
-        raise ValueError("Targets should be of shape (batch_size, n_classes) or (batch_size, ).")
+        raise ValueError("Targets should be of shape (batch_size, num_classes) or (batch_size, ).")
     if y_pred_ndim == 2 and y_pred.shape[1] == 1:
         y_pred = y_pred.squeeze(dim=-1)
         y_pred_ndim = 1
diff --git a/monai/networks/nets/netadapter.py b/monai/networks/nets/netadapter.py
index bc88454f87..80288f7945 100644
--- a/monai/networks/nets/netadapter.py
+++ b/monai/networks/nets/netadapter.py
@@ -14,6 +14,7 @@
 import torch
 
 from monai.networks.layers import Conv, get_pool_layer
+from monai.utils import deprecated_arg
 
 
 class NetAdapter(torch.nn.Module):
@@ -26,7 +27,7 @@ class NetAdapter(torch.nn.Module):
         model: a PyTorch model, support both 2D and 3D models. typically, it can be a pretrained model
             in Torchvision, like: ``resnet18``, ``resnet34m``, ``resnet50``, ``resnet101``, ``resnet152``, etc.
             more details: https://pytorch.org/vision/stable/models.html.
-        n_classes: number of classes for the last classification layer. Default to 1.
+        num_classes: number of classes for the last classification layer. Default to 1.
         dim: number of spatial dimensions, default to 2.
         in_channels: number of the input channels of last layer. if None, get it from `in_features` of last layer.
         use_conv: whether use convolutional layer to replace the last layer, default to False.
@@ -38,17 +39,22 @@ class NetAdapter(torch.nn.Module):
 
     """
 
+    @deprecated_arg("n_classes", since="0.6")
     def __init__(
         self,
         model: torch.nn.Module,
-        n_classes: int = 1,
+        num_classes: int = 1,
         dim: int = 2,
         in_channels: Optional[int] = None,
         use_conv: bool = False,
         pool: Optional[Tuple[str, Dict[str, Any]]] = ("avg", {"kernel_size": 7, "stride": 1}),
         bias: bool = True,
+        n_classes: Optional[int] = None,
     ):
         super().__init__()
+        # backward compatibility: honor the deprecated n_classes only when num_classes keeps its default
+        if n_classes is not None and num_classes == 1:
+            num_classes = n_classes
         layers = list(model.children())
         orig_fc = layers[-1]
         in_channels_: int
@@ -74,7 +80,7 @@ def __init__(
             # add 1x1 conv (it behaves like a FC layer)
             self.fc = Conv[Conv.CONV, dim](
                 in_channels=in_channels_,
-                out_channels=n_classes,
+                out_channels=num_classes,
                 kernel_size=1,
                 bias=bias,
             )
@@ -84,7 +90,7 @@ def __init__(
             # replace the out_features of FC layer
             self.fc = torch.nn.Linear(
                 in_features=in_channels_,
-                out_features=n_classes,
+                out_features=num_classes,
                 bias=bias,
             )
         self.use_conv = use_conv
diff --git a/monai/networks/nets/resnet.py b/monai/networks/nets/resnet.py
index f34de563ce..a5e6b7ab81 100644
--- a/monai/networks/nets/resnet.py
+++ b/monai/networks/nets/resnet.py
@@ -10,7 +10,7 @@
 # limitations under the License.
 
 from functools import partial
-from typing import Any, Callable, List, Type, Union
+from typing import Any, Callable, List, Optional, Type, Union
 
 import torch
 import torch.nn as nn
@@ -20,6 +20,8 @@
 
 __all__ = ["ResNet", "resnet10", "resnet18", "resnet34", "resnet50", "resnet101", "resnet152", "resnet200"]
 
+from monai.utils import deprecated_arg
+
 
 def get_inplanes():
     return [64, 128, 256, 512]
@@ -162,9 +164,10 @@ class ResNet(nn.Module):
         no_max_pool: bool argument to determine if to use maxpool layer.
         shortcut_type: which downsample block to use.
         widen_factor: widen output for each layer.
-        n_classes: number of output (classifications)
+        num_classes: number of output (classifications)
     """
 
+    @deprecated_arg("n_classes", since="0.6")
     def __init__(
         self,
         block: Type[Union[ResNetBlock, ResNetBottleneck]],
@@ -177,11 +180,15 @@ def __init__(
         no_max_pool: bool = False,
         shortcut_type: str = "B",
         widen_factor: float = 1.0,
-        n_classes: int = 400,
+        num_classes: int = 400,
         feed_forward: bool = True,
+        n_classes: Optional[int] = None,
     ) -> None:
 
         super(ResNet, self).__init__()
+        # backward compatibility: honor the deprecated n_classes only when num_classes keeps its default
+        if n_classes is not None and num_classes == 400:
+            num_classes = n_classes
 
         conv_type: Type[Union[nn.Conv1d, nn.Conv2d, nn.Conv3d]] = Conv[Conv.CONV, spatial_dims]
         norm_type: Type[Union[nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d]] = Norm[Norm.BATCH, spatial_dims]
@@ -215,7 +222,7 @@ def __init__(
         self.avgpool = avgp_type(block_avgpool[spatial_dims])
 
         if feed_forward:
-            self.fc = nn.Linear(block_inplanes[3] * block.expansion, n_classes)
+            self.fc = nn.Linear(block_inplanes[3] * block.expansion, num_classes)
 
         for m in self.modules():
             if isinstance(m, conv_type):
@@ -303,7 +310,7 @@ def _resnet(
     progress: bool,
     **kwargs: Any,
 ) -> ResNet:
-    model = ResNet(block, layers, block_inplanes, **kwargs)
+    model: ResNet = ResNet(block, layers, block_inplanes, **kwargs)
     if pretrained:
         # Author of paper zipped the state_dict on googledrive,
         # so would need to download, unzip and read (2.8gb file for a ~150mb state dict).
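
Reviewer note (not part of the patch): every constructor touched above repeats the same two-part shim — the `@deprecated_arg("n_classes", since="0.6")` decorator is expected to emit the deprecation warning, while the explicit `if n_classes is not None and num_classes == <default>` branch maps the old keyword onto the new one. A minimal self-contained sketch of the pattern, using a simplified, hypothetical stand-in for `monai.utils.deprecated_arg` (not the library's actual implementation):

    import warnings
    from functools import wraps
    from typing import Optional

    def deprecated_arg_stub(name: str, since: str):
        """Simplified stand-in for monai.utils.deprecated_arg: warn whenever `name` is passed."""
        def decorator(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                if name in kwargs:
                    warnings.warn(f"argument `{name}` has been deprecated since version {since}.", DeprecationWarning)
                return func(*args, **kwargs)
            return wrapper
        return decorator

    class Classifier:
        @deprecated_arg_stub("n_classes", since="0.6")
        def __init__(self, num_classes: int = 1, n_classes: Optional[int] = None):
            # honor the deprecated keyword only when the new one keeps its default
            if n_classes is not None and num_classes == 1:
                num_classes = n_classes
            self.num_classes = num_classes

    assert Classifier(num_classes=5).num_classes == 5  # new keyword
    assert Classifier(n_classes=5).num_classes == 5  # old keyword still works, with a warning

One caveat visible in the pattern: an explicit `num_classes` equal to the default cannot be distinguished from "not passed", so `n_classes` wins in that corner case.
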
diff --git a/monai/networks/nets/torchvision_fc.py b/monai/networks/nets/torchvision_fc.py
index 2c4c7c8c32..1619f877e7 100644
--- a/monai/networks/nets/torchvision_fc.py
+++ b/monai/networks/nets/torchvision_fc.py
@@ -12,7 +12,7 @@
 from typing import Any, Dict, Optional, Tuple, Union
 
 from monai.networks.nets import NetAdapter
-from monai.utils import deprecated, optional_import
+from monai.utils import deprecated, deprecated_arg, optional_import
 
 models, _ = optional_import("torchvision.models")
 
@@ -29,7 +29,7 @@ class TorchVisionFCModel(NetAdapter):
             ``resnet18`` (default), ``resnet34m``, ``resnet50``, ``resnet101``, ``resnet152``,
             ``resnext50_32x4d``, ``resnext101_32x8d``, ``wide_resnet50_2``, ``wide_resnet101_2``.
             model details: https://pytorch.org/vision/stable/models.html.
-        n_classes: number of classes for the last classification layer. Default to 1.
+        num_classes: number of classes for the last classification layer. Default to 1.
         dim: number of spatial dimensions, default to 2.
         in_channels: number of the input channels of last layer. if None, get it from `in_features` of last layer.
         use_conv: whether use convolutional layer to replace the last layer, default to False.
@@ -41,17 +41,22 @@ class TorchVisionFCModel(NetAdapter):
         pretrained: whether to use the imagenet pretrained weights. Default to False.
     """
 
+    @deprecated_arg("n_classes", since="0.6")
     def __init__(
         self,
         model_name: str = "resnet18",
-        n_classes: int = 1,
+        num_classes: int = 1,
         dim: int = 2,
         in_channels: Optional[int] = None,
         use_conv: bool = False,
         pool: Optional[Tuple[str, Dict[str, Any]]] = ("avg", {"kernel_size": 7, "stride": 1}),
         bias: bool = True,
         pretrained: bool = False,
+        n_classes: Optional[int] = None,
     ):
+        # backward compatibility: honor the deprecated n_classes only when num_classes keeps its default
+        if n_classes is not None and num_classes == 1:
+            num_classes = n_classes
         model = getattr(models, model_name)(pretrained=pretrained)
         # check if the model is compatible, should have a FC layer at the end
         if not str(list(model.children())[-1]).startswith("Linear"):
@@ -59,7 +64,7 @@ def __init__(
 
         super().__init__(
             model=model,
-            n_classes=n_classes,
+            num_classes=num_classes,
             dim=dim,
             in_channels=in_channels,
             use_conv=use_conv,
@@ -77,7 +82,7 @@ class TorchVisionFullyConvModel(TorchVisionFCModel):
         model_name: name of any torchvision with adaptive avg pooling and fully connected layer at the end.
             ``resnet18`` (default), ``resnet34m``, ``resnet50``, ``resnet101``, ``resnet152``,
             ``resnext50_32x4d``, ``resnext101_32x8d``, ``wide_resnet50_2``, ``wide_resnet101_2``.
-        n_classes: number of classes for the last classification layer. Default to 1.
+        num_classes: number of classes for the last classification layer. Default to 1.
         pool_size: the kernel size for `AvgPool2d` to replace `AdaptiveAvgPool2d`. Default to (7, 7).
         pool_stride: the stride for `AvgPool2d` to replace `AdaptiveAvgPool2d`. Default to 1.
         pretrained: whether to use the imagenet pretrained weights. Default to False.
@@ -87,17 +92,22 @@ class TorchVisionFullyConvModel(TorchVisionFCModel):
 
     """
 
+    @deprecated_arg("n_classes", since="0.6")
     def __init__(
         self,
         model_name: str = "resnet18",
-        n_classes: int = 1,
+        num_classes: int = 1,
         pool_size: Union[int, Tuple[int, int]] = (7, 7),
         pool_stride: Union[int, Tuple[int, int]] = 1,
         pretrained: bool = False,
+        n_classes: Optional[int] = None,
     ):
+        # backward compatibility: honor the deprecated n_classes only when num_classes keeps its default
+        if n_classes is not None and num_classes == 1:
+            num_classes = n_classes
         super().__init__(
             model_name=model_name,
-            n_classes=n_classes,
+            num_classes=num_classes,
             use_conv=True,
             pool=("avg", {"kernel_size": pool_size, "stride": pool_stride}),
             pretrained=pretrained,
diff --git a/monai/transforms/post/array.py b/monai/transforms/post/array.py
index 7b3e7b4fd2..631947025c 100644
--- a/monai/transforms/post/array.py
+++ b/monai/transforms/post/array.py
@@ -25,7 +25,7 @@
 from monai.networks.layers import GaussianFilter
 from monai.transforms.transform import Transform
 from monai.transforms.utils import fill_holes, get_largest_connected_component_mask
-from monai.utils import ensure_tuple, look_up_option
+from monai.utils import deprecated_arg, ensure_tuple, look_up_option
 
 __all__ = [
     "Activations",
@@ -120,7 +120,7 @@ class AsDiscrete(Transform):
             Defaults to ``False``.
         to_onehot: whether to convert input data into the one-hot format.
             Defaults to ``False``.
-        n_classes: the number of classes to convert to One-Hot format.
+        num_classes: the number of classes to convert to One-Hot format.
             Defaults to ``None``.
         threshold_values: whether threshold the float value to int number 0 or 1.
             Defaults to ``False``.
@@ -131,31 +131,38 @@ class AsDiscrete(Transform):
 
     """
 
+    @deprecated_arg("n_classes", since="0.6")
     def __init__(
         self,
         argmax: bool = False,
         to_onehot: bool = False,
-        n_classes: Optional[int] = None,
+        num_classes: Optional[int] = None,
         threshold_values: bool = False,
         logit_thresh: float = 0.5,
         rounding: Optional[str] = None,
+        n_classes: Optional[int] = None,
     ) -> None:
+        # backward compatibility: honor the deprecated n_classes only when num_classes keeps its default
+        if n_classes is not None and num_classes is None:
+            num_classes = n_classes
         self.argmax = argmax
         self.to_onehot = to_onehot
-        self.n_classes = n_classes
+        self.num_classes = num_classes
         self.threshold_values = threshold_values
         self.logit_thresh = logit_thresh
         self.rounding = rounding
 
+    @deprecated_arg("n_classes", since="0.6")
     def __call__(
         self,
         img: torch.Tensor,
         argmax: Optional[bool] = None,
         to_onehot: Optional[bool] = None,
-        n_classes: Optional[int] = None,
+        num_classes: Optional[int] = None,
         threshold_values: Optional[bool] = None,
         logit_thresh: Optional[float] = None,
         rounding: Optional[str] = None,
+        n_classes: Optional[int] = None,
    ) -> torch.Tensor:
         """
         Args:
@@ -165,8 +172,8 @@ def __call__(
             Defaults to ``self.argmax``.
         to_onehot: whether to convert input data into the one-hot format.
             Defaults to ``self.to_onehot``.
-        n_classes: the number of classes to convert to One-Hot format.
-            Defaults to ``self.n_classes``.
+        num_classes: the number of classes to convert to One-Hot format.
+            Defaults to ``self.num_classes``.
         threshold_values: whether threshold the float value to int number 0 or 1.
             Defaults to ``self.threshold_values``.
         logit_thresh: the threshold value for thresholding operation..
@@ -175,13 +182,16 @@ def __call__(
                 available options: ["torchrounding"].
 
""" + # in case the new num_classes is default but you still call deprecated n_classes + if n_classes is not None and num_classes is None: + num_classes = n_classes if argmax or self.argmax: img = torch.argmax(img, dim=0, keepdim=True) if to_onehot or self.to_onehot: - _nclasses = self.n_classes if n_classes is None else n_classes + _nclasses = self.num_classes if num_classes is None else num_classes if not isinstance(_nclasses, int): - raise AssertionError("One of self.n_classes or n_classes must be an integer") + raise AssertionError("One of self.num_classes or num_classes must be an integer") img = one_hot(img, num_classes=_nclasses, dim=0) if threshold_values or self.threshold_values: diff --git a/monai/transforms/post/dictionary.py b/monai/transforms/post/dictionary.py index d4e039339b..2fc3993e3e 100644 --- a/monai/transforms/post/dictionary.py +++ b/monai/transforms/post/dictionary.py @@ -39,7 +39,7 @@ from monai.transforms.transform import MapTransform from monai.transforms.utility.array import ToTensor from monai.transforms.utils import allow_missing_keys_mode, convert_inverse_interp_mode -from monai.utils import ensure_tuple, ensure_tuple_rep +from monai.utils import deprecated_arg, ensure_tuple, ensure_tuple_rep from monai.utils.enums import InverseKeys __all__ = [ @@ -126,16 +126,18 @@ class AsDiscreted(MapTransform): Dictionary-based wrapper of :py:class:`monai.transforms.AsDiscrete`. """ + @deprecated_arg("n_classes", since="0.6") def __init__( self, keys: KeysCollection, argmax: Union[Sequence[bool], bool] = False, to_onehot: Union[Sequence[bool], bool] = False, - n_classes: Optional[Union[Sequence[int], int]] = None, + num_classes: Optional[Union[Sequence[int], int]] = None, threshold_values: Union[Sequence[bool], bool] = False, logit_thresh: Union[Sequence[float], float] = 0.5, rounding: Union[Sequence[Optional[str]], Optional[str]] = None, allow_missing_keys: bool = False, + n_classes: Optional[int] = None, ) -> None: """ Args: @@ -145,7 +147,7 @@ def __init__( it also can be a sequence of bool, each element corresponds to a key in ``keys``. to_onehot: whether to convert input data into the one-hot format. Defaults to False. it also can be a sequence of bool, each element corresponds to a key in ``keys``. - n_classes: the number of classes to convert to One-Hot format. it also can be a + num_classes: the number of classes to convert to One-Hot format. it also can be a sequence of int, each element corresponds to a key in ``keys``. threshold_values: whether threshold the float value to int number 0 or 1, default is False. it also can be a sequence of bool, each element corresponds to a key in ``keys``. @@ -157,10 +159,13 @@ def __init__( allow_missing_keys: don't raise exception if key is missing. 
""" + # in case the new num_classes is default but you still call deprecated n_classes + if n_classes is not None and num_classes is None: + num_classes = n_classes super().__init__(keys, allow_missing_keys) self.argmax = ensure_tuple_rep(argmax, len(self.keys)) self.to_onehot = ensure_tuple_rep(to_onehot, len(self.keys)) - self.n_classes = ensure_tuple_rep(n_classes, len(self.keys)) + self.num_classes = ensure_tuple_rep(num_classes, len(self.keys)) self.threshold_values = ensure_tuple_rep(threshold_values, len(self.keys)) self.logit_thresh = ensure_tuple_rep(logit_thresh, len(self.keys)) self.rounding = ensure_tuple_rep(rounding, len(self.keys)) @@ -168,14 +173,14 @@ def __init__( def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> Dict[Hashable, torch.Tensor]: d = dict(data) - for key, argmax, to_onehot, n_classes, threshold_values, logit_thresh, rounding in self.key_iterator( - d, self.argmax, self.to_onehot, self.n_classes, self.threshold_values, self.logit_thresh, self.rounding + for key, argmax, to_onehot, num_classes, threshold_values, logit_thresh, rounding in self.key_iterator( + d, self.argmax, self.to_onehot, self.num_classes, self.threshold_values, self.logit_thresh, self.rounding ): d[key] = self.converter( d[key], argmax, to_onehot, - n_classes, + num_classes, threshold_values, logit_thresh, rounding, diff --git a/monai/transforms/utility/array.py b/monai/transforms/utility/array.py index dd045817fb..2eb6c447c6 100644 --- a/monai/transforms/utility/array.py +++ b/monai/transforms/utility/array.py @@ -282,13 +282,13 @@ def __init__(self, channel_dim: int = 0) -> None: self.channel_dim = channel_dim def __call__(self, img: NdarrayOrTensor) -> List[NdarrayOrTensor]: - n_classes = img.shape[self.channel_dim] - if n_classes <= 1: + num_classes = img.shape[self.channel_dim] + if num_classes <= 1: raise RuntimeError("input image does not contain multiple channels.") outputs = [] slices = [slice(None)] * len(img.shape) - for i in range(n_classes): + for i in range(num_classes): slices[self.channel_dim] = slice(i, i + 1) outputs.append(img[tuple(slices)]) diff --git a/tests/test_as_discrete.py b/tests/test_as_discrete.py index b87fafd8f3..bb9457a357 100644 --- a/tests/test_as_discrete.py +++ b/tests/test_as_discrete.py @@ -17,28 +17,28 @@ from monai.transforms import AsDiscrete TEST_CASE_1 = [ - {"argmax": True, "to_onehot": False, "n_classes": None, "threshold_values": False, "logit_thresh": 0.5}, + {"argmax": True, "to_onehot": False, "num_classes": None, "threshold_values": False, "logit_thresh": 0.5}, torch.tensor([[[0.0, 1.0]], [[2.0, 3.0]]]), torch.tensor([[[1.0, 1.0]]]), (1, 1, 2), ] TEST_CASE_2 = [ - {"argmax": True, "to_onehot": True, "n_classes": 2, "threshold_values": False, "logit_thresh": 0.5}, + {"argmax": True, "to_onehot": True, "num_classes": 2, "threshold_values": False, "logit_thresh": 0.5}, torch.tensor([[[0.0, 1.0]], [[2.0, 3.0]]]), torch.tensor([[[0.0, 0.0]], [[1.0, 1.0]]]), (2, 1, 2), ] TEST_CASE_3 = [ - {"argmax": False, "to_onehot": False, "n_classes": None, "threshold_values": True, "logit_thresh": 0.6}, + {"argmax": False, "to_onehot": False, "num_classes": None, "threshold_values": True, "logit_thresh": 0.6}, torch.tensor([[[0.0, 1.0], [2.0, 3.0]]]), torch.tensor([[[0.0, 1.0], [1.0, 1.0]]]), (1, 2, 2), ] TEST_CASE_4 = [ - {"argmax": False, "to_onehot": True, "n_classes": 3}, + {"argmax": False, "to_onehot": True, "num_classes": 3}, torch.tensor(1), torch.tensor([0.0, 1.0, 0.0]), (3,), diff --git a/tests/test_as_discreted.py 
b/tests/test_as_discreted.py index ac594f0daa..90e98b297b 100644 --- a/tests/test_as_discreted.py +++ b/tests/test_as_discreted.py @@ -21,7 +21,7 @@ "keys": ["pred", "label"], "argmax": [True, False], "to_onehot": True, - "n_classes": 2, + "num_classes": 2, "threshold_values": False, "logit_thresh": 0.5, }, @@ -35,7 +35,7 @@ "keys": ["pred", "label"], "argmax": False, "to_onehot": False, - "n_classes": None, + "num_classes": None, "threshold_values": [True, False], "logit_thresh": 0.6, }, @@ -49,7 +49,7 @@ "keys": ["pred"], "argmax": True, "to_onehot": True, - "n_classes": 2, + "num_classes": 2, "threshold_values": False, "logit_thresh": 0.5, }, diff --git a/tests/test_compute_roc_auc.py b/tests/test_compute_roc_auc.py index 79d62b6436..1cec357b93 100644 --- a/tests/test_compute_roc_auc.py +++ b/tests/test_compute_roc_auc.py @@ -87,7 +87,7 @@ class TestComputeROCAUC(unittest.TestCase): @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_6, TEST_CASE_7]) def test_value(self, y_pred, y, softmax, to_onehot, average, expected_value): y_pred_trans = Compose([ToTensor(), Activations(softmax=softmax)]) - y_trans = Compose([ToTensor(), AsDiscrete(to_onehot=to_onehot, n_classes=2)]) + y_trans = Compose([ToTensor(), AsDiscrete(to_onehot=to_onehot, num_classes=2)]) y_pred = torch.stack([y_pred_trans(i) for i in decollate_batch(y_pred)], dim=0) y = torch.stack([y_trans(i) for i in decollate_batch(y)], dim=0) result = compute_roc_auc(y_pred=y_pred, y=y, average=average) @@ -96,7 +96,7 @@ def test_value(self, y_pred, y, softmax, to_onehot, average, expected_value): @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_6, TEST_CASE_7]) def test_class_value(self, y_pred, y, softmax, to_onehot, average, expected_value): y_pred_trans = Compose([ToTensor(), Activations(softmax=softmax)]) - y_trans = Compose([ToTensor(), AsDiscrete(to_onehot=to_onehot, n_classes=2)]) + y_trans = Compose([ToTensor(), AsDiscrete(to_onehot=to_onehot, num_classes=2)]) y_pred = [y_pred_trans(i) for i in decollate_batch(y_pred)] y = [y_trans(i) for i in decollate_batch(y)] metric = ROCAUCMetric(average=average) diff --git a/tests/test_handler_decollate_batch.py b/tests/test_handler_decollate_batch.py index bc74cf5328..8f0ffb2b5c 100644 --- a/tests/test_handler_decollate_batch.py +++ b/tests/test_handler_decollate_batch.py @@ -32,7 +32,7 @@ def test_compute(self): [ Activationsd(keys="pred", sigmoid=True), CopyItemsd(keys="filename", times=1, names="filename_bak"), - AsDiscreted(keys="pred", threshold_values=True, to_onehot=True, n_classes=2), + AsDiscreted(keys="pred", threshold_values=True, to_onehot=True, num_classes=2), ] ) ), diff --git a/tests/test_handler_post_processing.py b/tests/test_handler_post_processing.py index 552cde9eb1..e9d57128cb 100644 --- a/tests/test_handler_post_processing.py +++ b/tests/test_handler_post_processing.py @@ -26,7 +26,7 @@ "transform": Compose( [ CopyItemsd(keys="filename", times=1, names="filename_bak"), - AsDiscreted(keys="pred", threshold_values=True, to_onehot=True, n_classes=2), + AsDiscreted(keys="pred", threshold_values=True, to_onehot=True, num_classes=2), ] ), "event": "iteration_completed", diff --git a/tests/test_handler_rocauc.py b/tests/test_handler_rocauc.py index 46594eb629..5b80bc43eb 100644 --- a/tests/test_handler_rocauc.py +++ b/tests/test_handler_rocauc.py @@ -22,7 +22,7 @@ class TestHandlerROCAUC(unittest.TestCase): def test_compute(self): auc_metric = ROCAUC() act = 
Activations(softmax=True) - to_onehot = AsDiscrete(to_onehot=True, n_classes=2) + to_onehot = AsDiscrete(to_onehot=True, num_classes=2) y_pred = [torch.Tensor([0.1, 0.9]), torch.Tensor([0.3, 1.4])] y = [torch.Tensor([0]), torch.Tensor([1])] diff --git a/tests/test_handler_rocauc_dist.py b/tests/test_handler_rocauc_dist.py index e728c80be6..8316d4c4b6 100644 --- a/tests/test_handler_rocauc_dist.py +++ b/tests/test_handler_rocauc_dist.py @@ -26,7 +26,7 @@ class DistributedROCAUC(DistTestCase): def test_compute(self): auc_metric = ROCAUC() act = Activations(softmax=True) - to_onehot = AsDiscrete(to_onehot=True, n_classes=2) + to_onehot = AsDiscrete(to_onehot=True, num_classes=2) device = f"cuda:{dist.get_rank()}" if torch.cuda.is_available() else "cpu" if dist.get_rank() == 0: diff --git a/tests/test_integration_classification_2d.py b/tests/test_integration_classification_2d.py index db435ee4e4..03b5571973 100644 --- a/tests/test_integration_classification_2d.py +++ b/tests/test_integration_classification_2d.py @@ -80,7 +80,7 @@ def run_training_test(root_dir, train_x, train_y, val_x, val_y, device="cuda:0", [LoadImage(image_only=True), AddChannel(), Transpose(indices=[0, 2, 1]), ScaleIntensity(), ToTensor()] ) y_pred_trans = Compose([ToTensor(), Activations(softmax=True)]) - y_trans = Compose([ToTensor(), AsDiscrete(to_onehot=True, n_classes=len(np.unique(train_y)))]) + y_trans = Compose([ToTensor(), AsDiscrete(to_onehot=True, num_classes=len(np.unique(train_y)))]) auc_metric = ROCAUCMetric() # create train, val data loaders diff --git a/tests/test_net_adapter.py b/tests/test_net_adapter.py index 1ec3e26203..b2d55129a7 100644 --- a/tests/test_net_adapter.py +++ b/tests/test_net_adapter.py @@ -20,31 +20,31 @@ device = "cuda" if torch.cuda.is_available() else "cpu" TEST_CASE_0 = [ - {"n_classes": 1, "use_conv": True, "dim": 2}, + {"num_classes": 1, "use_conv": True, "dim": 2}, (2, 3, 224, 224), (2, 1, 8, 1), ] TEST_CASE_1 = [ - {"n_classes": 1, "use_conv": True, "dim": 3, "pool": None}, + {"num_classes": 1, "use_conv": True, "dim": 3, "pool": None}, (2, 3, 32, 32, 32), (2, 1, 1, 1, 1), ] TEST_CASE_2 = [ - {"n_classes": 5, "use_conv": True, "dim": 3, "pool": None}, + {"num_classes": 5, "use_conv": True, "dim": 3, "pool": None}, (2, 3, 32, 32, 32), (2, 5, 1, 1, 1), ] TEST_CASE_3 = [ - {"n_classes": 5, "use_conv": True, "pool": ("avg", {"kernel_size": 4, "stride": 1}), "dim": 3}, + {"num_classes": 5, "use_conv": True, "pool": ("avg", {"kernel_size": 4, "stride": 1}), "dim": 3}, (2, 3, 128, 128, 128), (2, 5, 5, 1, 1), ] TEST_CASE_4 = [ - {"n_classes": 5, "use_conv": False, "pool": ("adaptiveavg", {"output_size": (1, 1, 1)}), "dim": 3}, + {"num_classes": 5, "use_conv": False, "pool": ("adaptiveavg", {"output_size": (1, 1, 1)}), "dim": 3}, (2, 3, 32, 32, 32), (2, 5), ] diff --git a/tests/test_resnet.py b/tests/test_resnet.py index a20be298b9..c4ba5c2e16 100644 --- a/tests/test_resnet.py +++ b/tests/test_resnet.py @@ -31,19 +31,19 @@ device = "cuda" if torch.cuda.is_available() else "cpu" TEST_CASE_1 = [ # 3D, batch 3, 2 input channel - {"pretrained": False, "spatial_dims": 3, "n_input_channels": 2, "n_classes": 3}, + {"pretrained": False, "spatial_dims": 3, "n_input_channels": 2, "num_classes": 3}, (3, 2, 32, 64, 48), (3, 3), ] TEST_CASE_2 = [ # 2D, batch 2, 1 input channel - {"pretrained": False, "spatial_dims": 2, "n_input_channels": 1, "n_classes": 3}, + {"pretrained": False, "spatial_dims": 2, "n_input_channels": 1, "num_classes": 3}, (2, 1, 32, 64), (2, 3), ] TEST_CASE_3 = [ # 1D, batch 1, 2 
input channels - {"pretrained": False, "spatial_dims": 1, "n_input_channels": 2, "n_classes": 3}, + {"pretrained": False, "spatial_dims": 1, "n_input_channels": 2, "num_classes": 3}, (1, 2, 32), (1, 3), ] diff --git a/tests/test_torchvision_fc_model.py b/tests/test_torchvision_fc_model.py index ae39968266..d6d3ea69c9 100644 --- a/tests/test_torchvision_fc_model.py +++ b/tests/test_torchvision_fc_model.py @@ -24,19 +24,19 @@ device = "cuda" if torch.cuda.is_available() else "cpu" TEST_CASE_0 = [ - {"model_name": "resnet18", "n_classes": 1, "use_conv": True, "pretrained": False}, + {"model_name": "resnet18", "num_classes": 1, "use_conv": True, "pretrained": False}, (2, 3, 224, 224), (2, 1, 1, 1), ] TEST_CASE_1 = [ - {"model_name": "resnet18", "n_classes": 1, "use_conv": True, "pretrained": False}, + {"model_name": "resnet18", "num_classes": 1, "use_conv": True, "pretrained": False}, (2, 3, 256, 256), (2, 1, 2, 2), ] TEST_CASE_2 = [ - {"model_name": "resnet101", "n_classes": 5, "use_conv": True, "pretrained": False}, + {"model_name": "resnet101", "num_classes": 5, "use_conv": True, "pretrained": False}, (2, 3, 256, 256), (2, 5, 2, 2), ] @@ -44,7 +44,7 @@ TEST_CASE_3 = [ { "model_name": "resnet101", - "n_classes": 5, + "num_classes": 5, "use_conv": True, "pool": ("avg", {"kernel_size": 6, "stride": 1}), "pretrained": False, @@ -54,60 +54,60 @@ ] TEST_CASE_4 = [ - {"model_name": "resnet18", "n_classes": 1, "use_conv": False, "pool": None, "pretrained": False}, + {"model_name": "resnet18", "num_classes": 1, "use_conv": False, "pool": None, "pretrained": False}, (2, 3, 224, 224), (2, 1), ] TEST_CASE_5 = [ - {"model_name": "resnet18", "n_classes": 1, "use_conv": False, "pool": None, "pretrained": False}, + {"model_name": "resnet18", "num_classes": 1, "use_conv": False, "pool": None, "pretrained": False}, (2, 3, 256, 256), (2, 1), ] TEST_CASE_6 = [ - {"model_name": "resnet101", "n_classes": 5, "use_conv": False, "pool": None, "pretrained": False}, + {"model_name": "resnet101", "num_classes": 5, "use_conv": False, "pool": None, "pretrained": False}, (2, 3, 256, 256), (2, 5), ] TEST_CASE_PRETRAINED_0 = [ - {"model_name": "resnet18", "n_classes": 1, "use_conv": True, "pretrained": True}, + {"model_name": "resnet18", "num_classes": 1, "use_conv": True, "pretrained": True}, (2, 3, 224, 224), (2, 1, 1, 1), -0.010419349186122417, ] TEST_CASE_PRETRAINED_1 = [ - {"model_name": "resnet18", "n_classes": 1, "use_conv": True, "pretrained": True}, + {"model_name": "resnet18", "num_classes": 1, "use_conv": True, "pretrained": True}, (2, 3, 256, 256), (2, 1, 2, 2), -0.010419349186122417, ] TEST_CASE_PRETRAINED_2 = [ - {"model_name": "resnet18", "n_classes": 5, "use_conv": True, "pretrained": True}, + {"model_name": "resnet18", "num_classes": 5, "use_conv": True, "pretrained": True}, (2, 3, 256, 256), (2, 5, 2, 2), -0.010419349186122417, ] TEST_CASE_PRETRAINED_3 = [ - {"model_name": "resnet18", "n_classes": 1, "use_conv": False, "pool": None, "pretrained": True}, + {"model_name": "resnet18", "num_classes": 1, "use_conv": False, "pool": None, "pretrained": True}, (2, 3, 224, 224), (2, 1), -0.010419349186122417, ] TEST_CASE_PRETRAINED_4 = [ - {"model_name": "resnet18", "n_classes": 1, "use_conv": False, "pool": None, "pretrained": True}, + {"model_name": "resnet18", "num_classes": 1, "use_conv": False, "pool": None, "pretrained": True}, (2, 3, 256, 256), (2, 1), -0.010419349186122417, ] TEST_CASE_PRETRAINED_5 = [ - {"model_name": "resnet18", "n_classes": 5, "use_conv": False, "pool": None, "pretrained": True}, + 
{"model_name": "resnet18", "num_classes": 5, "use_conv": False, "pool": None, "pretrained": True}, (2, 3, 256, 256), (2, 5), -0.010419349186122417, diff --git a/tests/test_torchvision_fully_conv_model.py b/tests/test_torchvision_fully_conv_model.py index 2c65f0d32c..af2c1458d3 100644 --- a/tests/test_torchvision_fully_conv_model.py +++ b/tests/test_torchvision_fully_conv_model.py @@ -24,45 +24,45 @@ device = "cuda" if torch.cuda.is_available() else "cpu" TEST_CASE_0 = [ - {"model_name": "resnet18", "n_classes": 1, "pretrained": False}, + {"model_name": "resnet18", "num_classes": 1, "pretrained": False}, (2, 3, 224, 224), (2, 1, 1, 1), ] TEST_CASE_1 = [ - {"model_name": "resnet18", "n_classes": 1, "pretrained": False}, + {"model_name": "resnet18", "num_classes": 1, "pretrained": False}, (2, 3, 256, 256), (2, 1, 2, 2), ] TEST_CASE_2 = [ - {"model_name": "resnet101", "n_classes": 5, "pretrained": False}, + {"model_name": "resnet101", "num_classes": 5, "pretrained": False}, (2, 3, 256, 256), (2, 5, 2, 2), ] TEST_CASE_3 = [ - {"model_name": "resnet101", "n_classes": 5, "pool_size": 6, "pretrained": False}, + {"model_name": "resnet101", "num_classes": 5, "pool_size": 6, "pretrained": False}, (2, 3, 224, 224), (2, 5, 2, 2), ] TEST_CASE_PRETRAINED_0 = [ - {"model_name": "resnet18", "n_classes": 1, "pretrained": True}, + {"model_name": "resnet18", "num_classes": 1, "pretrained": True}, (2, 3, 224, 224), (2, 1, 1, 1), -0.010419349186122417, ] TEST_CASE_PRETRAINED_1 = [ - {"model_name": "resnet18", "n_classes": 1, "pretrained": True}, + {"model_name": "resnet18", "num_classes": 1, "pretrained": True}, (2, 3, 256, 256), (2, 1, 2, 2), -0.010419349186122417, ] TEST_CASE_PRETRAINED_2 = [ - {"model_name": "resnet18", "n_classes": 5, "pretrained": True}, + {"model_name": "resnet18", "num_classes": 5, "pretrained": True}, (2, 3, 256, 256), (2, 5, 2, 2), -0.010419349186122417,