diff --git a/monai/_extensions/gmm/gmm_cpu.cpp b/monai/_extensions/gmm/gmm_cpu.cpp index 144e66806c..87ab1c4dab 100644 --- a/monai/_extensions/gmm/gmm_cpu.cpp +++ b/monai/_extensions/gmm/gmm_cpu.cpp @@ -22,5 +22,5 @@ void learn_cpu(const float* input, const int* labels, float* gmm, float* scratch void apply_cpu(const float* gmm, const float* input, float* output, unsigned int batch_count, unsigned int element_count) { - throw std::invalid_argument("GMM recieved a cpu tensor but is not yet implemented for the cpu"); + throw std::invalid_argument("GMM received a cpu tensor but is not yet implemented for the cpu"); } diff --git a/monai/apps/pathology/metrics/lesion_froc.py b/monai/apps/pathology/metrics/lesion_froc.py index fa4ce422be..1a7f61b921 100644 --- a/monai/apps/pathology/metrics/lesion_froc.py +++ b/monai/apps/pathology/metrics/lesion_froc.py @@ -147,7 +147,7 @@ def compute_fp_tp(self): total_tp_probs.extend(tp_probs) total_num_targets += num_targets - return (np.array(total_fp_probs), np.array(total_tp_probs), total_num_targets, num_images) + return np.array(total_fp_probs), np.array(total_tp_probs), total_num_targets, num_images def evaluate(self): """ diff --git a/monai/apps/pathology/transforms/spatial/dictionary.py b/monai/apps/pathology/transforms/spatial/dictionary.py index 0168ac3108..f998e53c93 100644 --- a/monai/apps/pathology/transforms/spatial/dictionary.py +++ b/monai/apps/pathology/transforms/spatial/dictionary.py @@ -29,11 +29,11 @@ class SplitOnGridd(MapTransform): This transform works only with torch.Tensor inputs. Args: - grid_shape: a tuple or an integer define the shape of the grid upon which to extract patches. + grid_size: a tuple or an integer that defines the shape of the grid upon which to extract patches. If it's an integer, the value will be repeated for each dimension. Default is 2x2 patch_size: a tuple or an integer that defines the output patch sizes. If it's an integer, the value will be repeated for each dimension. - The default is (0, 0), where the patch size will be infered from the grid shape. + The default is (0, 0), where the patch size will be inferred from the grid shape. Note: the shape of the input image is infered based on the first image used. """ diff --git a/monai/data/dataset.py b/monai/data/dataset.py index debf9c6aa4..21c9236cae 100644 --- a/monai/data/dataset.py +++ b/monai/data/dataset.py @@ -29,9 +29,10 @@ from torch.utils.data import Dataset as _TorchDataset from torch.utils.data import Subset -from monai.data.utils import convert_tables_to_dicts, first, pickle_hashing +from monai.data.utils import convert_tables_to_dicts, pickle_hashing from monai.transforms import Compose, Randomizable, ThreadUnsafe, Transform, apply_transform from monai.utils import MAX_SEED, ensure_tuple, get_seed, min_version, optional_import +from monai.utils.misc import first if TYPE_CHECKING: from tqdm import tqdm diff --git a/monai/data/utils.py b/monai/data/utils.py index 29dbeeda24..1ad01976e4 100644 --- a/monai/data/utils.py +++ b/monai/data/utils.py @@ -467,7 +467,7 @@ def worker_init_fn(worker_id: int) -> None: def set_rnd(obj, seed: int) -> int: """ - Set seed or random state for all randomisable properties of obj. + Set seed or random state for all randomizable properties of obj. Args: obj: object to set seed or random state for.
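The `SplitOnGridd` docstring above renames `grid_shape` to `grid_size`; a minimal usage sketch of that argument, assuming the 2x2 default grid and the inferred patch size described in the docstring (the tensor shape below is illustrative, not taken from the patch):

    import torch
    from monai.apps.pathology.transforms.spatial.dictionary import SplitOnGridd

    # illustrative single-channel 256x256 image; the transform expects torch.Tensor inputs
    data = {"image": torch.rand(1, 256, 256)}
    # grid_size=2 means a 2x2 grid; with the default patch_size the patch size is
    # inferred from the grid, so each patch should be 128x128 here
    splitter = SplitOnGridd(keys="image", grid_size=2)
    patches = splitter(data)["image"]  # expected shape: (4, 1, 128, 128)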
diff --git a/monai/engines/utils.py b/monai/engines/utils.py index c1fba725e3..25e09fe2b6 100644 --- a/monai/engines/utils.py +++ b/monai/engines/utils.py @@ -146,9 +146,8 @@ def __call__( class PrepareBatchDefault(PrepareBatch): """ - Default prepare_batch method to return `image` and `label` only, - it's consistent with `default_prerpare_batch` API. - + Default prepare batch method to return `image` and `label` only, + it is intended to be consistent with the `default_prepare_batch` API. """ def __call__( @@ -162,13 +161,13 @@ def __call__( class PrepareBatchExtraInput(PrepareBatch): """ - Customized prepare_batch for trainer or evalutor that support extra input data for network. + Customized prepare_batch for trainer or evaluator that supports extra input data for the network. Extra items are specified by the `extra_keys` parameter. Args: extra_keys: if a string or list provided, every item is the key of extra data in current batch, and will pass the extra data to the network(*args) in order. - if a dict provided, every `{k, v}` pair is the key of extra data in current batch, + If a dictionary is provided, every `{k, v}` pair is the key of extra data in current batch, `k` the param name in network, `v` is the key of extra data in current batch, and will pass the `{k1: batch[v1], k2: batch[v2], ...}` as kwargs to the network. diff --git a/monai/engines/workflow.py b/monai/engines/workflow.py index 36e0d0e9d3..48e2dc1774 100644 --- a/monai/engines/workflow.py +++ b/monai/engines/workflow.py @@ -231,8 +231,8 @@ def _compare_metrics(engine: Engine) -> None: current_val_metric = engine.state.metrics[key_metric_name] if not is_scalar(current_val_metric): warnings.warn( - "key metric is not a scalar value, skip the metric comaprison with best metric." - "please use other metrics as key metric, or change the `reduction` mode to 'mean'." + "key metric is not a scalar value, skip the metric comparison with the current best metric." + "please set other metrics as the key metric, or change the `reduction` mode to 'mean'." ) return diff --git a/monai/handlers/checkpoint_saver.py b/monai/handlers/checkpoint_saver.py index 8d0dd84efe..607cd11b25 100644 --- a/monai/handlers/checkpoint_saver.py +++ b/monai/handlers/checkpoint_saver.py @@ -163,7 +163,7 @@ def _score_func(engine: Engine): metric = engine.state.metrics[metric_name] if not is_scalar(metric): warnings.warn( - "key metric is not a scalar value, skip metric comaprison and don't save a model." + "key metric is not a scalar value, skip metric comparison and don't save a model." "please use other metrics as key metric, or change the `reduction` mode to 'mean'."
) return -1 diff --git a/monai/handlers/confusion_matrix.py b/monai/handlers/confusion_matrix.py index cd067816ea..1fa70d3d8e 100644 --- a/monai/handlers/confusion_matrix.py +++ b/monai/handlers/confusion_matrix.py @@ -13,7 +13,7 @@ from monai.handlers.ignite_metric import IgniteMetric from monai.metrics import ConfusionMatrixMetric -from monai.metrics.utils import MetricReduction +from monai.utils.enums import MetricReduction class ConfusionMatrix(IgniteMetric): diff --git a/monai/handlers/nvtx_handlers.py b/monai/handlers/nvtx_handlers.py index 37bacc7f95..8e44248f7d 100644 --- a/monai/handlers/nvtx_handlers.py +++ b/monai/handlers/nvtx_handlers.py @@ -71,7 +71,7 @@ def resolve_events(self, events: Union[str, Tuple]) -> Tuple[Events, Events]: if len(events) == 1: return self.create_paired_events(events[0]) if len(events) == 2: - return (self.get_event(events[0]), self.get_event(events[1])) + return self.get_event(events[0]), self.get_event(events[1]) raise ValueError(f"Exactly two Ignite events should be provided [received {len(events)}].") def create_paired_events(self, event: str) -> Tuple[Events, Events]: @@ -80,7 +80,7 @@ def create_paired_events(self, event: str) -> Tuple[Events, Events]: """ event = event.upper() event_prefix = {"": "", "ENGINE": "", "EPOCH": "EPOCH_", "ITERATION": "ITERATION_", "BATCH": "GET_BATCH_"} - return (self.get_event(event_prefix[event] + "STARTED"), self.get_event(event_prefix[event] + "COMPLETED")) + return self.get_event(event_prefix[event] + "STARTED"), self.get_event(event_prefix[event] + "COMPLETED") def get_event(self, event: Union[str, Events]) -> Events: return Events[event.upper()] if isinstance(event, str) else event diff --git a/monai/losses/image_dissimilarity.py b/monai/losses/image_dissimilarity.py index 911ae22185..ce38dfd08d 100644 --- a/monai/losses/image_dissimilarity.py +++ b/monai/losses/image_dissimilarity.py @@ -73,7 +73,7 @@ def __init__( ) -> None: """ Args: - spatial_dims: number of spatial ndimensions, {``1``, ``2``, ``3``}. Defaults to 3. + spatial_dims: number of spatial dimensions, {``1``, ``2``, ``3``}. Defaults to 3. kernel_size: kernel spatial size, must be odd. kernel_type: {``"rectangular"``, ``"triangular"``, ``"gaussian"``}. Defaults to ``"rectangular"``. reduction: {``"none"``, ``"mean"``, ``"sum"``} diff --git a/monai/metrics/cumulative_average.py b/monai/metrics/cumulative_average.py index 269195eeb3..985c0a914e 100644 --- a/monai/metrics/cumulative_average.py +++ b/monai/metrics/cumulative_average.py @@ -21,9 +21,9 @@ class CumulativeAverage(Cumulative): """ Cumulatively record data value and aggregate for the average value. It supports single class or multi-class data, for example, - value can be 0.44 (like loss) or [0.3, 0.4] (like metrics of 2 classes). + value can be 0.44 (a loss value) or [0.3, 0.4] (metrics of two classes). It also supports distributed data parallel, sync data when aggregating. - For example, recording loss value and compute the oveall average value in every 5 iterations: + For example, recording loss values and computing the overall average value every 5 iterations: ..
code-block:: python diff --git a/monai/metrics/utils.py b/monai/metrics/utils.py index 540eda3096..23ec555b35 100644 --- a/monai/metrics/utils.py +++ b/monai/metrics/utils.py @@ -150,7 +150,7 @@ def get_mask_edges( if crop: if not np.any(seg_pred | seg_gt): - return (np.zeros_like(seg_pred), np.zeros_like(seg_gt)) + return np.zeros_like(seg_pred), np.zeros_like(seg_gt) seg_pred, seg_gt = np.expand_dims(seg_pred, 0), np.expand_dims(seg_gt, 0) box_start, box_end = generate_spatial_bounding_box(np.asarray(seg_pred | seg_gt)) @@ -161,7 +161,7 @@ def get_mask_edges( edges_pred = binary_erosion(seg_pred) ^ seg_pred edges_gt = binary_erosion(seg_gt) ^ seg_gt - return (edges_pred, edges_gt) + return edges_pred, edges_gt def get_surface_distance(seg_pred: np.ndarray, seg_gt: np.ndarray, distance_metric: str = "euclidean") -> np.ndarray: diff --git a/monai/networks/nets/basic_unet.py b/monai/networks/nets/basic_unet.py index f96b299d2b..4a72dcdd9a 100644 --- a/monai/networks/nets/basic_unet.py +++ b/monai/networks/nets/basic_unet.py @@ -18,7 +18,7 @@ from monai.networks.layers.factories import Conv, Pool from monai.utils import deprecated_arg, ensure_tuple_rep -__all__ = ["BasicUNet", "BasicUnet", "Basicunet"] +__all__ = ["BasicUnet", "Basicunet", "basicunet", "BasicUNet"] class TwoConv(nn.Sequential): diff --git a/monai/networks/nets/dynunet.py b/monai/networks/nets/dynunet.py index 696c9d25dc..4cd3046261 100644 --- a/monai/networks/nets/dynunet.py +++ b/monai/networks/nets/dynunet.py @@ -75,7 +75,7 @@ class DynUNet(nn.Module): To meet the requirements of the structure, the input size for each spatial dimension should be divisible by `2 * the product of all strides in the corresponding dimension`. The output size for each spatial dimension - equals to the input size of the correponding dimension divided by the stride in strides[0]. + equals to the input size of the corresponding dimension divided by the stride in strides[0]. For example, if `strides=((1, 2, 4), 2, 1, 1)`, the minimal spatial size of the input is `(8, 16, 32)`, and the spatial size of the output is `(8, 8, 8)`. diff --git a/monai/networks/nets/efficientnet.py b/monai/networks/nets/efficientnet.py index 6cd0e83e46..d912acce3d 100644 --- a/monai/networks/nets/efficientnet.py +++ b/monai/networks/nets/efficientnet.py @@ -23,7 +23,14 @@ from monai.networks.layers.utils import get_norm_layer from monai.utils.module import look_up_option -__all__ = ["EfficientNet", "EfficientNetBN", "get_efficientnet_image_size", "drop_connect"] +__all__ = [ + "EfficientNet", + "EfficientNetBN", + "get_efficientnet_image_size", + "drop_connect", + "EfficientNetBNFeatures", + "BlockArgs", +] efficientnet_params = { # model_name: (width_mult, depth_mult, image_size, dropout_rate, dropconnect_rate) @@ -666,7 +673,7 @@ def drop_connect(inputs: torch.Tensor, p: float, training: bool) -> torch.Tensor e.g. 1D activations [B, C, H], 2D activations [B, C, H, W] and 3D activations [B, C, H, W, D] Args: - input: input tensor with [B, C, dim_1, dim_2, ..., dim_N] where N=spatial_dims. + inputs: input tensor with [B, C, dim_1, dim_2, ..., dim_N] where N=spatial_dims. p: probability to use for dropping connections. training: whether in training or evaluation mode. 
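Following the `drop_connect` docstring fix above (the parameter is `inputs`, not `input`), a minimal call sketch on a dummy activation tensor; the shape and probability are illustrative:

    import torch
    from monai.networks.nets.efficientnet import drop_connect

    x = torch.rand(4, 16, 32, 32)  # [B, C, H, W] dummy activations
    # in training mode whole samples are dropped with probability p and the
    # survivors are rescaled; in evaluation mode the input is returned unchanged
    y = drop_connect(inputs=x, p=0.2, training=True)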
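The `CumulativeAverage` docstring touched earlier in this patch describes recording loss values and computing the overall average; a minimal sketch of that pattern, assuming the `append`/`aggregate` API of the metric (the loss values are made up):

    import torch
    from monai.metrics import CumulativeAverage

    avg = CumulativeAverage()
    for step, loss in enumerate([0.9, 0.7, 0.6, 0.5, 0.4], start=1):  # made-up loss values
        avg.append(torch.tensor(loss))
        if step % 5 == 0:
            # aggregate() averages everything recorded so far (synced across ranks
            # when running distributed data parallel, per the docstring)
            print("overall average loss:", avg.aggregate())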
diff --git a/monai/networks/nets/resnet.py b/monai/networks/nets/resnet.py index acfbfc88f5..fc400895df 100644 --- a/monai/networks/nets/resnet.py +++ b/monai/networks/nets/resnet.py @@ -29,14 +29,14 @@ def get_inplanes(): def get_avgpool(): - return [(0), (1), (1, 1), (1, 1, 1)] + return [0, 1, (1, 1), (1, 1, 1)] def get_conv1(conv1_t_size: int, conv1_t_stride: int): return ( - [(0), (conv1_t_size), (conv1_t_size, 7), (conv1_t_size, 7, 7)], - [(0), (conv1_t_stride), (conv1_t_stride, 2), (conv1_t_stride, 2, 2)], - [(0), (conv1_t_size // 2), (conv1_t_size // 2, 3), (conv1_t_size // 2, 3, 3)], + [0, conv1_t_size, (conv1_t_size, 7), (conv1_t_size, 7, 7)], + [0, conv1_t_stride, (conv1_t_stride, 2), (conv1_t_stride, 2, 2)], + [0, (conv1_t_size // 2), (conv1_t_size // 2, 3), (conv1_t_size // 2, 3, 3)], ) diff --git a/monai/transforms/post/array.py b/monai/transforms/post/array.py index 6bcd4df9ef..d83443507a 100644 --- a/monai/transforms/post/array.py +++ b/monai/transforms/post/array.py @@ -179,7 +179,7 @@ def __init__( self.to_onehot = to_onehot if isinstance(threshold, bool): # for backward compatibility - warnings.warn("`threshold_values=True/False` is deprecated, please use `threashold=value` instead.") + warnings.warn("`threshold_values=True/False` is deprecated, please use `threshold=value` instead.") threshold = logit_thresh if threshold else None self.threshold = threshold @@ -211,7 +211,7 @@ def __call__( Defaults to ``self.argmax``. to_onehot: if not None, convert input data into the one-hot format with specified number of classes. Defaults to ``self.to_onehot``. - threshold: if not None, threshold the float values to int number 0 or 1 with specified theashold value. + threshold: if not None, threshold the float values to int number 0 or 1 with specified threshold value. Defaults to ``self.threshold``. rounding: if not None, round the data according to the specified option, available options: ["torchrounding"]. @@ -229,7 +229,7 @@ def __call__( warnings.warn("`to_onehot=True/False` is deprecated, please use `to_onehot=num_classes` instead.") to_onehot = num_classes if to_onehot else None if isinstance(threshold, bool): - warnings.warn("`threshold_values=True/False` is deprecated, please use `threashold=value` instead.") + warnings.warn("`threshold_values=True/False` is deprecated, please use `threshold=value` instead.") threshold = logit_thresh if threshold else None img_t: torch.Tensor diff --git a/monai/transforms/utility/dictionary.py b/monai/transforms/utility/dictionary.py index e6aef97143..082be8dea8 100644 --- a/monai/transforms/utility/dictionary.py +++ b/monai/transforms/utility/dictionary.py @@ -1555,7 +1555,7 @@ class RandCuCIMd(CuCIMd, RandomizableTransform): Users can call `ToCuPy` transform to convert a numpy array or torch tensor to cupy array. - If the cuCIM transform is already randomized the `apply_prob` argument has nothing to do with the randomness of the underlying cuCIM transform. `apply_prob` defines if the transform (either randomized - or non-randomized) being applied randomly, so it can apply non-randomized tranforms randomly but be careful + or non-randomized) being applied randomly, so it can apply non-randomized transforms randomly but be careful with setting `apply_prob` to anything than 1.0 when using along with cuCIM's randomized transforms. - If the random factor of the underlying cuCIM transform is not derived from `self.R`, the results may not be deterministic. See Also: :py:class:`monai.transforms.Randomizable`. 
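As a usage note for the corrected `AsDiscrete` deprecation messages above, a minimal sketch contrasting the recommended `threshold=value` form with the deprecated boolean flags (the prediction tensor is illustrative):

    import torch
    from monai.transforms import AsDiscrete

    pred = torch.tensor([[0.2, 0.7, 0.4]])  # illustrative sigmoid outputs

    # preferred: pass the threshold value directly
    discretize = AsDiscrete(threshold=0.5)
    print(discretize(pred))  # expected: tensor([[0., 1., 0.]])

    # deprecated form that triggers the warning fixed above:
    # AsDiscrete(threshold_values=True, logit_thresh=0.5)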
diff --git a/monai/utils/jupyter_utils.py b/monai/utils/jupyter_utils.py index f862452fb1..1f55f2fa61 100644 --- a/monai/utils/jupyter_utils.py +++ b/monai/utils/jupyter_utils.py @@ -178,6 +178,7 @@ def plot_engine_status( window_fraction: for metric plot, what fraction of the graph value length to use as the running average window image_fn: callable converting tensors keyed to a name in the Engine to a tuple of images to plot fig: Figure object to plot into, reuse from previous plotting for flicker-free refreshing + selected_inst: index of the instance to show in the image plot Returns: Figure object (or `fig` if given), list of Axes objects for graph and images diff --git a/monai/visualize/occlusion_sensitivity.py b/monai/visualize/occlusion_sensitivity.py index 51bcb1f517..bc2ecc3787 100644 --- a/monai/visualize/occlusion_sensitivity.py +++ b/monai/visualize/occlusion_sensitivity.py @@ -167,7 +167,7 @@ def __init__( upsampler: An upsampling method to upsample the output image. Default is N-dimensional linear (bilinear, trilinear, etc.) depending on num spatial dimensions of input. - verbose: Use ``tdqm.trange`` output (if available). + verbose: Use ``tqdm.trange`` output (if available). """ self.nn_module = nn_module diff --git a/monai/visualize/utils.py b/monai/visualize/utils.py index 0a985c0c21..a4b85d6e58 100644 --- a/monai/visualize/utils.py +++ b/monai/visualize/utils.py @@ -132,7 +132,7 @@ def blend_images( image: NdarrayOrTensor, label: NdarrayOrTensor, alpha: float = 0.5, cmap: str = "hsv", rescale_arrays: bool = True ): """ - Blend a image and a label. Both should have the shape CHW[D]. + Blend an image and a label. Both should have the shape CHW[D]. The image may have C==1 or 3 channels (greyscale or RGB). The label is expected to have C==1.
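For the `blend_images` docstring fix above, a minimal sketch of blending a single-channel image with a single-channel label in the CHW layout the docstring describes (random data for illustration):

    import torch
    from monai.visualize.utils import blend_images

    image = torch.rand(1, 64, 64)                  # greyscale image, CHW
    label = (torch.rand(1, 64, 64) > 0.8).float()  # single-channel label map
    blended = blend_images(image=image, label=label, alpha=0.5, cmap="hsv")
    print(blended.shape)  # expected: an RGB blend, e.g. torch.Size([3, 64, 64])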