From 99929cb263b6b05de873dd0289045e1ed2bdf3e5 Mon Sep 17 00:00:00 2001
From: "deepsource-autofix[bot]" <62050782+deepsource-autofix[bot]@users.noreply.github.com>
Date: Wed, 13 Jan 2021 17:02:39 +0000
Subject: [PATCH 1/3] Remove assert statement from non-test files

---
 monai/data/csv_saver.py | 6 ++--
 monai/data/nifti_writer.py | 3 +-
 monai/data/png_writer.py | 3 +-
 monai/data/synthetic.py | 6 ++--
 monai/engines/utils.py | 3 +-
 monai/engines/workflow.py | 3 +-
 monai/handlers/checkpoint_loader.py | 6 ++--
 monai/handlers/checkpoint_saver.py | 36 ++++++++++++++--------
 monai/inferers/utils.py | 3 +-
 monai/losses/dice.py | 32 +++++++++++--------
 monai/losses/tversky.py | 7 +++--
 monai/metrics/rocauc.py | 13 +++++---
 monai/networks/blocks/dynunet_block.py | 9 ++++--
 monai/networks/blocks/segresnet_block.py | 6 ++--
 monai/networks/nets/ahnet.py | 6 ++--
 monai/networks/nets/dynunet.py | 18 +++++++----
 monai/networks/nets/segresnet.py | 3 +-
 monai/networks/nets/vnet.py | 3 +-
 monai/networks/utils.py | 6 ++--
 monai/transforms/croppad/array.py | 3 +-
 monai/transforms/croppad/dictionary.py | 9 ++++--
 monai/transforms/intensity/array.py | 39 ++++++++++++++++-------
 monai/transforms/intensity/dictionary.py | 27 ++++++++++------
 monai/transforms/io/array.py | 15 +++++----
 monai/transforms/io/dictionary.py | 6 ++--
 monai/transforms/post/array.py | 3 +-
 monai/transforms/spatial/array.py | 12 +++++---
 monai/transforms/spatial/dictionary.py | 3 +-
 monai/transforms/utility/array.py | 12 +++++---
 monai/transforms/utility/dictionary.py | 3 +-
 monai/utils/aliases.py | 3 +-
 monai/utils/module.py | 3 +-
 monai/visualize/img2tensorboard.py | 3 +-
 monai/visualize/occlusion_sensitivity.py | 3 +-
 setup.py | 3 +-
 35 files changed, 208 insertions(+), 111 deletions(-)

diff --git a/monai/data/csv_saver.py b/monai/data/csv_saver.py
index 4b876cbcaf..5f5e415055 100644
--- a/monai/data/csv_saver.py
+++ b/monai/data/csv_saver.py
@@ -37,7 +37,8 @@ def __init__(self, output_dir: str = "./", filename: str = "predictions.csv", ov
         """
         self.output_dir = output_dir
         self._cache_dict: OrderedDict = OrderedDict()
-        assert isinstance(filename, str) and filename[-4:] == ".csv", "filename must be a string with CSV format."
+        if not (isinstance(filename, str) and filename[-4:] == ".csv"):
+            raise AssertionError("filename must be a string with CSV format.")
         self._filepath = os.path.join(output_dir, filename)
         self.overwrite = overwrite
         self._data_index = 0
@@ -76,7 +77,8 @@ def save(self, data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict]
         self._data_index += 1
         if torch.is_tensor(data):
             data = data.detach().cpu().numpy()
-        assert isinstance(data, np.ndarray)
+        if not isinstance(data, np.ndarray):
+            raise AssertionError
         self._cache_dict[save_key] = data.astype(np.float32)
 
     def save_batch(self, batch_data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] = None) -> None:
diff --git a/monai/data/nifti_writer.py b/monai/data/nifti_writer.py
index 3ffffbbed0..6837ebeb90 100644
--- a/monai/data/nifti_writer.py
+++ b/monai/data/nifti_writer.py
@@ -89,7 +89,8 @@ def write_nifti(
             the output data type is always ``np.float32``.
         output_dtype: data type for saving data. Defaults to ``np.float32``.
     """
-    assert isinstance(data, np.ndarray), "input data must be numpy array."
+    if not isinstance(data, np.ndarray):
+        raise AssertionError("input data must be numpy array.")
     dtype = dtype or data.dtype
     sr = min(data.ndim, 3)
     if affine is None:
diff --git a/monai/data/png_writer.py b/monai/data/png_writer.py
index 3f5bb733fe..d7baa6ea79 100644
--- a/monai/data/png_writer.py
+++ b/monai/data/png_writer.py
@@ -47,7 +47,8 @@ def write_png(
         ValueError: When ``scale`` is not one of [255, 65535].
 
     """
-    assert isinstance(data, np.ndarray), "input data must be numpy array."
+    if not isinstance(data, np.ndarray):
+        raise AssertionError("input data must be numpy array.")
     if len(data.shape) == 3 and data.shape[2] == 1:  # PIL Image can't save image with 1 channel
         data = data.squeeze(2)
     if output_spatial_shape is not None:
diff --git a/monai/data/synthetic.py b/monai/data/synthetic.py
index d1d0171527..90cbe13c2d 100644
--- a/monai/data/synthetic.py
+++ b/monai/data/synthetic.py
@@ -68,7 +68,8 @@ def create_test_image_2d(
     noisyimage = rescale_array(np.maximum(image, norm))
 
     if channel_dim is not None:
-        assert isinstance(channel_dim, int) and channel_dim in (-1, 0, 2), "invalid channel dim."
+        if not (isinstance(channel_dim, int) and channel_dim in (-1, 0, 2)):
+            raise AssertionError("invalid channel dim.")
         if channel_dim == 0:
             noisyimage = noisyimage[None]
             labels = labels[None]
@@ -131,7 +132,8 @@ def create_test_image_3d(
    noisyimage = rescale_array(np.maximum(image, norm))
 
     if channel_dim is not None:
-        assert isinstance(channel_dim, int) and channel_dim in (-1, 0, 3), "invalid channel dim."
+        if not (isinstance(channel_dim, int) and channel_dim in (-1, 0, 3)):
+            raise AssertionError("invalid channel dim.")
         noisyimage, labels = (
             (noisyimage[None], labels[None]) if channel_dim == 0 else (noisyimage[..., None], labels[..., None])
         )
diff --git a/monai/engines/utils.py b/monai/engines/utils.py
index 028051883f..7a2dc40b8d 100644
--- a/monai/engines/utils.py
+++ b/monai/engines/utils.py
@@ -89,7 +89,8 @@ def default_prepare_batch(
         image, label(optional).
 
     """
-    assert isinstance(batchdata, dict), "default prepare_batch expects dictionary input data."
+    if not isinstance(batchdata, dict):
+        raise AssertionError("default prepare_batch expects dictionary input data.")
     if CommonKeys.LABEL in batchdata:
         return (
             batchdata[CommonKeys.IMAGE].to(device=device, non_blocking=non_blocking),
diff --git a/monai/engines/workflow.py b/monai/engines/workflow.py
index ebb16ec362..1d8c74c4bb 100644
--- a/monai/engines/workflow.py
+++ b/monai/engines/workflow.py
@@ -124,7 +124,8 @@ def set_sampler_epoch(engine: Engine):
 
         @self.on(Events.ITERATION_COMPLETED)
         def run_post_transform(engine: Engine) -> None:
-            assert post_transform is not None
+            if post_transform is None:
+                raise AssertionError
             engine.state.output = apply_transform(post_transform, engine.state.output)
 
         if key_metric is not None:
diff --git a/monai/handlers/checkpoint_loader.py b/monai/handlers/checkpoint_loader.py
index 617182107f..648cc8360a 100644
--- a/monai/handlers/checkpoint_loader.py
+++ b/monai/handlers/checkpoint_loader.py
@@ -54,9 +54,11 @@ def __init__(
         name: Optional[str] = None,
         map_location: Optional[Dict] = None,
     ) -> None:
-        assert load_path is not None, "must provide clear path to load checkpoint."
+        if load_path is None:
+            raise AssertionError("must provide clear path to load checkpoint.")
         self.load_path = load_path
-        assert load_dict is not None and len(load_dict) > 0, "must provide target objects to load."
+        if not (load_dict is not None and len(load_dict) > 0):
+            raise AssertionError("must provide target objects to load.")
         self.logger = logging.getLogger(name)
         self.load_dict = load_dict
         self._name = name
diff --git a/monai/handlers/checkpoint_saver.py b/monai/handlers/checkpoint_saver.py
index 99a9c0b756..8052e21cb6 100644
--- a/monai/handlers/checkpoint_saver.py
+++ b/monai/handlers/checkpoint_saver.py
@@ -88,9 +88,11 @@ def __init__(
         save_interval: int = 0,
         n_saved: Optional[int] = None,
     ) -> None:
-        assert save_dir is not None, "must provide directory to save the checkpoints."
+        if save_dir is None:
+            raise AssertionError("must provide directory to save the checkpoints.")
         self.save_dir = save_dir
-        assert save_dict is not None and len(save_dict) > 0, "must provide source objects to save."
+        if not (save_dict is not None and len(save_dict) > 0):
+            raise AssertionError("must provide source objects to save.")
         self.save_dict = save_dict
         self.logger = logging.getLogger(name)
         self.epoch_level = epoch_level
@@ -202,12 +204,15 @@ def completed(self, engine: Engine) -> None:
         Args:
             engine: Ignite Engine, it can be a trainer, validator or evaluator.
         """
-        assert callable(self._final_checkpoint), "Error: _final_checkpoint function not specified."
+        if not callable(self._final_checkpoint):
+            raise AssertionError("Error: _final_checkpoint function not specified.")
         # delete previous saved final checkpoint if existing
         self._delete_previous_final_ckpt()
         self._final_checkpoint(engine)
-        assert self.logger is not None
-        assert hasattr(self.logger, "info"), "Error, provided logger has not info attribute."
+        if self.logger is None:
+            raise AssertionError
+        if not hasattr(self.logger, "info"):
+            raise AssertionError("Error, provided logger has not info attribute.")
         self.logger.info(f"Train completed, saved final checkpoint: {self._final_checkpoint.last_checkpoint}")
 
     def exception_raised(self, engine: Engine, e: Exception) -> None:
@@ -219,12 +224,15 @@ def exception_raised(self, engine: Engine, e: Exception) -> None:
             engine: Ignite Engine, it can be a trainer, validator or evaluator.
             e: the exception caught in Ignite during engine.run().
         """
-        assert callable(self._final_checkpoint), "Error: _final_checkpoint function not specified."
+        if not callable(self._final_checkpoint):
+            raise AssertionError("Error: _final_checkpoint function not specified.")
         # delete previous saved final checkpoint if existing
         self._delete_previous_final_ckpt()
         self._final_checkpoint(engine)
-        assert self.logger is not None
-        assert hasattr(self.logger, "info"), "Error, provided logger has not info attribute."
+        if self.logger is None:
+            raise AssertionError
+        if not hasattr(self.logger, "info"):
+            raise AssertionError("Error, provided logger has not info attribute.")
         self.logger.info(f"Exception_raised, saved exception checkpoint: {self._final_checkpoint.last_checkpoint}")
         raise e
 
@@ -234,7 +242,8 @@ def metrics_completed(self, engine: Engine) -> None:
         Args:
             engine: Ignite Engine, it can be a trainer, validator or evaluator.
         """
-        assert callable(self._key_metric_checkpoint), "Error: _key_metric_checkpoint function not specified."
+        if not callable(self._key_metric_checkpoint):
+            raise AssertionError("Error: _key_metric_checkpoint function not specified.")
         self._key_metric_checkpoint(engine)
 
     def interval_completed(self, engine: Engine) -> None:
@@ -244,10 +253,13 @@ def interval_completed(self, engine: Engine) -> None:
         Args:
             engine: Ignite Engine, it can be a trainer, validator or evaluator.
""" - assert callable(self._interval_checkpoint), "Error: _interval_checkpoint function not specified." + if not callable(self._interval_checkpoint): + raise AssertionError("Error: _interval_checkpoint function not specified.") self._interval_checkpoint(engine) - assert self.logger is not None - assert hasattr(self.logger, "info"), "Error, provided logger has not info attribute." + if self.logger is None: + raise AssertionError + if not hasattr(self.logger, "info"): + raise AssertionError("Error, provided logger has not info attribute.") if self.epoch_level: self.logger.info(f"Saved checkpoint at epoch: {engine.state.epoch}") else: diff --git a/monai/inferers/utils.py b/monai/inferers/utils.py index 170016aca4..903cf84a0c 100644 --- a/monai/inferers/utils.py +++ b/monai/inferers/utils.py @@ -82,7 +82,8 @@ def sliding_window_inference( """ num_spatial_dims = len(inputs.shape) - 2 - assert 0 <= overlap < 1, "overlap must be >= 0 and < 1." + if 0 > overlap: + raise AssertionError("overlap must be >= 0 and < 1.") # determine image spatial size and batch size # Note: all input images must have the same image size and batch size diff --git a/monai/losses/dice.py b/monai/losses/dice.py index 278ac52281..807bcc4bbc 100644 --- a/monai/losses/dice.py +++ b/monai/losses/dice.py @@ -135,9 +135,10 @@ def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: target = target[:, 1:] input = input[:, 1:] - assert ( - target.shape == input.shape - ), f"ground truth has differing shape ({target.shape}) from input ({input.shape})" + if ( + target.shape != input.shape + ): + raise AssertionError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})") # reducing only spatial dimensions (not batch nor channels) reduce_axis = list(range(2, len(input.shape))) @@ -192,16 +193,20 @@ def forward(self, input: torch.Tensor, target: torch.Tensor, mask: Optional[torc """ if mask is not None: # checking if mask is of proper shape - assert input.dim() == mask.dim(), f"dim of input ({input.shape}) is different from mask ({mask.shape})" - assert ( + if input.dim() != mask.dim(): + raise AssertionError(f"dim of input ({input.shape}) is different from mask ({mask.shape})") + if not ( input.shape[0] == mask.shape[0] or mask.shape[0] == 1 - ), f" batch size of mask ({mask.shape}) must be 1 or equal to input ({input.shape})" + ): + raise AssertionError(f" batch size of mask ({mask.shape}) must be 1 or equal to input ({input.shape})") if target.dim() > 1: - assert mask.shape[1] == 1, f"mask ({mask.shape}) must have only 1 channel" - assert ( - input.shape[2:] == mask.shape[2:] - ), f"spatial size of input ({input.shape}) is different from mask ({mask.shape})" + if mask.shape[1] != 1: + raise AssertionError(f"mask ({mask.shape}) must have only 1 channel") + if ( + input.shape[2:] != mask.shape[2:] + ): + raise AssertionError(f"spatial size of input ({input.shape}) is different from mask ({mask.shape})") input = input * mask target = target * mask @@ -322,9 +327,10 @@ def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: target = target[:, 1:] input = input[:, 1:] - assert ( - target.shape == input.shape - ), f"ground truth has differing shape ({target.shape}) from input ({input.shape})" + if ( + target.shape != input.shape + ): + raise AssertionError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})") # reducing only spatial dimensions (not batch nor channels) reduce_axis = list(range(2, len(input.shape))) diff --git 
diff --git a/monai/losses/tversky.py b/monai/losses/tversky.py
index 0603c0aed8..3a2e8eb861 100644
--- a/monai/losses/tversky.py
+++ b/monai/losses/tversky.py
@@ -130,9 +130,10 @@ def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
             target = target[:, 1:]
             input = input[:, 1:]
 
-        assert (
-            target.shape == input.shape
-        ), f"ground truth has differing shape ({target.shape}) from input ({input.shape})"
+        if (
+            target.shape != input.shape
+        ):
+            raise AssertionError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})")
 
         p0 = input
         p1 = 1 - p0
diff --git a/monai/metrics/rocauc.py b/monai/metrics/rocauc.py
index 164b32ddb7..de378e473e 100644
--- a/monai/metrics/rocauc.py
+++ b/monai/metrics/rocauc.py
@@ -20,12 +20,14 @@
 
 
 def _calculate(y: torch.Tensor, y_pred: torch.Tensor) -> float:
-    assert y.ndimension() == y_pred.ndimension() == 1 and len(y) == len(
+    if not (y.ndimension() == y_pred.ndimension() == 1 and len(y) == len(
         y_pred
-    ), "y and y_pred must be 1 dimension data with same length."
-    assert y.unique().equal(
+    )):
+        raise AssertionError("y and y_pred must be 1 dimension data with same length.")
+    if not y.unique().equal(
         torch.tensor([0, 1], dtype=y.dtype, device=y.device)
-    ), "y values must be 0 or 1, can not be all 0 or all 1."
+    ):
+        raise AssertionError("y values must be 0 or 1, can not be all 0 or all 1.")
     n = len(y)
     indices = y_pred.argsort()
     y = y[indices].cpu().numpy()
@@ -126,7 +128,8 @@ def compute_roc_auc(
             raise TypeError(f"other_act must be None or callable but is {type(other_act).__name__}.")
         y_pred = other_act(y_pred)
 
-    assert y.shape == y_pred.shape, "data shapes of y_pred and y do not match."
+    if y.shape != y_pred.shape:
+        raise AssertionError("data shapes of y_pred and y do not match.")
     average = Average(average)
 
     if average == Average.MICRO:
diff --git a/monai/networks/blocks/dynunet_block.py b/monai/networks/blocks/dynunet_block.py
index d43958de0c..761e6490c0 100644
--- a/monai/networks/blocks/dynunet_block.py
+++ b/monai/networks/blocks/dynunet_block.py
@@ -229,7 +229,8 @@ def get_norm_layer(spatial_dims: int, out_channels: int, norm_name: str, num_gro
     if norm_name not in ["batch", "instance", "group"]:
         raise ValueError(f"Unsupported normalization mode: {norm_name}")
     if norm_name == "group":
-        assert out_channels % num_groups == 0, "out_channels should be divisible by num_groups."
+        if out_channels % num_groups != 0:
+            raise AssertionError("out_channels should be divisible by num_groups.")
         norm = Norm[norm_name](num_groups=num_groups, num_channels=out_channels, affine=True)
     else:
         norm = Norm[norm_name, spatial_dims](out_channels, affine=True)
@@ -277,7 +278,8 @@ def get_padding(
     stride_np = np.atleast_1d(stride)
     padding_np = (kernel_size_np - stride_np + 1) / 2
     error_msg = "padding value should not be negative, please change the kernel size and/or stride."
-    assert np.min(padding_np) >= 0, error_msg
+    if np.min(padding_np) < 0:
+        raise AssertionError(error_msg)
     padding = tuple(int(p) for p in padding_np)
 
     return padding if len(padding) > 1 else padding[0]
@@ -294,7 +296,8 @@ def get_output_padding(
     out_padding_np = 2 * padding_np + stride_np - kernel_size_np
 
     error_msg = "out_padding value should not be negative, please change the kernel size and/or stride."
-    assert np.min(out_padding_np) >= 0, error_msg
+    if np.min(out_padding_np) < 0:
+        raise AssertionError(error_msg)
     out_padding = tuple(int(p) for p in out_padding_np)
 
     return out_padding if len(out_padding) > 1 else out_padding[0]
diff --git a/monai/networks/blocks/segresnet_block.py b/monai/networks/blocks/segresnet_block.py
index 6aaa9774b5..e95466ca7e 100644
--- a/monai/networks/blocks/segresnet_block.py
+++ b/monai/networks/blocks/segresnet_block.py
@@ -90,8 +90,10 @@ def __init__(
 
         super().__init__()
 
-        assert kernel_size % 2 == 1, "kernel_size should be an odd number."
-        assert in_channels % num_groups == 0, "in_channels should be divisible by num_groups."
+        if kernel_size % 2 != 1:
+            raise AssertionError("kernel_size should be an odd number.")
+        if in_channels % num_groups != 0:
+            raise AssertionError("in_channels should be divisible by num_groups.")
 
         self.norm1 = get_norm_layer(spatial_dims, in_channels, norm_name, num_groups=num_groups)
         self.norm2 = get_norm_layer(spatial_dims, in_channels, norm_name, num_groups=num_groups)
diff --git a/monai/networks/nets/ahnet.py b/monai/networks/nets/ahnet.py
index ff9ddbc598..5146930fca 100644
--- a/monai/networks/nets/ahnet.py
+++ b/monai/networks/nets/ahnet.py
@@ -372,8 +372,10 @@ def __init__(
         self.spatial_dims = spatial_dims
         self.psp_block_num = psp_block_num
 
-        assert spatial_dims in [2, 3], "spatial_dims can only be 2 or 3."
-        assert psp_block_num in [0, 1, 2, 3, 4], "psp_block_num should be an integer that belongs to [0, 4]."
+        if spatial_dims not in [2, 3]:
+            raise AssertionError("spatial_dims can only be 2 or 3.")
+        if psp_block_num not in [0, 1, 2, 3, 4]:
+            raise AssertionError("psp_block_num should be an integer that belongs to [0, 4].")
 
         self.conv1 = conv_type(
             in_channels,
diff --git a/monai/networks/nets/dynunet.py b/monai/networks/nets/dynunet.py
index 8958199d3f..fc52012158 100644
--- a/monai/networks/nets/dynunet.py
+++ b/monai/networks/nets/dynunet.py
@@ -131,8 +131,10 @@ def create_skips(index, downsamples, upsamples, superheads, bottleneck):
                 shouldn't be associated with a supervision head.
             """
 
-            assert len(downsamples) == len(upsamples), f"{len(downsamples)} != {len(upsamples)}"
-            assert (len(downsamples) - len(superheads)) in (1, 0), f"{len(downsamples)}-(0,1) != {len(superheads)}"
+            if len(downsamples) != len(upsamples):
+                raise AssertionError(f"{len(downsamples)} != {len(upsamples)}")
+            if (len(downsamples) - len(superheads)) not in (1, 0):
+                raise AssertionError(f"{len(downsamples)}-(0,1) != {len(superheads)}")
 
             if len(downsamples) == 0:  # bottom of the network, pass the bottleneck block
                 return bottleneck
@@ -157,22 +159,26 @@ def create_skips(index, downsamples, upsamples, superheads, bottleneck):
 
     def check_kernel_stride(self):
         kernels, strides = self.kernel_size, self.strides
         error_msg = "length of kernel_size and strides should be the same, and no less than 3."
-        assert len(kernels) == len(strides) and len(kernels) >= 3, error_msg
+        if not (len(kernels) == len(strides) and len(kernels) >= 3):
+            raise AssertionError(error_msg)
         for idx in range(len(kernels)):
             kernel, stride = kernels[idx], strides[idx]
             if not isinstance(kernel, int):
                 error_msg = "length of kernel_size in block {} should be the same as spatial_dims.".format(idx)
-                assert len(kernel) == self.spatial_dims, error_msg
+                if len(kernel) != self.spatial_dims:
+                    raise AssertionError(error_msg)
             if not isinstance(stride, int):
                 error_msg = "length of stride in block {} should be the same as spatial_dims.".format(idx)
-                assert len(stride) == self.spatial_dims, error_msg
+                if len(stride) != self.spatial_dims:
+                    raise AssertionError(error_msg)
 
     def check_deep_supr_num(self):
         deep_supr_num, strides = self.deep_supr_num, self.strides
         num_up_layers = len(strides) - 1
         error_msg = "deep_supr_num should be less than the number of up sample layers."
-        assert 1 <= deep_supr_num < num_up_layers, error_msg
+        if 1 > deep_supr_num:
+            raise AssertionError(error_msg)
 
     def forward(self, x):
         out = self.skip_layers(x)
diff --git a/monai/networks/nets/segresnet.py b/monai/networks/nets/segresnet.py
index 7972d1537f..12d196c9ab 100644
--- a/monai/networks/nets/segresnet.py
+++ b/monai/networks/nets/segresnet.py
@@ -66,7 +66,8 @@ def __init__(
     ):
         super().__init__()
 
-        assert spatial_dims == 2 or spatial_dims == 3, "spatial_dims can only be 2 or 3."
+        if not (spatial_dims == 2 or spatial_dims == 3):
+            raise AssertionError("spatial_dims can only be 2 or 3.")
 
         self.spatial_dims = spatial_dims
         self.init_filters = init_filters
diff --git a/monai/networks/nets/vnet.py b/monai/networks/nets/vnet.py
index 517328f6ce..49bb5f4163 100644
--- a/monai/networks/nets/vnet.py
+++ b/monai/networks/nets/vnet.py
@@ -212,7 +212,8 @@ def __init__(
     ):
         super().__init__()
 
-        assert spatial_dims == 2 or spatial_dims == 3, "spatial_dims can only be 2 or 3."
+        if not (spatial_dims == 2 or spatial_dims == 3):
+            raise AssertionError("spatial_dims can only be 2 or 3.")
 
         self.in_tr = InputTransition(spatial_dims, in_channels, 16, act)
         self.down_tr32 = DownTransition(spatial_dims, 16, 1, act)
diff --git a/monai/networks/utils.py b/monai/networks/utils.py
index bc3d291203..175d3d8b73 100644
--- a/monai/networks/utils.py
+++ b/monai/networks/utils.py
@@ -45,7 +45,8 @@ def one_hot(labels: torch.Tensor, num_classes: int, dtype: torch.dtype = torch.f
     For every value v = labels[b,1,h,w], the value in the result at [b,v,h,w] will be 1 and all others 0.
     Note that this will include the background label, thus a binary mask should be treated as having 2 classes.
     """
-    assert labels.dim() > 0, "labels should have dim of 1 or more."
+    if labels.dim() <= 0:
+        raise AssertionError("labels should have dim of 1 or more.")
 
     # if `dim` is bigger, add singleton dim at the end
     if labels.ndim < dim + 1:
@@ -54,7 +55,8 @@ def one_hot(labels: torch.Tensor, num_classes: int, dtype: torch.dtype = torch.f
 
     sh = list(labels.shape)
 
-    assert sh[dim] == 1, "labels should have a channel with length equals to one."
+    if sh[dim] != 1:
+        raise AssertionError("labels should have a channel with length equals to one.")
     sh[dim] = num_classes
 
     o = torch.zeros(size=sh, dtype=dtype, device=labels.device)
diff --git a/monai/transforms/croppad/array.py b/monai/transforms/croppad/array.py
index 073d581ee1..e59eb89ac7 100644
--- a/monai/transforms/croppad/array.py
+++ b/monai/transforms/croppad/array.py
@@ -312,7 +312,8 @@ def __call__(self, img: np.ndarray) -> np.ndarray:
             slicing doesn't apply to the channel dim.
         """
         self.randomize(img.shape[1:])
-        assert self._size is not None
+        if self._size is None:
+            raise AssertionError
         if self.random_center:
             return img[self._slices]
         cropper = CenterSpatialCrop(self._size)
diff --git a/monai/transforms/croppad/dictionary.py b/monai/transforms/croppad/dictionary.py
index ad224e5c33..8bf33dd632 100644
--- a/monai/transforms/croppad/dictionary.py
+++ b/monai/transforms/croppad/dictionary.py
@@ -301,7 +301,8 @@ def randomize(self, img_size: Sequence[int]) -> None:
     def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
         d = dict(data)
         self.randomize(d[self.keys[0]].shape[1:])  # image shape from the first data key
-        assert self._size is not None
+        if self._size is None:
+            raise AssertionError
         for key in self.keys:
             if self.random_center:
                 d[key] = d[key][self._slices]
@@ -573,8 +574,10 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> List[Dict[Hashable, n
         bg_indices = d.get(self.bg_indices_key, None) if self.bg_indices_key is not None else None
 
         self.randomize(label, fg_indices, bg_indices, image)
-        assert isinstance(self.spatial_size, tuple)
-        assert self.centers is not None
+        if not isinstance(self.spatial_size, tuple):
+            raise AssertionError
+        if self.centers is None:
+            raise AssertionError
         results: List[Dict[Hashable, np.ndarray]] = [{} for _ in range(self.num_samples)]
         for key in data.keys():
             if key in self.keys:
diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py
index d1ba8ce7fa..3b7b14966e 100644
--- a/monai/transforms/intensity/array.py
+++ b/monai/transforms/intensity/array.py
@@ -73,7 +73,8 @@ def __call__(self, img: Union[torch.Tensor, np.ndarray]) -> Union[torch.Tensor,
         Apply the transform to `img`.
         """
         self.randomize(img.shape)
-        assert self._noise is not None
+        if self._noise is None:
+            raise AssertionError
         if not self._do_transform:
             return img
         dtype = dtype_torch_to_numpy(img.dtype) if isinstance(img, torch.Tensor) else img.dtype
@@ -113,7 +114,8 @@ def __init__(self, offsets: Union[Tuple[float, float], float], prob: float = 0.1
         if isinstance(offsets, (int, float)):
             self.offsets = (min(-offsets, offsets), max(-offsets, offsets))
         else:
-            assert len(offsets) == 2, "offsets should be a number or pair of numbers."
+            if len(offsets) != 2:
+                raise AssertionError("offsets should be a number or pair of numbers.")
             self.offsets = (min(offsets), max(offsets))
         self.prob = prob
 
@@ -185,7 +187,8 @@ def __init__(self, factors: Union[Tuple[float, float], float], prob: float = 0.1
         if isinstance(factors, (int, float)):
             self.factors = (min(-factors, factors), max(-factors, factors))
         else:
-            assert len(factors) == 2, "factors should be a number or pair of numbers."
+            if len(factors) != 2:
+                raise AssertionError("factors should be a number or pair of numbers.")
             self.factors = (min(factors), max(factors))
         self.prob = prob
 
@@ -290,7 +293,8 @@ class ThresholdIntensity(Transform):
     """
 
     def __init__(self, threshold: float, above: bool = True, cval: float = 0.0) -> None:
-        assert isinstance(threshold, (int, float)), "threshold must be a float or int number."
+        if not isinstance(threshold, (int, float)):
+            raise AssertionError("threshold must be a float or int number.")
         self.threshold = threshold
         self.above = above
         self.cval = cval
@@ -349,7 +353,8 @@ class AdjustContrast(Transform):
     """
 
     def __init__(self, gamma: float) -> None:
-        assert isinstance(gamma, (int, float)), "gamma must be a float or int number."
+        if not isinstance(gamma, (int, float)):
+            raise AssertionError("gamma must be a float or int number.")
         self.gamma = gamma
 
     def __call__(self, img: np.ndarray) -> np.ndarray:
@@ -378,10 +383,12 @@ def __init__(self, prob: float = 0.1, gamma: Union[Sequence[float], float] = (0.
         self.prob = prob
 
         if isinstance(gamma, (int, float)):
-            assert gamma > 0.5, "if gamma is single number, must greater than 0.5 and value is picked from (0.5, gamma)"
+            if gamma <= 0.5:
+                raise AssertionError("if gamma is single number, must greater than 0.5 and value is picked from (0.5, gamma)")
             self.gamma = (0.5, gamma)
         else:
-            assert len(gamma) == 2, "gamma should be a number or pair of numbers."
+            if len(gamma) != 2:
+                raise AssertionError("gamma should be a number or pair of numbers.")
             self.gamma = (min(gamma), max(gamma))
 
         self._do_transform = False
@@ -396,7 +403,8 @@ def __call__(self, img: np.ndarray) -> np.ndarray:
         Apply the transform to `img`.
         """
         self.randomize()
-        assert self.gamma_value is not None
+        if self.gamma_value is None:
+            raise AssertionError
         if not self._do_transform:
             return img
         adjuster = AdjustContrast(self.gamma_value)
@@ -461,8 +469,10 @@ class ScaleIntensityRangePercentiles(Transform):
     def __init__(
         self, lower: float, upper: float, b_min: float, b_max: float, clip: bool = False, relative: bool = False
    ) -> None:
-        assert 0.0 <= lower <= 100.0, "Percentiles must be in the range [0, 100]"
-        assert 0.0 <= upper <= 100.0, "Percentiles must be in the range [0, 100]"
+        if 0.0 > lower:
+            raise AssertionError("Percentiles must be in the range [0, 100]")
+        if 0.0 > upper:
+            raise AssertionError("Percentiles must be in the range [0, 100]")
         self.lower = lower
         self.upper = upper
         self.b_min = b_min
         self.b_max = b_max
@@ -768,11 +778,14 @@ class RandHistogramShift(Randomizable, Transform):
 
     def __init__(self, num_control_points: Union[Tuple[int, int], int] = 10, prob: float = 0.1) -> None:
         if isinstance(num_control_points, int):
-            assert num_control_points > 2, "num_control_points should be greater than or equal to 3"
+            if num_control_points <= 2:
+                raise AssertionError("num_control_points should be greater than or equal to 3")
             self.num_control_points = (num_control_points, num_control_points)
         else:
-            assert len(num_control_points) == 2, "num_control points should be a number or a pair of numbers"
-            assert min(num_control_points) > 2, "num_control_points should be greater than or equal to 3"
+            if len(num_control_points) != 2:
+                raise AssertionError("num_control points should be a number or a pair of numbers")
+            if min(num_control_points) <= 2:
+                raise AssertionError("num_control_points should be greater than or equal to 3")
             self.num_control_points = (min(num_control_points), max(num_control_points))
         self.prob = prob
         self._do_transform = False
diff --git a/monai/transforms/intensity/dictionary.py b/monai/transforms/intensity/dictionary.py
index fa3144d40c..a3a3e1fd57 100644
--- a/monai/transforms/intensity/dictionary.py
+++ b/monai/transforms/intensity/dictionary.py
@@ -124,7 +124,8 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda
 
         image_shape = d[self.keys[0]].shape  # image shape from the first data key
         self.randomize(image_shape)
-        assert self._noise is not None
+        if self._noise is None:
+            raise AssertionError
         if not self._do_transform:
             return d
         for key in self.keys:
@@ -175,7 +176,8 @@ def __init__(self, keys: KeysCollection, offsets: Union[Tuple[float, float], flo
         if isinstance(offsets, (int, float)):
             self.offsets = (min(-offsets, offsets), max(-offsets, offsets))
         else:
-            assert len(offsets) == 2, "offsets should be a number or pair of numbers."
+            if len(offsets) != 2:
+                raise AssertionError("offsets should be a number or pair of numbers.")
             self.offsets = (min(offsets), max(offsets))
         self.prob = prob
 
@@ -246,7 +248,8 @@ def __init__(self, keys: KeysCollection, factors: Union[Tuple[float, float], flo
         if isinstance(factors, (int, float)):
             self.factors = (min(-factors, factors), max(-factors, factors))
         else:
-            assert len(factors) == 2, "factors should be a number or pair of numbers."
+            if len(factors) != 2:
+                raise AssertionError("factors should be a number or pair of numbers.")
             self.factors = (min(factors), max(factors))
         self.prob = prob
 
@@ -399,10 +402,12 @@ def __init__(
         self.prob: float = prob
 
         if isinstance(gamma, (int, float)):
-            assert gamma > 0.5, "if gamma is single number, must greater than 0.5 and value is picked from (0.5, gamma)"
+            if gamma <= 0.5:
+                raise AssertionError("if gamma is single number, must greater than 0.5 and value is picked from (0.5, gamma)")
             self.gamma = (0.5, gamma)
         else:
-            assert len(gamma) == 2, "gamma should be a number or pair of numbers."
+            if len(gamma) != 2:
+                raise AssertionError("gamma should be a number or pair of numbers.")
             self.gamma = (min(gamma), max(gamma))
 
         self._do_transform = False
@@ -415,7 +420,8 @@ def randomize(self, data: Optional[Any] = None) -> None:
     def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
         d = dict(data)
         self.randomize()
-        assert self.gamma_value is not None
+        if self.gamma_value is None:
+            raise AssertionError
         if not self._do_transform:
             return d
         adjuster = AdjustContrast(self.gamma_value)
@@ -689,11 +695,14 @@ def __init__(
     ) -> None:
         super().__init__(keys)
         if isinstance(num_control_points, int):
-            assert num_control_points > 2, "num_control_points should be greater than or equal to 3"
+            if num_control_points <= 2:
+                raise AssertionError("num_control_points should be greater than or equal to 3")
             self.num_control_points = (num_control_points, num_control_points)
         else:
-            assert len(num_control_points) == 2, "num_control points should be a number or a pair of numbers"
-            assert min(num_control_points) > 2, "num_control_points should be greater than or equal to 3"
+            if len(num_control_points) != 2:
+                raise AssertionError("num_control points should be a number or a pair of numbers")
+            if min(num_control_points) <= 2:
+                raise AssertionError("num_control_points should be greater than or equal to 3")
             self.num_control_points = (min(num_control_points), max(num_control_points))
         self.prob = prob
         self._do_transform = False
diff --git a/monai/transforms/io/array.py b/monai/transforms/io/array.py
index f1b92025a7..f82bec1f75 100644
--- a/monai/transforms/io/array.py
+++ b/monai/transforms/io/array.py
@@ -205,9 +205,10 @@ def __call__(self, filename: Union[Sequence[Union[Path, str]], Path, str]):
                         continue
                     compatible_meta[meta_key] = meta_datum
                 else:
-                    assert np.allclose(
+                    if not np.allclose(
                         header["affine"], compatible_meta["affine"]
-                    ), "affine data of all images should be same."
+                    ):
+                        raise AssertionError("affine data of all images should be same.")
 
         img_array = np.stack(img_array, axis=0) if len(img_array) > 1 else img_array[0]
         if self.image_only:
@@ -262,9 +263,10 @@ def __call__(self, filename: Union[Sequence[Union[Path, str]], Path, str]):
             if not compatible_meta:
                 compatible_meta = meta
             else:
-                assert np.allclose(
+                if not np.allclose(
                     meta["spatial_shape"], compatible_meta["spatial_shape"]
-                ), "all the images in the list should have same spatial shape."
+                ):
+                    raise AssertionError("all the images in the list should have same spatial shape.")
 
         img_array = np.stack(img_array, axis=0) if len(img_array) > 1 else img_array[0]
         return img_array if self.image_only else (img_array, compatible_meta)
@@ -326,9 +328,10 @@ def _save_data_meta(data_array, name, data, compatible_meta):
             if not compatible_meta:
                 compatible_meta = meta
             else:
-                assert np.allclose(
+                if not np.allclose(
                     meta["spatial_shape"], compatible_meta["spatial_shape"]
-                ), "all the data in the list should have same shape."
+                ):
+                    raise AssertionError("all the data in the list should have same shape.")
             return compatible_meta
 
         for name in filename:
diff --git a/monai/transforms/io/dictionary.py b/monai/transforms/io/dictionary.py
index 474fbd0a50..dd6b049d17 100644
--- a/monai/transforms/io/dictionary.py
+++ b/monai/transforms/io/dictionary.py
@@ -169,9 +169,11 @@ def __call__(self, data):
         d = dict(data)
         for key in self.keys:
             data = self.loader(d[key])
-            assert isinstance(data, (tuple, list)), "loader must return a tuple or list."
+            if not isinstance(data, (tuple, list)):
+                raise AssertionError("loader must return a tuple or list.")
             d[key] = data[0]
-            assert isinstance(data[1], dict), "metadata must be a dict."
+            if not isinstance(data[1], dict):
+                raise AssertionError("metadata must be a dict.")
             key_to_add = f"{key}_{self.meta_key_postfix}"
             if key_to_add in d and not self.overwriting:
                 raise KeyError(f"Meta data with key {key_to_add} already exists and overwriting=False.")
diff --git a/monai/transforms/post/array.py b/monai/transforms/post/array.py
index 3249fbaf71..0c60b0cc89 100644
--- a/monai/transforms/post/array.py
+++ b/monai/transforms/post/array.py
@@ -163,7 +163,8 @@ def __call__(
 
         if to_onehot or self.to_onehot:
             _nclasses = self.n_classes if n_classes is None else n_classes
-            assert isinstance(_nclasses, int), "One of self.n_classes or n_classes must be an integer"
+            if not isinstance(_nclasses, int):
+                raise AssertionError("One of self.n_classes or n_classes must be an integer")
             img = one_hot(img, _nclasses)
 
         if threshold_values or self.threshold_values:
diff --git a/monai/transforms/spatial/array.py b/monai/transforms/spatial/array.py
index 26cbf89f33..3e1ded4e94 100644
--- a/monai/transforms/spatial/array.py
+++ b/monai/transforms/spatial/array.py
@@ -264,7 +264,8 @@ def __call__(
         if self.as_closest_canonical:
             spatial_ornt = src
         else:
-            assert self.axcodes is not None
+            if self.axcodes is None:
+                raise AssertionError
             dst = nib.orientations.axcodes2ornt(self.axcodes[:sr], labels=self.labels)
             if len(dst) < sr:
                 raise ValueError(
@@ -807,7 +808,8 @@ def __init__(
     ) -> None:
         self.min_zoom = ensure_tuple(min_zoom)
         self.max_zoom = ensure_tuple(max_zoom)
-        assert len(self.min_zoom) == len(self.max_zoom), "min_zoom and max_zoom must have same length."
+        if len(self.min_zoom) != len(self.max_zoom):
+            raise AssertionError("min_zoom and max_zoom must have same length.")
         self.prob = prob
         self.mode: InterpolateMode = InterpolateMode(mode)
         self.padding_mode: NumpyPadMode = NumpyPadMode(padding_mode)
@@ -1129,7 +1131,8 @@ def __call__(
 
         if not torch.is_tensor(img):
             img = torch.as_tensor(np.ascontiguousarray(img))
-        assert grid is not None, "Error, grid argument must be supplied as an ndarray or tensor "
+        if grid is None:
+            raise AssertionError("Error, grid argument must be supplied as an ndarray or tensor ")
         grid = torch.tensor(grid) if not torch.is_tensor(grid) else grid.detach().clone()
         if self.device:
             img = img.to(self.device)
@@ -1624,7 +1627,8 @@ def __call__(
         self.randomize(grid_size=sp_size)
         grid = create_grid(spatial_size=sp_size)
         if self.do_transform:
-            assert self.rand_offset is not None
+            if self.rand_offset is None:
+                raise AssertionError
             grid = torch.as_tensor(np.ascontiguousarray(grid), device=self.device)
             gaussian = GaussianFilter(3, self.sigma, 3.0).to(device=self.device)
             offset = torch.as_tensor(self.rand_offset, device=self.device).unsqueeze(0)
diff --git a/monai/transforms/spatial/dictionary.py b/monai/transforms/spatial/dictionary.py
index 2113f6c2d0..615a327d90 100644
--- a/monai/transforms/spatial/dictionary.py
+++ b/monai/transforms/spatial/dictionary.py
@@ -1012,7 +1012,8 @@ def __init__(
         super().__init__(keys)
         self.min_zoom = ensure_tuple(min_zoom)
         self.max_zoom = ensure_tuple(max_zoom)
-        assert len(self.min_zoom) == len(self.max_zoom), "min_zoom and max_zoom must have same length."
+        if len(self.min_zoom) != len(self.max_zoom):
+            raise AssertionError("min_zoom and max_zoom must have same length.")
         self.prob = prob
 
         self.mode = ensure_tuple_rep(mode, len(self.keys))
diff --git a/monai/transforms/utility/array.py b/monai/transforms/utility/array.py
index a851a56a44..5476e800f4 100644
--- a/monai/transforms/utility/array.py
+++ b/monai/transforms/utility/array.py
@@ -83,7 +83,8 @@ class AsChannelFirst(Transform):
     """
 
     def __init__(self, channel_dim: int = -1) -> None:
-        assert isinstance(channel_dim, int) and channel_dim >= -1, "invalid channel dimension."
+        if not (isinstance(channel_dim, int) and channel_dim >= -1):
+            raise AssertionError("invalid channel dimension.")
         self.channel_dim = channel_dim
 
     def __call__(self, img: np.ndarray) -> np.ndarray:
@@ -109,7 +110,8 @@ class AsChannelLast(Transform):
     """
 
     def __init__(self, channel_dim: int = 0) -> None:
-        assert isinstance(channel_dim, int) and channel_dim >= -1, "invalid channel dimension."
+        if not (isinstance(channel_dim, int) and channel_dim >= -1):
+            raise AssertionError("invalid channel dimension.")
         self.channel_dim = channel_dim
 
     def __call__(self, img: np.ndarray) -> np.ndarray:
@@ -151,7 +153,8 @@ class RepeatChannel(Transform):
     """
 
     def __init__(self, repeats: int) -> None:
-        assert repeats > 0, "repeats count must be greater than 0."
+        if repeats <= 0:
+            raise AssertionError("repeats count must be greater than 0.")
         self.repeats = repeats
 
     def __call__(self, img: np.ndarray) -> np.ndarray:
@@ -335,7 +338,8 @@ def __init__(
             TypeError: When ``additional_info`` is not an ``Optional[Callable]``.
 
         """
-        assert isinstance(prefix, str), "prefix must be a string."
+        if not isinstance(prefix, str):
+            raise AssertionError("prefix must be a string.")
         self.prefix = prefix
         self.data_shape = data_shape
         self.value_range = value_range
diff --git a/monai/transforms/utility/dictionary.py b/monai/transforms/utility/dictionary.py
index 7b95fdab9e..1427f24356 100644
--- a/monai/transforms/utility/dictionary.py
+++ b/monai/transforms/utility/dictionary.py
@@ -261,7 +261,8 @@ def __call__(
         for key in self.keys:
             rets = self.splitter(d[key])
             postfixes: Sequence = list(range(len(rets))) if self.output_postfixes is None else self.output_postfixes
-            assert len(postfixes) == len(rets), "count of split results must match output_postfixes."
+            if len(postfixes) != len(rets):
+                raise AssertionError("count of split results must match output_postfixes.")
             for i, r in enumerate(rets):
                 split_key = f"{key}_{postfixes[i]}"
                 if split_key in d:
diff --git a/monai/utils/aliases.py b/monai/utils/aliases.py
index e4ef40da11..5bcb512f3c 100644
--- a/monai/utils/aliases.py
+++ b/monai/utils/aliases.py
@@ -60,7 +60,8 @@ def resolve_name(name):
     with alias_lock:
         obj = GlobalAliases.get(name, None)
 
-    assert name not in GlobalAliases or obj is not None
+    if not (name not in GlobalAliases or obj is not None):
+        raise AssertionError
 
     # attempt to resolve a qualified name
     if obj is None and "." in name:
diff --git a/monai/utils/module.py b/monai/utils/module.py
index 1d06585e3b..4e87e835b1 100644
--- a/monai/utils/module.py
+++ b/monai/utils/module.py
@@ -188,7 +188,8 @@ def optional_import(
         the_module = import_module(module)
         if not allow_namespace_pkg:
             is_namespace = getattr(the_module, "__file__", None) is None and hasattr(the_module, "__path__")
-            assert not is_namespace
+            if is_namespace:
+                raise AssertionError
         if name:  # user specified to load class/function/... from the module
             the_module = getattr(the_module, name)
     except Exception as import_exception:  # any exceptions during import
diff --git a/monai/visualize/img2tensorboard.py b/monai/visualize/img2tensorboard.py
index a4880344ac..8f6eca5482 100644
--- a/monai/visualize/img2tensorboard.py
+++ b/monai/visualize/img2tensorboard.py
@@ -40,7 +40,8 @@ def _image3_animated_gif(tag: str, image: Union[np.ndarray, torch.Tensor], scale
         scale_factor: amount to multiply values by. if the image data is between 0 and 1, using 255 for this value will
             scale it to displayable range
     """
-    assert len(image.shape) == 3, "3D image tensors expected to be in `HWD` format, len(image.shape) != 3"
+    if len(image.shape) != 3:
+        raise AssertionError("3D image tensors expected to be in `HWD` format, len(image.shape) != 3")
 
     ims = [(np.asarray((image[:, :, i])) * scale_factor).astype(np.uint8) for i in range(image.shape[2])]
     ims = [GifImage.fromarray(im) for im in ims]
diff --git a/monai/visualize/occlusion_sensitivity.py b/monai/visualize/occlusion_sensitivity.py
index bb9ef59e5c..5863614965 100644
--- a/monai/visualize/occlusion_sensitivity.py
+++ b/monai/visualize/occlusion_sensitivity.py
@@ -303,7 +303,8 @@ def __call__(  # type: ignore
 
             # upsample
             if self.upsampler is not None:
-                assert len(sensitivity_ims_list[i].shape) == len(x.shape)
+                if len(sensitivity_ims_list[i].shape) != len(x.shape):
+                    raise AssertionError
                 if np.any(sensitivity_ims_list[i].shape != x.shape):
                     img_spatial = tuple(output_im_shape[1:])
                     sensitivity_ims_list[i] = self.upsampler(img_spatial)(sensitivity_ims_list[i])
diff --git a/setup.py b/setup.py
index e8a54cc9fc..b764472012 100644
--- a/setup.py
+++ b/setup.py
@@ -38,7 +38,8 @@
     BUILD_CUDA = (torch.cuda.is_available() and (CUDA_HOME is not None)) or FORCE_CUDA
 
     _pt_version = pkg_resources.parse_version(torch.__version__).release  # type: ignore[attr-defined]
-    assert _pt_version is not None and len(_pt_version) >= 3, "unknown torch version"
+    if not (_pt_version is not None and len(_pt_version) >= 3):
+        raise AssertionError("unknown torch version")
     TORCH_VERSION = int(_pt_version[0]) * 10000 + int(_pt_version[1]) * 100 + int(_pt_version[2])
 except (ImportError, TypeError, AssertionError, AttributeError) as e:
     TORCH_VERSION = 0

From bbdc3ecd8fd293cf42f018b3976d9780c08d743f Mon Sep 17 00:00:00 2001
From: monai-bot
Date: Wed, 13 Jan 2021 17:09:59 +0000
Subject: [PATCH 2/3] [MONAI] python code formatting

Signed-off-by: monai-bot
---
 monai/losses/dice.py | 16 ++++------------
 monai/losses/tversky.py | 4 +---
 monai/metrics/rocauc.py | 8 ++------
 monai/transforms/intensity/array.py | 4 +++-
 monai/transforms/intensity/dictionary.py | 4 +++-
 monai/transforms/io/array.py | 12 +++---------
 6 files changed, 16 insertions(+), 32 deletions(-)

diff --git a/monai/losses/dice.py b/monai/losses/dice.py
index 807bcc4bbc..9bc5ad28ea 100644
--- a/monai/losses/dice.py
+++ b/monai/losses/dice.py
@@ -135,9 +135,7 @@ def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
             target = target[:, 1:]
             input = input[:, 1:]
 
-        if (
-            target.shape != input.shape
-        ):
+        if target.shape != input.shape:
             raise AssertionError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})")
 
         # reducing only spatial dimensions (not batch nor channels)
         reduce_axis = list(range(2, len(input.shape)))
@@ -195,17 +193,13 @@ def forward(self, input: torch.Tensor, target: torch.Tensor, mask: Optional[torc
             # checking if mask is of proper shape
             if input.dim() != mask.dim():
                 raise AssertionError(f"dim of input ({input.shape}) is different from mask ({mask.shape})")
-            if not (
-                input.shape[0] == mask.shape[0] or mask.shape[0] == 1
-            ):
+            if not (input.shape[0] == mask.shape[0] or mask.shape[0] == 1):
                 raise AssertionError(f" batch size of mask ({mask.shape}) must be 1 or equal to input ({input.shape})")
 
             if target.dim() > 1:
                 if mask.shape[1] != 1:
                     raise AssertionError(f"mask ({mask.shape}) must have only 1 channel")
-            if (
-                input.shape[2:] != mask.shape[2:]
-            ):
+            if input.shape[2:] != mask.shape[2:]:
                 raise AssertionError(f"spatial size of input ({input.shape}) is different from mask ({mask.shape})")
 
             input = input * mask
@@ -327,9 +321,7 @@ def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
             target = target[:, 1:]
             input = input[:, 1:]
 
-        if (
-            target.shape != input.shape
-        ):
+        if target.shape != input.shape:
             raise AssertionError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})")
 
         # reducing only spatial dimensions (not batch nor channels)
diff --git a/monai/losses/tversky.py b/monai/losses/tversky.py
index 3a2e8eb861..b1c45a74a2 100644
--- a/monai/losses/tversky.py
+++ b/monai/losses/tversky.py
@@ -130,9 +130,7 @@ def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
             target = target[:, 1:]
             input = input[:, 1:]
 
-        if (
-            target.shape != input.shape
-        ):
+        if target.shape != input.shape:
             raise AssertionError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})")
 
         p0 = input
diff --git a/monai/metrics/rocauc.py b/monai/metrics/rocauc.py
index de378e473e..9f081d1698 100644
--- a/monai/metrics/rocauc.py
+++ b/monai/metrics/rocauc.py
@@ -20,13 +20,9 @@
 
 
 def _calculate(y: torch.Tensor, y_pred: torch.Tensor) -> float:
-    if not (y.ndimension() == y_pred.ndimension() == 1 and len(y) == len(
-        y_pred
-    )):
+    if not (y.ndimension() == y_pred.ndimension() == 1 and len(y) == len(y_pred)):
         raise AssertionError("y and y_pred must be 1 dimension data with same length.")
-    if not y.unique().equal(
-        torch.tensor([0, 1], dtype=y.dtype, device=y.device)
-    ):
+    if not y.unique().equal(torch.tensor([0, 1], dtype=y.dtype, device=y.device)):
         raise AssertionError("y values must be 0 or 1, can not be all 0 or all 1.")
     n = len(y)
     indices = y_pred.argsort()
diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py
index 3b7b14966e..2450b60d9f 100644
--- a/monai/transforms/intensity/array.py
+++ b/monai/transforms/intensity/array.py
@@ -384,7 +384,9 @@ def __init__(self, prob: float = 0.1, gamma: Union[Sequence[float], float] = (0.
 
         if isinstance(gamma, (int, float)):
             if gamma <= 0.5:
-                raise AssertionError("if gamma is single number, must greater than 0.5 and value is picked from (0.5, gamma)")
+                raise AssertionError(
+                    "if gamma is single number, must greater than 0.5 and value is picked from (0.5, gamma)"
+                )
             self.gamma = (0.5, gamma)
         else:
             if len(gamma) != 2:
diff --git a/monai/transforms/intensity/dictionary.py b/monai/transforms/intensity/dictionary.py
index a3a3e1fd57..34d75faf63 100644
--- a/monai/transforms/intensity/dictionary.py
+++ b/monai/transforms/intensity/dictionary.py
@@ -403,7 +403,9 @@ def __init__(
 
         if isinstance(gamma, (int, float)):
             if gamma <= 0.5:
-                raise AssertionError("if gamma is single number, must greater than 0.5 and value is picked from (0.5, gamma)")
+                raise AssertionError(
+                    "if gamma is single number, must greater than 0.5 and value is picked from (0.5, gamma)"
+                )
             self.gamma = (0.5, gamma)
         else:
             if len(gamma) != 2:
diff --git a/monai/transforms/io/array.py b/monai/transforms/io/array.py
index f82bec1f75..026c0a1051 100644
--- a/monai/transforms/io/array.py
+++ b/monai/transforms/io/array.py
@@ -205,9 +205,7 @@ def __call__(self, filename: Union[Sequence[Union[Path, str]], Path, str]):
                         continue
                     compatible_meta[meta_key] = meta_datum
                 else:
-                    if not np.allclose(
-                        header["affine"], compatible_meta["affine"]
-                    ):
+                    if not np.allclose(header["affine"], compatible_meta["affine"]):
                         raise AssertionError("affine data of all images should be same.")
 
         img_array = np.stack(img_array, axis=0) if len(img_array) > 1 else img_array[0]
         if self.image_only:
@@ -262,9 +261,7 @@ def __call__(self, filename: Union[Sequence[Union[Path, str]], Path, str]):
             if not compatible_meta:
                 compatible_meta = meta
             else:
-                if not np.allclose(
-                    meta["spatial_shape"], compatible_meta["spatial_shape"]
-                ):
+                if not np.allclose(meta["spatial_shape"], compatible_meta["spatial_shape"]):
                     raise AssertionError("all the images in the list should have same spatial shape.")
 
         img_array = np.stack(img_array, axis=0) if len(img_array) > 1 else img_array[0]
         return img_array if self.image_only else (img_array, compatible_meta)
@@ -326,9 +324,7 @@ def _save_data_meta(data_array, name, data, compatible_meta):
             if not compatible_meta:
                 compatible_meta = meta
             else:
-                if not np.allclose(
-                    meta["spatial_shape"], compatible_meta["spatial_shape"]
-                ):
+                if not np.allclose(meta["spatial_shape"], compatible_meta["spatial_shape"]):
                     raise AssertionError("all the data in the list should have same shape.")
             return compatible_meta
 

From e540428c2227aaea0df7a886db57e28cb5bf26b9 Mon Sep 17 00:00:00 2001
From: Wenqi Li
Date: Fri, 15 Jan 2021 11:13:26 +0000
Subject: [PATCH 3/3] fixes assertion conditions

Signed-off-by: Wenqi Li
---
 monai/inferers/utils.py | 2 +-
 monai/networks/blocks/dynunet_block.py | 6 ++----
 monai/networks/nets/dynunet.py | 5 ++---
 monai/networks/nets/segresnet.py | 2 +-
 monai/networks/nets/vnet.py | 2 +-
 monai/transforms/intensity/array.py | 4 ++--
 monai/utils/aliases.py | 2 +-
 setup.py | 2 +-
 8 files changed, 11 insertions(+), 14 deletions(-)

diff --git a/monai/inferers/utils.py b/monai/inferers/utils.py
index 903cf84a0c..85779fc6d1 100644
--- a/monai/inferers/utils.py
+++ b/monai/inferers/utils.py
@@ -82,7 +82,7 @@ def sliding_window_inference(
 
     """
     num_spatial_dims = len(inputs.shape) - 2
-    if 0 > overlap:
+    if overlap < 0 or overlap >= 1:
         raise AssertionError("overlap must be >= 0 and < 1.")
 
     # determine image spatial size and batch size
diff --git a/monai/networks/blocks/dynunet_block.py b/monai/networks/blocks/dynunet_block.py
index 761e6490c0..577fd4d71d 100644
--- a/monai/networks/blocks/dynunet_block.py
+++ b/monai/networks/blocks/dynunet_block.py
@@ -277,9 +277,8 @@ def get_padding(
     kernel_size_np = np.atleast_1d(kernel_size)
     stride_np = np.atleast_1d(stride)
     padding_np = (kernel_size_np - stride_np + 1) / 2
-    error_msg = "padding value should not be negative, please change the kernel size and/or stride."
     if np.min(padding_np) < 0:
-        raise AssertionError(error_msg)
+        raise AssertionError("padding value should not be negative, please change the kernel size and/or stride.")
     padding = tuple(int(p) for p in padding_np)
 
     return padding if len(padding) > 1 else padding[0]
@@ -295,9 +294,8 @@ def get_output_padding(
     padding_np = np.atleast_1d(padding)
 
     out_padding_np = 2 * padding_np + stride_np - kernel_size_np
-    error_msg = "out_padding value should not be negative, please change the kernel size and/or stride."
     if np.min(out_padding_np) < 0:
-        raise AssertionError(error_msg)
+        raise AssertionError("out_padding value should not be negative, please change the kernel size and/or stride.")
     out_padding = tuple(int(p) for p in out_padding_np)
 
     return out_padding if len(out_padding) > 1 else out_padding[0]
diff --git a/monai/networks/nets/dynunet.py b/monai/networks/nets/dynunet.py
index fc52012158..ba88c35f8d 100644
--- a/monai/networks/nets/dynunet.py
+++ b/monai/networks/nets/dynunet.py
@@ -176,9 +176,8 @@ def check_kernel_stride(self):
     def check_deep_supr_num(self):
         deep_supr_num, strides = self.deep_supr_num, self.strides
         num_up_layers = len(strides) - 1
-        error_msg = "deep_supr_num should be less than the number of up sample layers."
-        if 1 > deep_supr_num:
-            raise AssertionError(error_msg)
+        if deep_supr_num < 1 or deep_supr_num >= num_up_layers:
+            raise AssertionError("deep_supr_num should be less than the number of up sample layers.")
 
     def forward(self, x):
         out = self.skip_layers(x)
diff --git a/monai/networks/nets/segresnet.py b/monai/networks/nets/segresnet.py
index 12d196c9ab..c7a085b569 100644
--- a/monai/networks/nets/segresnet.py
+++ b/monai/networks/nets/segresnet.py
@@ -66,7 +66,7 @@ def __init__(
     ):
         super().__init__()
 
-        if not (spatial_dims == 2 or spatial_dims == 3):
+        if spatial_dims not in (2, 3):
             raise AssertionError("spatial_dims can only be 2 or 3.")
 
         self.spatial_dims = spatial_dims
diff --git a/monai/networks/nets/vnet.py b/monai/networks/nets/vnet.py
index 49bb5f4163..63acb5cafb 100644
--- a/monai/networks/nets/vnet.py
+++ b/monai/networks/nets/vnet.py
@@ -212,7 +212,7 @@ def __init__(
     ):
         super().__init__()
 
-        if not (spatial_dims == 2 or spatial_dims == 3):
+        if spatial_dims not in (2, 3):
             raise AssertionError("spatial_dims can only be 2 or 3.")
 
         self.in_tr = InputTransition(spatial_dims, in_channels, 16, act)
diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py
index 2450b60d9f..2d3cca64e6 100644
--- a/monai/transforms/intensity/array.py
+++ b/monai/transforms/intensity/array.py
@@ -471,9 +471,9 @@ class ScaleIntensityRangePercentiles(Transform):
     def __init__(
         self, lower: float, upper: float, b_min: float, b_max: float, clip: bool = False, relative: bool = False
     ) -> None:
-        if 0.0 > lower:
+        if lower < 0.0 or lower > 100.0:
             raise AssertionError("Percentiles must be in the range [0, 100]")
-        if 0.0 > upper:
+        if upper < 0.0 or upper > 100.0:
             raise AssertionError("Percentiles must be in the range [0, 100]")
         self.lower = lower
         self.upper = upper
diff --git a/monai/utils/aliases.py b/monai/utils/aliases.py
index 5bcb512f3c..e8192897b8 100644
--- a/monai/utils/aliases.py
+++ b/monai/utils/aliases.py
@@ -60,7 +60,7 @@ def resolve_name(name):
     with alias_lock:
         obj = GlobalAliases.get(name, None)
 
-    if not (name not in GlobalAliases or obj is not None):
+    if name in GlobalAliases and obj is None:
         raise AssertionError
 
     # attempt to resolve a qualified name
diff --git a/setup.py b/setup.py
index b764472012..9b20df845a 100644
--- a/setup.py
+++ b/setup.py
@@ -38,7 +38,7 @@
     BUILD_CUDA = (torch.cuda.is_available() and (CUDA_HOME is not None)) or FORCE_CUDA
 
     _pt_version = pkg_resources.parse_version(torch.__version__).release  # type: ignore[attr-defined]
-    if not (_pt_version is not None and len(_pt_version) >= 3):
+    if _pt_version is None or len(_pt_version) < 3:
         raise AssertionError("unknown torch version")
     TORCH_VERSION = int(_pt_version[0]) * 10000 + int(_pt_version[1]) * 100 + int(_pt_version[2])
 except (ImportError, TypeError, AssertionError, AttributeError) as e:
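
The short sketch below is an editorial illustration and not part of the patch series: it assumes a free-standing helper named check_overlap (no such function exists in the patches) and only demonstrates the pattern the three commits converge on, replacing a bare assert with an explicit if/raise AssertionError check while keeping both bounds of the original condition (the weakened "if 0 > overlap" from the first commit is what the third commit repairs).

def check_overlap(overlap: float) -> None:
    # Hypothetical helper, not taken from MONAI: keeps the full range check that
    # "assert 0 <= overlap < 1" expressed, but raises explicitly so the validation
    # is not stripped when Python runs with the -O flag.
    if overlap < 0 or overlap >= 1:
        raise AssertionError("overlap must be >= 0 and < 1.")

check_overlap(0.25)      # in range: returns silently
try:
    check_overlap(1.0)   # out of range: raises AssertionError
except AssertionError as exc:
    print(exc)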