1 change: 0 additions & 1 deletion docs/source/conf.py
@@ -106,7 +106,6 @@ def generate_apidocs(*args):
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme_options = {
"external_links": [{"url": "https://github.com/Project-MONAI/tutorials", "name": "Tutorials"}],
"collapse_navigation": True,
"icon_links": [
{
"name": "GitHub",
1 change: 0 additions & 1 deletion monai/data/csv_saver.py
@@ -87,7 +87,6 @@ def save(self, data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict]
"""
save_key = meta_data[Key.FILENAME_OR_OBJ] if meta_data else str(self._data_index)
self._data_index += 1
data_: np.ndarray
if isinstance(data, torch.Tensor):
data = data.detach().cpu().numpy()
self._cache_dict[save_key] = np.asarray(data, dtype=float)
2 changes: 1 addition & 1 deletion monai/data/utils.py
@@ -403,7 +403,7 @@ def _detect_batch_size(batch_data: Sequence):
dict_batch[k] = v

return dict_batch
elif isinstance(batch_data, list):
if isinstance(batch_data, list):
batch_size = _detect_batch_size(batch_data)
list_batch = []
for b in batch_data:
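The change above follows the pattern applied throughout this PR: when the preceding branch already ends in `return`, a trailing `elif`/`else` is redundant and can become a plain `if` without changing behaviour. A minimal stand-alone sketch (hypothetical function, not MONAI code) of the before/after:

def describe(value):
    # before the cleanup: `elif` chained after a branch that already returns
    #     if isinstance(value, dict):
    #         return "mapping"
    #     elif isinstance(value, list):
    #         return "sequence"
    # after the cleanup: identical behaviour, one level less of branching
    if isinstance(value, dict):
        return "mapping"
    if isinstance(value, list):
        return "sequence"
    return "scalar"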
1 change: 1 addition & 0 deletions monai/engines/workflow.py
@@ -149,6 +149,7 @@ def set_sampler_epoch(engine: Engine):
self.prepare_batch = prepare_batch
self.metric_cmp_fn = metric_cmp_fn
self.amp = amp
self.scaler: Optional[torch.cuda.amp.GradScaler] = None

if event_names is None:
event_names = [IterationEvents]
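For context on the added annotation: declaring `self.scaler` as `Optional[torch.cuda.amp.GradScaler]` in `__init__` gives the attribute a stable, checkable type even when AMP is disabled. A minimal sketch of the idea (hypothetical class, not the MONAI Workflow implementation):

from typing import Optional

import torch


class TrainerSketch:
    def __init__(self, amp: bool = False) -> None:
        self.amp = amp
        # the attribute always exists (possibly as None), so type checkers
        # and later code can rely on it being defined
        self.scaler: Optional[torch.cuda.amp.GradScaler] = None
        if self.amp:
            self.scaler = torch.cuda.amp.GradScaler()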
2 changes: 1 addition & 1 deletion monai/handlers/utils.py
@@ -259,7 +259,7 @@ def from_engine(keys: KeysCollection, first: bool = False):
def _wrapper(data):
if isinstance(data, dict):
return tuple(data[k] for k in keys)
elif isinstance(data, list) and isinstance(data[0], dict):
if isinstance(data, list) and isinstance(data[0], dict):
# if data is a list of dictionaries, extract expected keys and construct lists,
# if `first=True`, only extract keys from the first item of the list
ret = [data[0][k] if first else [i[k] for i in data] for k in keys]
29 changes: 11 additions & 18 deletions monai/networks/blocks/fcn.py
@@ -191,25 +191,18 @@ def forward(self, x: torch.Tensor):
fs3 = self.refine8(self.up_conv(fs2) + gcfm4)
fs4 = self.refine9(self.up_conv(fs3) + gcfm5)
return self.refine10(self.up_conv(fs4))
else:
fs1 = self.refine6(
F.interpolate(gcfm1, fm3.size()[2:], mode=self.upsample_mode, align_corners=True) + gcfm2
)
fs2 = self.refine7(F.interpolate(fs1, fm2.size()[2:], mode=self.upsample_mode, align_corners=True) + gcfm3)
fs3 = self.refine8(
F.interpolate(fs2, pool_x.size()[2:], mode=self.upsample_mode, align_corners=True) + gcfm4
)
fs4 = self.refine9(
F.interpolate(fs3, conv_x.size()[2:], mode=self.upsample_mode, align_corners=True) + gcfm5
)
return self.refine10(
F.interpolate(
fs4,
org_input.size()[2:],
mode=self.upsample_mode,
align_corners=True,
)
fs1 = self.refine6(F.interpolate(gcfm1, fm3.size()[2:], mode=self.upsample_mode, align_corners=True) + gcfm2)
fs2 = self.refine7(F.interpolate(fs1, fm2.size()[2:], mode=self.upsample_mode, align_corners=True) + gcfm3)
fs3 = self.refine8(F.interpolate(fs2, pool_x.size()[2:], mode=self.upsample_mode, align_corners=True) + gcfm4)
fs4 = self.refine9(F.interpolate(fs3, conv_x.size()[2:], mode=self.upsample_mode, align_corners=True) + gcfm5)
return self.refine10(
F.interpolate(
fs4,
org_input.size()[2:],
mode=self.upsample_mode,
align_corners=True,
)
)


class MCFCN(FCN):
2 changes: 1 addition & 1 deletion monai/transforms/croppad/array.py
@@ -402,7 +402,7 @@ def randomize(self, img_size: Sequence[int]) -> None:
self._size = fall_back_tuple(self.roi_size, img_size)
if self.random_size:
max_size = img_size if self.max_roi_size is None else fall_back_tuple(self.max_roi_size, img_size)
if any([i > j for i, j in zip(self._size, max_size)]):
if any(i > j for i, j in zip(self._size, max_size)):
raise ValueError(f"min ROI size: {self._size} is bigger than max ROI size: {max_size}.")
self._size = tuple((self.R.randint(low=self._size[i], high=max_size[i] + 1) for i in range(len(img_size))))
if self.random_center:
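The `any([...])` → `any(...)` change here (and in the files below) swaps a list comprehension for a generator expression, so `any` can short-circuit on the first `True` instead of first building a full list. A small stand-alone illustration:

sizes = (64, 64, 32)
max_sizes = (64, 48, 32)
# generator form: stops evaluating at the first element that exceeds its limit
exceeds = any(i > j for i, j in zip(sizes, max_sizes))
print(exceeds)  # True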
2 changes: 1 addition & 1 deletion monai/transforms/croppad/dictionary.py
@@ -539,7 +539,7 @@ def randomize(self, img_size: Sequence[int]) -> None:
self._size = fall_back_tuple(self.roi_size, img_size)
if self.random_size:
max_size = img_size if self.max_roi_size is None else fall_back_tuple(self.max_roi_size, img_size)
if any([i > j for i, j in zip(self._size, max_size)]):
if any(i > j for i, j in zip(self._size, max_size)):
raise ValueError(f"min ROI size: {self._size} is bigger than max ROI size: {max_size}.")
self._size = [self.R.randint(low=self._size[i], high=max_size[i] + 1) for i in range(len(img_size))]
if self.random_center:
5 changes: 2 additions & 3 deletions monai/transforms/intensity/array.py
@@ -1330,7 +1330,7 @@ def __init__(
raise AssertionError(
"If a sequence is passed to k_intensity, then a sequence of locations must be passed to loc"
)
elif len(k_intensity) != len(loc):
if len(k_intensity) != len(loc):
raise AssertionError("There must be one intensity_factor value for each tuple of indices in loc.")
if isinstance(self.loc[0], Sequence) and k_intensity is not None:
if not isinstance(self.k_intensity, Sequence):
@@ -1541,8 +1541,7 @@ def _make_sequence(self, x: torch.Tensor) -> Sequence[Sequence[float]]:
if not isinstance(self.intensity_range[0], Sequence):
intensity_range = (ensure_tuple(self.intensity_range),) * x.shape[0]
return intensity_range
else:
return ensure_tuple(self.intensity_range)
return ensure_tuple(self.intensity_range)
else:
# set default range if one not provided
return self._set_default_range(x)
2 changes: 1 addition & 1 deletion monai/transforms/io/array.py
@@ -45,7 +45,7 @@ def switch_endianness(data, new="<"):
"""
if isinstance(data, np.ndarray):
# default to system endian
sys_native = ((sys.byteorder == "little") and "<") or ">"
sys_native = "<" if (sys.byteorder == "little") else ">"
current_ = sys_native if data.dtype.byteorder not in ("<", ">") else data.dtype.byteorder
if new not in ("<", ">"):
raise NotImplementedError(f"Not implemented option new={new}.")
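The rewritten line replaces the old `(cond and a) or b` idiom with a conditional expression. Both produce the same result in this case, but the conditional expression reads directly and cannot misbehave when the intended left-hand value is falsy. A stand-alone sketch:

import sys

# old idiom: ((sys.byteorder == "little") and "<") or ">"
# new form: explicit conditional expression
sys_native = "<" if sys.byteorder == "little" else ">"
print(sys_native)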
10 changes: 4 additions & 6 deletions monai/transforms/post/array.py
@@ -334,12 +334,11 @@ def __call__(self, img: NdarrayTensor) -> NdarrayTensor:
"""
if isinstance(img, np.ndarray):
return np.asarray(np.where(np.isin(img, self.applied_labels), img, 0))
elif isinstance(img, torch.Tensor):
if isinstance(img, torch.Tensor):
img_arr = img.detach().cpu().numpy()
img_arr = self(img_arr)
return torch.as_tensor(img_arr, device=img.device)
else:
raise NotImplementedError(f"{self.__class__} can not handle data of type {type(img)}.")
raise NotImplementedError(f"{self.__class__} can not handle data of type {type(img)}.")


class FillHoles(Transform):
@@ -415,12 +414,11 @@ def __call__(self, img: NdarrayTensor) -> NdarrayTensor:
"""
if isinstance(img, np.ndarray):
return fill_holes(img, self.applied_labels, self.connectivity)
elif isinstance(img, torch.Tensor):
if isinstance(img, torch.Tensor):
img_arr = img.detach().cpu().numpy()
img_arr = self(img_arr)
return torch.as_tensor(img_arr, device=img.device)
else:
raise NotImplementedError(f"{self.__class__} can not handle data of type {type(img)}.")
raise NotImplementedError(f"{self.__class__} can not handle data of type {type(img)}.")


class LabelToContour(Transform):
3 changes: 1 addition & 2 deletions monai/transforms/utility/array.py
@@ -1001,8 +1001,7 @@ def __call__(
def _compute(op: Callable, data: np.ndarray):
if self.channel_wise:
return [op(c) for c in data]
else:
return op(data)
return op(data)

custom_index = 0
for o in self.ops:
10 changes: 5 additions & 5 deletions monai/transforms/utils.py
@@ -490,7 +490,7 @@ def generate_label_classes_crop_centers(
ratios_: List[Union[float, int]] = ([1] * len(indices)) if ratios is None else ratios
if len(ratios_) != len(indices):
raise ValueError("random crop radios must match the number of indices of classes.")
if any([i < 0 for i in ratios_]):
if any(i < 0 for i in ratios_):
raise ValueError("ratios should not contain negative number.")

# ensure indices are numpy array
@@ -1043,7 +1043,7 @@ def convert_to_tensor(data):
"""
if isinstance(data, torch.Tensor):
return data.contiguous()
elif isinstance(data, np.ndarray):
if isinstance(data, np.ndarray):
# skip array of string classes and object, refer to:
# https://github.com/pytorch/pytorch/blob/v1.9.0/torch/utils/data/_utils/collate.py#L13
if re.search(r"[SaUO]", data.dtype.str) is None:
@@ -1107,11 +1107,11 @@ def tensor_to_numpy(data):
if isinstance(data, torch.Tensor):
# invert Tensor to numpy, if scalar data, convert to number
return data.item() if data.ndim == 0 else np.ascontiguousarray(data.detach().cpu().numpy())
elif isinstance(data, dict):
if isinstance(data, dict):
return {k: tensor_to_numpy(v) for k, v in data.items()}
elif isinstance(data, list):
if isinstance(data, list):
return [tensor_to_numpy(i) for i in data]
elif isinstance(data, tuple):
if isinstance(data, tuple):
return tuple(tensor_to_numpy(i) for i in data)

return data
5 changes: 2 additions & 3 deletions monai/utils/deprecated.py
@@ -100,9 +100,8 @@ def _wrapper(*args, **kwargs):

if is_func:
return _wrapper
else:
obj.__init__ = _wrapper
return obj
obj.__init__ = _wrapper
return obj

return _decorator

2 changes: 1 addition & 1 deletion monai/utils/dist.py
@@ -34,7 +34,7 @@ def get_dist_device():
backend = dist.get_backend()
if backend == "nccl" and torch.cuda.is_available():
return torch.device(f"cuda:{torch.cuda.current_device()}")
elif backend == "gloo":
if backend == "gloo":
return torch.device("cpu")
return None

3 changes: 1 addition & 2 deletions monai/utils/jupyter_utils.py
@@ -224,8 +224,7 @@ def _get_loss(data):

if isinstance(output, list):
return _get_loss(output[0])
else:
return _get_loss(output)
return _get_loss(output)


class StatusMembers(Enum):
2 changes: 1 addition & 1 deletion tests/test_gibbs_noised.py
@@ -45,7 +45,7 @@ def get_data(im_shape, as_tensor_input):
create_test_image = create_test_image_2d if len(im_shape) == 2 else create_test_image_3d
ims = create_test_image(*im_shape, rad_max=20, noise_max=0.0, num_seg_classes=5)
ims = [torch.Tensor(im) for im in ims] if as_tensor_input else ims
return {k: v for k, v in zip(KEYS, ims)}
return dict(zip(KEYS, ims))

@parameterized.expand(TEST_CASES)
def test_same_result(self, im_shape, as_tensor_output, as_tensor_input):
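The test-helper change (repeated in the remaining test files) relies on `dict(zip(keys, values))` producing the same mapping as the comprehension `{k: v for k, v in zip(keys, values)}`, just more directly. For example:

KEYS = ("image", "label")
ims = ("img_array", "seg_array")
# both forms build {"image": "img_array", "label": "seg_array"}
assert dict(zip(KEYS, ims)) == {k: v for k, v in zip(KEYS, ims)}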
2 changes: 1 addition & 1 deletion tests/test_k_space_spike_noised.py
@@ -47,7 +47,7 @@ def get_data(im_shape, as_tensor_input):
ims = create_test_image(*im_shape, rad_max=20, noise_max=0.0, num_seg_classes=5)
ims = [im[None] for im in ims]
ims = [torch.Tensor(im) for im in ims] if as_tensor_input else ims
return {k: v for k, v in zip(KEYS, ims)}
return dict(zip(KEYS, ims))

@parameterized.expand(TEST_CASES)
def test_same_result(self, im_shape, as_tensor_output, as_tensor_input):
2 changes: 1 addition & 1 deletion tests/test_rand_gibbs_noised.py
@@ -45,7 +45,7 @@ def get_data(im_shape, as_tensor_input):
create_test_image = create_test_image_2d if len(im_shape) == 2 else create_test_image_3d
ims = create_test_image(*im_shape, rad_max=20, noise_max=0.0, num_seg_classes=5)
ims = [torch.Tensor(im) for im in ims] if as_tensor_input else ims
return {k: v for k, v in zip(KEYS, ims)}
return dict(zip(KEYS, ims))

@parameterized.expand(TEST_CASES)
def test_0_prob(self, im_shape, as_tensor_output, as_tensor_input):
2 changes: 1 addition & 1 deletion tests/test_rand_k_space_spike_noised.py
@@ -46,7 +46,7 @@ def get_data(im_shape, as_tensor_input):
ims = create_test_image(*im_shape, rad_max=20, noise_max=0.0, num_seg_classes=5)
ims = [im[None] for im in ims]
ims = [torch.Tensor(im) for im in ims] if as_tensor_input else ims
return {k: v for k, v in zip(KEYS, ims)}
return dict(zip(KEYS, ims))

@parameterized.expand(TEST_CASES)
def test_same_result(self, im_shape, as_tensor_output, as_tensor_input):