2 changes: 2 additions & 0 deletions monai/__init__.py
@@ -9,6 +9,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

import os
import sys

2 changes: 2 additions & 0 deletions monai/_extensions/__init__.py
@@ -9,4 +9,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

from .loader import load_module
7 changes: 3 additions & 4 deletions monai/_extensions/loader.py
@@ -9,13 +9,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

import platform
from _thread import interrupt_main
from contextlib import contextmanager
from glob import glob
from os import path
from threading import Timer
from typing import Optional

import torch

@@ -44,9 +45,7 @@ def timeout(time, message):
pass


def load_module(
module_name: str, defines: Optional[dict] = None, verbose_build: bool = False, build_timeout: int = 300
):
def load_module(module_name: str, defines: dict | None = None, verbose_build: bool = False, build_timeout: int = 300):
"""
Handles the loading of c++ extension modules.

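Reviewer note (illustration, not part of the diff): the one-line `load_module` signature above works on interpreters older than 3.10 only because of the `from __future__ import annotations` line added at the top of the file; under PEP 563 annotations are stored as strings and never evaluated at definition time, so the PEP 604 union `dict | None` parses on Python 3.7/3.8. A minimal sketch of the pattern, with hypothetical names:

# Minimal sketch (hypothetical names, not from this PR) of the annotation style
# adopted by the diff: PEP 604 unions and PEP 585 builtin generics, made safe on
# older interpreters by the PEP 563 future import.
from __future__ import annotations

from typing import Any


def load_config(defines: dict | None = None, verbose: bool = False) -> dict[str, Any]:
    """Merge optional overrides into a default configuration."""
    config: dict[str, Any] = {"verbose": verbose}
    if defines is not None:
        config.update(defines)
    return config


if __name__ == "__main__":
    print(load_config({"build_timeout": 300}))
    # Caveat: evaluating these annotations at runtime (e.g. typing.get_type_hints)
    # still needs Python 3.10+ for `dict | None`; static checkers are unaffected.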
2 changes: 2 additions & 0 deletions monai/apps/__init__.py
@@ -9,6 +9,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

from .datasets import CrossValidation, DecathlonDataset, MedNISTDataset, TciaDataset
from .mmars import MODEL_DESC, RemoteMMARKeys, download_mmar, get_model_spec, load_from_mmar
from .utils import SUPPORTED_HASH_TYPES, check_hash, download_and_extract, download_url, extractall, get_logger, logger
2 changes: 2 additions & 0 deletions monai/apps/auto3dseg/__init__.py
@@ -9,6 +9,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

from .auto_runner import AutoRunner
from .bundle_gen import BundleAlgo, BundleGen
from .data_analyzer import DataAnalyzer
2 changes: 2 additions & 0 deletions monai/apps/auto3dseg/__main__.py
@@ -9,6 +9,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

from monai.apps.auto3dseg.auto_runner import AutoRunner
from monai.apps.auto3dseg.bundle_gen import BundleAlgo, BundleGen
from monai.apps.auto3dseg.data_analyzer import DataAnalyzer
36 changes: 19 additions & 17 deletions monai/apps/auto3dseg/auto_runner.py
@@ -9,12 +9,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

import os
import shutil
import subprocess
from copy import deepcopy
from time import sleep
from typing import Any, Dict, List, Optional, Union, cast
from typing import Any, cast

import numpy as np
import torch
@@ -204,21 +206,21 @@ class AutoRunner:

"""

analyze_params: Optional[Dict]
analyze_params: dict | None

def __init__(
self,
work_dir: str = "./work_dir",
input: Union[Dict[str, Any], str, None] = None,
algos: Optional[Union[Dict, List, str]] = None,
analyze: Optional[bool] = None,
algo_gen: Optional[bool] = None,
train: Optional[bool] = None,
input: dict[str, Any] | str | None = None,
algos: dict | list | str | None = None,
analyze: bool | None = None,
algo_gen: bool | None = None,
train: bool | None = None,
hpo: bool = False,
hpo_backend: str = "nni",
ensemble: bool = True,
not_use_cache: bool = False,
templates_path_or_url: Optional[str] = None,
templates_path_or_url: str | None = None,
**kwargs,
):

@@ -234,7 +236,7 @@ def __init__(
input = self.data_src_cfg_name
logger.info(f"Input config is not provided, using the default {input}")

if isinstance(input, Dict):
if isinstance(input, dict):
self.data_src_cfg = input
ConfigParser.export_config_file(
config=input, filepath=self.data_src_cfg_name, fmt="yaml", default_flow_style=None, sort_keys=False
@@ -279,14 +281,14 @@ def __init__(
self.set_num_fold(num_fold=self.num_fold)

self.gpu_customization = False
self.gpu_customization_specs: Dict[str, Any] = {}
self.gpu_customization_specs: dict[str, Any] = {}

# hpo
if hpo_backend.lower() != "nni":
raise NotImplementedError("HPOGen backend only supports NNI")
self.hpo = hpo and has_nni
self.set_hpo_params()
self.search_space: Dict[str, Dict[str, Any]] = {}
self.search_space: dict[str, dict[str, Any]] = {}
self.hpo_tasks = 0

def read_cache(self):
@@ -336,7 +338,7 @@ def export_cache(self, **kwargs):
)

def set_gpu_customization(
self, gpu_customization: bool = False, gpu_customization_specs: Optional[Dict[str, Any]] = None
self, gpu_customization: bool = False, gpu_customization_specs: dict[str, Any] | None = None
):
"""
Set options for GPU-based parameter customization/optimization.
@@ -389,7 +391,7 @@ def set_num_fold(self, num_fold: int = 5):
if self.ensemble_method_name == "AlgoEnsembleBestByFold":
self.ensemble_method.n_fold = self.num_fold # type: ignore

def set_training_params(self, params: Optional[Dict[str, Any]] = None):
def set_training_params(self, params: dict[str, Any] | None = None):
"""
Set the training params for all algos.

@@ -404,7 +406,7 @@ def set_training_params(self, params: Optional[Dict[str, Any]] = None):
"""
self.train_params = deepcopy(params) if params is not None else {}

def set_prediction_params(self, params: Optional[Dict[str, Any]] = None):
def set_prediction_params(self, params: dict[str, Any] | None = None):
"""
Set the prediction params for all algos.

@@ -420,7 +422,7 @@ def set_prediction_params(self, params: Optional[Dict[str, Any]] = None):
"""
self.pred_params = deepcopy(params) if params is not None else {}

def set_analyze_params(self, params: Optional[Dict[str, Any]] = None):
def set_analyze_params(self, params: dict[str, Any] | None = None):
"""
Set the data analysis extra params.

@@ -438,7 +440,7 @@ def set_analyze_params(self, params: Optional[Dict[str, Any]] = None):
else:
self.analyze_params = deepcopy(params)

def set_hpo_params(self, params: Optional[Dict[str, Any]] = None):
def set_hpo_params(self, params: dict[str, Any] | None = None):
"""
Set parameters for the HPO module and the algos before the training. It will attempt to (1) override bundle
templates with the key-value pairs in ``params`` (2) change the config of the HPO module (e.g. NNI) if the
@@ -536,7 +538,7 @@ def set_ensemble_method(self, ensemble_method_name: str = "AlgoEnsembleBestByFol
else:
raise NotImplementedError(f"Ensemble method {self.ensemble_method_name} is not implemented.")

def _train_algo_in_sequence(self, history: List[Dict[str, Any]]):
def _train_algo_in_sequence(self, history: list[dict[str, Any]]):
"""
Train the Algos in a sequential scheme. The order of training is randomized.

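Reviewer note (illustration, not part of the diff): besides the annotation rewrite, this file switches the runtime check from `isinstance(input, Dict)` to `isinstance(input, dict)`. The builtin is the safer target: the bare `typing.Dict` alias has been deprecated since Python 3.9, and subscripted forms such as `Dict[str, Any]` are rejected by `isinstance` outright. A small sketch of the same pattern with a hypothetical helper:

# Minimal sketch (hypothetical helper, not from this PR): runtime type checks
# target the builtin `dict`; the typing aliases are for annotations only.
from __future__ import annotations

from typing import Any


def normalize_input(value: dict[str, Any] | str) -> dict[str, Any]:
    if isinstance(value, dict):  # builtin type, mirroring `isinstance(input, dict)` above
        return value
    if isinstance(value, str):
        return {"datalist": value}  # treat a bare string as a path-like entry
    raise TypeError(f"unsupported input type: {type(value).__name__}")


print(normalize_input("data_src.yaml"))
print(normalize_input({"dataroot": "/workspace/data"}))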
19 changes: 11 additions & 8 deletions monai/apps/auto3dseg/bundle_gen.py
@@ -9,17 +9,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

import importlib
import os
import shutil
import subprocess
import sys
import time
import warnings
from collections.abc import Mapping
from copy import deepcopy
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Any, Dict, List, Mapping, Optional, Union
from typing import Any
from urllib.parse import urlparse

import torch
@@ -369,10 +372,10 @@ class BundleGen(AlgoGen):
def __init__(
self,
algo_path: str = ".",
algos: Optional[Union[Dict, List, str]] = None,
templates_path_or_url: Optional[str] = None,
data_stats_filename: Optional[str] = None,
data_src_cfg_name: Optional[str] = None,
algos: dict | list | str | None = None,
templates_path_or_url: str | None = None,
data_stats_filename: str | None = None,
data_src_cfg_name: str | None = None,
):

if algos is None or isinstance(algos, (list, tuple, str)):
@@ -426,7 +429,7 @@ def __init__(

self.data_stats_filename = data_stats_filename
self.data_src_cfg_filename = data_src_cfg_name
self.history: List[Dict] = []
self.history: list[dict] = []

def set_data_stats(self, data_stats_filename: str):
"""
Expand Down Expand Up @@ -454,7 +457,7 @@ def get_data_src(self):
"""Get the data source filename"""
return self.data_src_cfg_filename

def get_history(self) -> List:
def get_history(self) -> list:
"""get the history of the bundleAlgo object with their names/identifiers"""
return self.history

@@ -463,7 +466,7 @@ def generate(
output_folder=".",
num_fold: int = 5,
gpu_customization: bool = False,
gpu_customization_specs: Optional[Dict[str, Any]] = None,
gpu_customization_specs: dict[str, Any] | None = None,
):
"""
Generate the bundle scripts/configs for each bundleAlgo
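Reviewer note (illustration, not part of the diff): the import change in this file moves `Mapping` from `typing` to `collections.abc`, matching the deprecation of the `typing` container aliases; with the future import in place, the ABCs can still be subscripted inside annotations on interpreters older than 3.9. A small hypothetical sketch:

# Minimal sketch (hypothetical helper, not from this PR): abstract container
# types come from collections.abc, concrete ones are lowercase builtins.
from __future__ import annotations

from collections.abc import Mapping, Sequence
from typing import Any


def algo_names(history: Sequence[Mapping[str, Any]]) -> list[str]:
    """Collect a name/identifier from each record of a history-like sequence."""
    return [str(record.get("name", "<unnamed>")) for record in history]


print(algo_names([{"name": "algo_fold0"}, {"name": "algo_fold1"}]))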
22 changes: 12 additions & 10 deletions monai/apps/auto3dseg/data_analyzer.py
@@ -9,9 +9,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

import warnings
from os import path
from typing import Any, Dict, List, Optional, Union, cast
from typing import Any, cast

import numpy as np
import torch
@@ -111,18 +113,18 @@ class DataAnalyzer:

def __init__(
self,
datalist: Union[str, Dict],
datalist: str | dict,
dataroot: str = "",
output_path: str = "./data_stats.yaml",
average: bool = True,
do_ccp: bool = False,
device: Union[str, torch.device] = "cpu",
device: str | torch.device = "cpu",
worker: int = 2,
image_key: str = "image",
label_key: Optional[str] = "label",
hist_bins: Optional[Union[list, int]] = 0,
hist_range: Optional[list] = None,
fmt: Optional[str] = "yaml",
label_key: str | None = "label",
hist_bins: list | int | None = 0,
hist_range: list | None = None,
fmt: str | None = "yaml",
histogram_only: bool = False,
**extra_params,
):
@@ -146,7 +148,7 @@ def __init__(
self.extra_params = extra_params

@staticmethod
def _check_data_uniformity(keys: List[str], result: Dict):
def _check_data_uniformity(keys: list[str], result: dict):
"""
Check data uniformity since DataAnalyzer provides no support to multi-modal images with different
affine matrices/spacings due to monai transforms.
@@ -227,7 +229,7 @@ def get_all_case_stats(self, key="training", transform_list=None):
files, _ = datafold_read(datalist=self.datalist, basedir=self.dataroot, fold=-1, key=key)
dataset = Dataset(data=files, transform=transform)
dataloader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=self.worker, collate_fn=no_collation)
result: Dict[DataStatsKeys, Any] = {DataStatsKeys.SUMMARY: {}, DataStatsKeys.BY_CASE: []}
result: dict[DataStatsKeys, Any] = {DataStatsKeys.SUMMARY: {}, DataStatsKeys.BY_CASE: []}

if not has_tqdm:
warnings.warn("tqdm is not installed. not displaying the caching progress.")
@@ -261,7 +263,7 @@ def get_all_case_stats(self, key="training", transform_list=None):
)
result[DataStatsKeys.BY_CASE].append(stats_by_cases)

result[DataStatsKeys.SUMMARY] = summarizer.summarize(cast(List, result[DataStatsKeys.BY_CASE]))
result[DataStatsKeys.SUMMARY] = summarizer.summarize(cast(list, result[DataStatsKeys.BY_CASE]))

if not self._check_data_uniformity([ImageStatsKeys.SPACING], result):
print("Data spacing is not completely uniform. MONAI transforms may provide unexpected result")
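Reviewer note (illustration, not part of the diff): the cast target in `get_all_case_stats` becomes the builtin `list`. Since `typing.cast` returns its second argument unchanged at runtime, the first argument only informs the type checker, and the lowercase builtin works on every supported Python version. A tiny hypothetical sketch:

# Minimal sketch (hypothetical data, not from this PR): typing.cast is a no-op
# at runtime, so the builtin `list` is a valid cast target everywhere.
from __future__ import annotations

from typing import Any, cast

by_case: list[dict[str, Any]] = [{"image": "case_000.nii.gz"}]

records = cast(list, by_case)  # informs the type checker only; nothing is converted
assert records is by_case
print(len(records))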
19 changes: 11 additions & 8 deletions monai/apps/auto3dseg/ensemble_builder.py
@@ -9,10 +9,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

import os
from abc import ABC, abstractmethod
from collections.abc import Sequence
from copy import deepcopy
from typing import Any, Dict, List, Optional, Sequence, Union
from typing import Any
from warnings import warn

import numpy as np
@@ -67,7 +70,7 @@ def get_algo_ensemble(self):
"""
return self.algo_ensemble

def set_infer_files(self, dataroot: str, data_list_or_path: Union[str, List], data_key: str = "testing"):
def set_infer_files(self, dataroot: str, data_list_or_path: str | list, data_key: str = "testing"):
"""
Set the files to perform model inference.

@@ -78,7 +81,7 @@ def set_infer_files(self, dataroot: str, data_list_or_path: Union[str, List], da

self.infer_files = []

if isinstance(data_list_or_path, List):
if isinstance(data_list_or_path, list):
self.infer_files = data_list_or_path
elif isinstance(data_list_or_path, str):
datalist = ConfigParser.load_config_file(data_list_or_path)
@@ -113,7 +116,7 @@ def ensemble_pred(self, preds, sigmoid=False):
else:
return VoteEnsemble(num_classes=preds[0].shape[0])(classes)

def __call__(self, pred_param: Optional[Dict[str, Any]] = None):
def __call__(self, pred_param: dict[str, Any] | None = None):
"""
Use the ensembled model to predict result.

@@ -233,7 +236,7 @@ def collect_algos(self) -> None:
self.algo_ensemble = []
for f_idx in range(self.n_fold):
best_score = -1.0
best_model: Optional[BundleAlgo] = None
best_model: BundleAlgo | None = None
for algo in self.algos:
# algorithm folder: {net}_{fold_index}_{other}
identifier = algo[AlgoEnsembleKeys.ID].split("_")[1]
@@ -264,8 +267,8 @@ class AlgoEnsembleBuilder:

"""

def __init__(self, history: Sequence[Dict], data_src_cfg_filename: Optional[str] = None):
self.infer_algos: List[Dict[AlgoEnsembleKeys, Any]] = []
def __init__(self, history: Sequence[dict], data_src_cfg_filename: str | None = None):
self.infer_algos: list[dict[AlgoEnsembleKeys, Any]] = []
self.ensemble: AlgoEnsemble
self.data_src_cfg = ConfigParser(globals=False)

Expand All @@ -292,7 +295,7 @@ def __init__(self, history: Sequence[Dict], data_src_cfg_filename: Optional[str]

self.add_inferer(name, gen_algo, best_metric)

def add_inferer(self, identifier: str, gen_algo: BundleAlgo, best_metric: Optional[float] = None):
def add_inferer(self, identifier: str, gen_algo: BundleAlgo, best_metric: float | None = None):
"""
Add model inferer to the builder.
