From 129dc88398de083031a2e454be364911b85c4e79 Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Wed, 13 Mar 2024 15:16:12 +0800 Subject: [PATCH 01/16] add properties_path Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> --- monai/bundle/workflows.py | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/monai/bundle/workflows.py b/monai/bundle/workflows.py index da3aa30141..006ec06ced 100644 --- a/monai/bundle/workflows.py +++ b/monai/bundle/workflows.py @@ -11,6 +11,7 @@ from __future__ import annotations +import json import os import sys import time @@ -20,6 +21,7 @@ from pathlib import Path from typing import Any, Sequence +from monai.config import PathLike from monai.apps.utils import get_logger from monai.bundle.config_parser import ConfigParser from monai.bundle.properties import InferProperties, MetaProperties, TrainProperties @@ -46,6 +48,7 @@ class BundleWorkflow(ABC): or "infer", "inference", "eval", "evaluation" for a inference workflow, other unsupported string will raise a ValueError. default to `None` for common workflow. + properties_path: the path to the JSON file of properties. """ @@ -59,12 +62,20 @@ class BundleWorkflow(ABC): new_name="workflow_type", msg_suffix="please use `workflow_type` instead.", ) - def __init__(self, workflow_type: str | None = None, workflow: str | None = None): + def __init__(self, workflow_type: str | None = None, workflow: str | None = None, properties_path: PathLike | None = None): workflow_type = workflow if workflow is not None else workflow_type - if workflow_type is None: + if workflow_type is None and properties_path is None: self.properties = copy(MetaProperties) self.workflow_type = None return + if properties_path is not None: + properties_path = Path(properties_path) + if not properties_path.is_file(): + raise ValueError(f"Property file {properties_path} does not exist.") + with open(properties_path) as json_file: + self.properties = json.load(json_file) + self.workflow_type = None + return if workflow_type.lower() in self.supported_train_type: self.properties = {**TrainProperties, **MetaProperties} self.workflow_type = "train" @@ -206,6 +217,7 @@ class ConfigWorkflow(BundleWorkflow): or "infer", "inference", "eval", "evaluation" for a inference workflow, other unsupported string will raise a ValueError. default to `None` for common workflow. + properties_path: the path to the JSON file of properties. override: id-value pairs to override or add the corresponding config content. e.g. 
``--net#input_chns 42``, ``--net %/data/other.json#net_arg`` @@ -230,10 +242,11 @@ def __init__( tracking: str | dict | None = None, workflow_type: str | None = None, workflow: str | None = None, + properties_path: PathLike | None = None, **override: Any, ) -> None: workflow_type = workflow if workflow is not None else workflow_type - super().__init__(workflow_type=workflow_type) + super().__init__(workflow_type=workflow_type, properties_path=properties_path) if config_file is not None: _config_files = ensure_tuple(config_file) self.config_root_path = Path(_config_files[0]).parent From 5aa4f381251315d6fbbd2d672666cf8d6fcbf347 Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Wed, 13 Mar 2024 15:16:37 +0800 Subject: [PATCH 02/16] remove ignite based properties Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> --- monai/bundle/properties.py | 38 -------------------------------------- 1 file changed, 38 deletions(-) diff --git a/monai/bundle/properties.py b/monai/bundle/properties.py index a75e862a84..8ebffa9de4 100644 --- a/monai/bundle/properties.py +++ b/monai/bundle/properties.py @@ -43,11 +43,6 @@ BundleProperty.REQUIRED: True, BundlePropertyConfig.ID: "dataset_dir", }, - "trainer": { - BundleProperty.DESC: "training workflow engine.", - BundleProperty.REQUIRED: True, - BundlePropertyConfig.ID: f"train{ID_SEP_KEY}trainer", - }, "network_def": { BundleProperty.DESC: "network module for the training.", BundleProperty.REQUIRED: False, @@ -63,23 +58,12 @@ BundleProperty.REQUIRED: True, BundlePropertyConfig.ID: f"train{ID_SEP_KEY}dataset", }, - "train_inferer": { - BundleProperty.DESC: "MONAI Inferer object to execute the model computation in training.", - BundleProperty.REQUIRED: True, - BundlePropertyConfig.ID: f"train{ID_SEP_KEY}inferer", - }, "train_dataset_data": { BundleProperty.DESC: "data source for the training dataset.", BundleProperty.REQUIRED: False, BundlePropertyConfig.ID: f"train{ID_SEP_KEY}dataset{ID_SEP_KEY}data", BundlePropertyConfig.REF_ID: None, # no reference to this ID }, - "train_handlers": { - BundleProperty.DESC: "event-handlers for the training logic.", - BundleProperty.REQUIRED: False, - BundlePropertyConfig.ID: f"train{ID_SEP_KEY}handlers", - BundlePropertyConfig.REF_ID: f"train{ID_SEP_KEY}trainer{ID_SEP_KEY}train_handlers", - }, "train_preprocessing": { BundleProperty.DESC: "preprocessing for the training input data.", BundleProperty.REQUIRED: False, @@ -98,12 +82,6 @@ BundlePropertyConfig.ID: f"train{ID_SEP_KEY}key_metric", BundlePropertyConfig.REF_ID: f"train{ID_SEP_KEY}trainer{ID_SEP_KEY}key_train_metric", }, - "evaluator": { - BundleProperty.DESC: "validation workflow engine.", - BundleProperty.REQUIRED: False, - BundlePropertyConfig.ID: f"validate{ID_SEP_KEY}evaluator", - BundlePropertyConfig.REF_ID: "validator", # this REF_ID is the arg name of `ValidationHandler` - }, "val_interval": { BundleProperty.DESC: "validation interval during the training.", BundleProperty.REQUIRED: False, @@ -175,33 +153,17 @@ BundleProperty.REQUIRED: True, BundlePropertyConfig.ID: "dataset", }, - "evaluator": { - BundleProperty.DESC: "inference / evaluation workflow engine.", - BundleProperty.REQUIRED: True, - BundlePropertyConfig.ID: "evaluator", - }, "network_def": { BundleProperty.DESC: "network module for the inference.", BundleProperty.REQUIRED: True, BundlePropertyConfig.ID: "network_def", }, - "inferer": { - BundleProperty.DESC: "MONAI Inferer object to execute the model computation in inference.", - 
BundleProperty.REQUIRED: True, - BundlePropertyConfig.ID: "inferer", - }, "dataset_data": { BundleProperty.DESC: "data source for the inference / evaluation dataset.", BundleProperty.REQUIRED: False, BundlePropertyConfig.ID: f"dataset{ID_SEP_KEY}data", BundlePropertyConfig.REF_ID: None, # no reference to this ID }, - "handlers": { - BundleProperty.DESC: "event-handlers for the inference / evaluation logic.", - BundleProperty.REQUIRED: False, - BundlePropertyConfig.ID: "handlers", - BundlePropertyConfig.REF_ID: f"evaluator{ID_SEP_KEY}val_handlers", - }, "preprocessing": { BundleProperty.DESC: "preprocessing for the input data.", BundleProperty.REQUIRED: False, From b2ea289398bf50fabc65ca41617ad5b70ccdea0a Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Wed, 13 Mar 2024 15:59:26 +0800 Subject: [PATCH 03/16] fix unittest Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> --- monai/bundle/workflows.py | 6 +- tests/test_bundle_workflow.py | 10 +- tests/testing_data/fl_train_properties.json | 126 ++++++++++++++++++++ 3 files changed, 138 insertions(+), 4 deletions(-) create mode 100644 tests/testing_data/fl_train_properties.json diff --git a/monai/bundle/workflows.py b/monai/bundle/workflows.py index 006ec06ced..0b2e1c7742 100644 --- a/monai/bundle/workflows.py +++ b/monai/bundle/workflows.py @@ -21,11 +21,11 @@ from pathlib import Path from typing import Any, Sequence -from monai.config import PathLike from monai.apps.utils import get_logger from monai.bundle.config_parser import ConfigParser from monai.bundle.properties import InferProperties, MetaProperties, TrainProperties from monai.bundle.utils import DEFAULT_EXP_MGMT_SETTINGS, EXPR_KEY, ID_REF_KEY, ID_SEP_KEY +from monai.config import PathLike from monai.utils import BundleProperty, BundlePropertyConfig, deprecated_arg, deprecated_arg_default, ensure_tuple __all__ = ["BundleWorkflow", "ConfigWorkflow"] @@ -62,7 +62,9 @@ class BundleWorkflow(ABC): new_name="workflow_type", msg_suffix="please use `workflow_type` instead.", ) - def __init__(self, workflow_type: str | None = None, workflow: str | None = None, properties_path: PathLike | None = None): + def __init__( + self, workflow_type: str | None = None, workflow: str | None = None, properties_path: PathLike | None = None + ): workflow_type = workflow if workflow is not None else workflow_type if workflow_type is None and properties_path is None: self.properties = copy(MetaProperties) diff --git a/tests/test_bundle_workflow.py b/tests/test_bundle_workflow.py index f7da37acef..6ed33b085e 100644 --- a/tests/test_bundle_workflow.py +++ b/tests/test_bundle_workflow.py @@ -33,7 +33,10 @@ TEST_CASE_2 = [os.path.join(os.path.dirname(__file__), "testing_data", "inference.yaml")] -TEST_CASE_3 = [os.path.join(os.path.dirname(__file__), "testing_data", "config_fl_train.json")] +TEST_CASE_3 = [ + os.path.join(os.path.dirname(__file__), "testing_data", "config_fl_train.json"), + os.path.join(os.path.dirname(__file__), "testing_data", "fl_train_properties.json"), +] class TestBundleWorkflow(unittest.TestCase): @@ -101,10 +104,11 @@ def test_inference_config(self, config_file): logging_file=os.path.join(os.path.dirname(__file__), "testing_data", "logging.conf"), **override, ) + inferer.add_property(name="inferer", required=True, config_id="inferer") self._test_inferer(inferer) @parameterized.expand([TEST_CASE_3]) - def test_train_config(self, config_file): + def test_train_config(self, config_file, properties_path): # test standard MONAI model-zoo 
config workflow trainer = ConfigWorkflow( workflow_type="train", @@ -113,6 +117,7 @@ def test_train_config(self, config_file): init_id="initialize", run_id="run", final_id="finalize", + properties_path=properties_path, ) # should initialize before parsing any bundle content trainer.initialize() @@ -144,6 +149,7 @@ def test_train_config(self, config_file): def test_non_config(self): # test user defined python style workflow inferer = NonConfigWorkflow(self.filename, self.data_dir) + inferer.add_property(name="inferer", required=True) self._test_inferer(inferer) diff --git a/tests/testing_data/fl_train_properties.json b/tests/testing_data/fl_train_properties.json new file mode 100644 index 0000000000..fa91cfbde0 --- /dev/null +++ b/tests/testing_data/fl_train_properties.json @@ -0,0 +1,126 @@ +{ + "bundle_root": { + "description": "root path of the bundle.", + "required": true, + "id": "bundle_root" + }, + "device": { + "description": "target device to execute the bundle workflow.", + "required": true, + "id": "device" + }, + "dataset_dir": { + "description": "directory path of the dataset.", + "required": true, + "id": "dataset_dir" + }, + "trainer": { + "description": "training workflow engine.", + "required": true, + "id": "train::trainer" + }, + "network_def": { + "description": "network module for the training.", + "required": false, + "id": "network_def" + }, + "max_epochs": { + "description": "max number of epochs to execute the training.", + "required": true, + "id": "train::trainer::max_epochs" + }, + "train_dataset": { + "description": "PyTorch dataset object for the training logic.", + "required": true, + "id": "train::dataset" + }, + "train_inferer": { + "description": "MONAI Inferer object to execute the model computation in training.", + "required": true, + "id": "train::inferer" + }, + "train_dataset_data": { + "description": "data source for the training dataset.", + "required": false, + "id": "train::dataset::data", + "refer_id": null + }, + "train_handlers": { + "description": "event-handlers for the training logic.", + "required": false, + "id": "train::handlers", + "refer_id": "train::trainer::train_handlers" + }, + "train_preprocessing": { + "description": "preprocessing for the training input data.", + "required": false, + "id": "train::preprocessing", + "refer_id": "train::dataset::transform" + }, + "train_postprocessing": { + "description": "postprocessing for the training model output data.", + "required": false, + "id": "train::postprocessing", + "refer_id": "train::trainer::postprocessing" + }, + "train_key_metric": { + "description": "key metric to compute on the training data.", + "required": false, + "id": "train::key_metric", + "refer_id": "train::trainer::key_train_metric" + }, + "evaluator": { + "description": "validation workflow engine.", + "required": false, + "id": "validate::evaluator", + "refer_id": "validator" + }, + "val_interval": { + "description": "validation interval during the training.", + "required": false, + "id": "val_interval", + "refer_id": "interval" + }, + "val_handlers": { + "description": "event-handlers for the validation logic.", + "required": false, + "id": "validate::handlers", + "refer_id": "validate::evaluator::val_handlers" + }, + "val_dataset": { + "description": "PyTorch dataset object for the validation logic.", + "required": false, + "id": "validate::dataset", + "refer_id": "validate::dataloader::dataset" + }, + "val_dataset_data": { + "description": "data source for the validation dataset.", + "required": false, + "id": 
"validate::dataset::data", + "refer_id": null + }, + "val_inferer": { + "description": "MONAI Inferer object to execute the model computation in validation.", + "required": false, + "id": "validate::inferer", + "refer_id": "validate::evaluator::inferer" + }, + "val_preprocessing": { + "description": "preprocessing for the validation input data.", + "required": false, + "id": "validate::preprocessing", + "refer_id": "validate::dataset::transform" + }, + "val_postprocessing": { + "description": "postprocessing for the validation model output data.", + "required": false, + "id": "validate::postprocessing", + "refer_id": "validate::evaluator::postprocessing" + }, + "val_key_metric": { + "description": "key metric to compute on the validation data.", + "required": false, + "id": "validate::key_metric", + "refer_id": "validate::evaluator::key_val_metric" + } +} \ No newline at end of file From cf34758da215f8063bbcdaa7f077cb1f84e956b1 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 13 Mar 2024 08:04:44 +0000 Subject: [PATCH 04/16] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- tests/testing_data/fl_train_properties.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/testing_data/fl_train_properties.json b/tests/testing_data/fl_train_properties.json index fa91cfbde0..1ca9319cd8 100644 --- a/tests/testing_data/fl_train_properties.json +++ b/tests/testing_data/fl_train_properties.json @@ -123,4 +123,4 @@ "id": "validate::key_metric", "refer_id": "validate::evaluator::key_val_metric" } -} \ No newline at end of file +} From 67670a1cb886113770550e52a6f433e25e723092 Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Wed, 13 Mar 2024 16:25:10 +0800 Subject: [PATCH 05/16] fix ci Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> --- monai/bundle/workflows.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/monai/bundle/workflows.py b/monai/bundle/workflows.py index 0b2e1c7742..135ddcb1c2 100644 --- a/monai/bundle/workflows.py +++ b/monai/bundle/workflows.py @@ -78,10 +78,10 @@ def __init__( self.properties = json.load(json_file) self.workflow_type = None return - if workflow_type.lower() in self.supported_train_type: + if workflow_type.lower() in self.supported_train_type: # type: ignore[union-attr] self.properties = {**TrainProperties, **MetaProperties} self.workflow_type = "train" - elif workflow_type.lower() in self.supported_infer_type: + elif workflow_type.lower() in self.supported_infer_type: # type: ignore[union-attr] self.properties = {**InferProperties, **MetaProperties} self.workflow_type = "infer" else: From 06d9a3484717ead6680f32dea331e21a9d176b74 Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Tue, 26 Mar 2024 22:59:22 +0800 Subject: [PATCH 06/16] Revert "Merge remote-tracking branch 'yliu/bundle' into bundle" This reverts commit e941dde725dc6ba2d669563a329c205498dd8dcc, reversing changes made to 5aa4f381251315d6fbbd2d672666cf8d6fcbf347. 
--- monai/bundle/workflows.py | 10 +- tests/test_bundle_workflow.py | 10 +- tests/testing_data/fl_train_properties.json | 126 -------------------- 3 files changed, 6 insertions(+), 140 deletions(-) delete mode 100644 tests/testing_data/fl_train_properties.json diff --git a/monai/bundle/workflows.py b/monai/bundle/workflows.py index 135ddcb1c2..006ec06ced 100644 --- a/monai/bundle/workflows.py +++ b/monai/bundle/workflows.py @@ -21,11 +21,11 @@ from pathlib import Path from typing import Any, Sequence +from monai.config import PathLike from monai.apps.utils import get_logger from monai.bundle.config_parser import ConfigParser from monai.bundle.properties import InferProperties, MetaProperties, TrainProperties from monai.bundle.utils import DEFAULT_EXP_MGMT_SETTINGS, EXPR_KEY, ID_REF_KEY, ID_SEP_KEY -from monai.config import PathLike from monai.utils import BundleProperty, BundlePropertyConfig, deprecated_arg, deprecated_arg_default, ensure_tuple __all__ = ["BundleWorkflow", "ConfigWorkflow"] @@ -62,9 +62,7 @@ class BundleWorkflow(ABC): new_name="workflow_type", msg_suffix="please use `workflow_type` instead.", ) - def __init__( - self, workflow_type: str | None = None, workflow: str | None = None, properties_path: PathLike | None = None - ): + def __init__(self, workflow_type: str | None = None, workflow: str | None = None, properties_path: PathLike | None = None): workflow_type = workflow if workflow is not None else workflow_type if workflow_type is None and properties_path is None: self.properties = copy(MetaProperties) @@ -78,10 +76,10 @@ def __init__( self.properties = json.load(json_file) self.workflow_type = None return - if workflow_type.lower() in self.supported_train_type: # type: ignore[union-attr] + if workflow_type.lower() in self.supported_train_type: self.properties = {**TrainProperties, **MetaProperties} self.workflow_type = "train" - elif workflow_type.lower() in self.supported_infer_type: # type: ignore[union-attr] + elif workflow_type.lower() in self.supported_infer_type: self.properties = {**InferProperties, **MetaProperties} self.workflow_type = "infer" else: diff --git a/tests/test_bundle_workflow.py b/tests/test_bundle_workflow.py index 6ed33b085e..f7da37acef 100644 --- a/tests/test_bundle_workflow.py +++ b/tests/test_bundle_workflow.py @@ -33,10 +33,7 @@ TEST_CASE_2 = [os.path.join(os.path.dirname(__file__), "testing_data", "inference.yaml")] -TEST_CASE_3 = [ - os.path.join(os.path.dirname(__file__), "testing_data", "config_fl_train.json"), - os.path.join(os.path.dirname(__file__), "testing_data", "fl_train_properties.json"), -] +TEST_CASE_3 = [os.path.join(os.path.dirname(__file__), "testing_data", "config_fl_train.json")] class TestBundleWorkflow(unittest.TestCase): @@ -104,11 +101,10 @@ def test_inference_config(self, config_file): logging_file=os.path.join(os.path.dirname(__file__), "testing_data", "logging.conf"), **override, ) - inferer.add_property(name="inferer", required=True, config_id="inferer") self._test_inferer(inferer) @parameterized.expand([TEST_CASE_3]) - def test_train_config(self, config_file, properties_path): + def test_train_config(self, config_file): # test standard MONAI model-zoo config workflow trainer = ConfigWorkflow( workflow_type="train", @@ -117,7 +113,6 @@ def test_train_config(self, config_file, properties_path): init_id="initialize", run_id="run", final_id="finalize", - properties_path=properties_path, ) # should initialize before parsing any bundle content trainer.initialize() @@ -149,7 +144,6 @@ def test_train_config(self, 
config_file, properties_path): def test_non_config(self): # test user defined python style workflow inferer = NonConfigWorkflow(self.filename, self.data_dir) - inferer.add_property(name="inferer", required=True) self._test_inferer(inferer) diff --git a/tests/testing_data/fl_train_properties.json b/tests/testing_data/fl_train_properties.json deleted file mode 100644 index 1ca9319cd8..0000000000 --- a/tests/testing_data/fl_train_properties.json +++ /dev/null @@ -1,126 +0,0 @@ -{ - "bundle_root": { - "description": "root path of the bundle.", - "required": true, - "id": "bundle_root" - }, - "device": { - "description": "target device to execute the bundle workflow.", - "required": true, - "id": "device" - }, - "dataset_dir": { - "description": "directory path of the dataset.", - "required": true, - "id": "dataset_dir" - }, - "trainer": { - "description": "training workflow engine.", - "required": true, - "id": "train::trainer" - }, - "network_def": { - "description": "network module for the training.", - "required": false, - "id": "network_def" - }, - "max_epochs": { - "description": "max number of epochs to execute the training.", - "required": true, - "id": "train::trainer::max_epochs" - }, - "train_dataset": { - "description": "PyTorch dataset object for the training logic.", - "required": true, - "id": "train::dataset" - }, - "train_inferer": { - "description": "MONAI Inferer object to execute the model computation in training.", - "required": true, - "id": "train::inferer" - }, - "train_dataset_data": { - "description": "data source for the training dataset.", - "required": false, - "id": "train::dataset::data", - "refer_id": null - }, - "train_handlers": { - "description": "event-handlers for the training logic.", - "required": false, - "id": "train::handlers", - "refer_id": "train::trainer::train_handlers" - }, - "train_preprocessing": { - "description": "preprocessing for the training input data.", - "required": false, - "id": "train::preprocessing", - "refer_id": "train::dataset::transform" - }, - "train_postprocessing": { - "description": "postprocessing for the training model output data.", - "required": false, - "id": "train::postprocessing", - "refer_id": "train::trainer::postprocessing" - }, - "train_key_metric": { - "description": "key metric to compute on the training data.", - "required": false, - "id": "train::key_metric", - "refer_id": "train::trainer::key_train_metric" - }, - "evaluator": { - "description": "validation workflow engine.", - "required": false, - "id": "validate::evaluator", - "refer_id": "validator" - }, - "val_interval": { - "description": "validation interval during the training.", - "required": false, - "id": "val_interval", - "refer_id": "interval" - }, - "val_handlers": { - "description": "event-handlers for the validation logic.", - "required": false, - "id": "validate::handlers", - "refer_id": "validate::evaluator::val_handlers" - }, - "val_dataset": { - "description": "PyTorch dataset object for the validation logic.", - "required": false, - "id": "validate::dataset", - "refer_id": "validate::dataloader::dataset" - }, - "val_dataset_data": { - "description": "data source for the validation dataset.", - "required": false, - "id": "validate::dataset::data", - "refer_id": null - }, - "val_inferer": { - "description": "MONAI Inferer object to execute the model computation in validation.", - "required": false, - "id": "validate::inferer", - "refer_id": "validate::evaluator::inferer" - }, - "val_preprocessing": { - "description": "preprocessing for the 
validation input data.", - "required": false, - "id": "validate::preprocessing", - "refer_id": "validate::dataset::transform" - }, - "val_postprocessing": { - "description": "postprocessing for the validation model output data.", - "required": false, - "id": "validate::postprocessing", - "refer_id": "validate::evaluator::postprocessing" - }, - "val_key_metric": { - "description": "key metric to compute on the validation data.", - "required": false, - "id": "validate::key_metric", - "refer_id": "validate::evaluator::key_val_metric" - } -} From 22735af496515943803fa4ed2a63655b2df38d3b Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Tue, 26 Mar 2024 23:01:22 +0800 Subject: [PATCH 07/16] Revert "Revert "Merge remote-tracking branch 'yliu/bundle' into bundle"" This reverts commit 06d9a3484717ead6680f32dea331e21a9d176b74. --- monai/bundle/workflows.py | 10 +- tests/test_bundle_workflow.py | 10 +- tests/testing_data/fl_train_properties.json | 126 ++++++++++++++++++++ 3 files changed, 140 insertions(+), 6 deletions(-) create mode 100644 tests/testing_data/fl_train_properties.json diff --git a/monai/bundle/workflows.py b/monai/bundle/workflows.py index 006ec06ced..135ddcb1c2 100644 --- a/monai/bundle/workflows.py +++ b/monai/bundle/workflows.py @@ -21,11 +21,11 @@ from pathlib import Path from typing import Any, Sequence -from monai.config import PathLike from monai.apps.utils import get_logger from monai.bundle.config_parser import ConfigParser from monai.bundle.properties import InferProperties, MetaProperties, TrainProperties from monai.bundle.utils import DEFAULT_EXP_MGMT_SETTINGS, EXPR_KEY, ID_REF_KEY, ID_SEP_KEY +from monai.config import PathLike from monai.utils import BundleProperty, BundlePropertyConfig, deprecated_arg, deprecated_arg_default, ensure_tuple __all__ = ["BundleWorkflow", "ConfigWorkflow"] @@ -62,7 +62,9 @@ class BundleWorkflow(ABC): new_name="workflow_type", msg_suffix="please use `workflow_type` instead.", ) - def __init__(self, workflow_type: str | None = None, workflow: str | None = None, properties_path: PathLike | None = None): + def __init__( + self, workflow_type: str | None = None, workflow: str | None = None, properties_path: PathLike | None = None + ): workflow_type = workflow if workflow is not None else workflow_type if workflow_type is None and properties_path is None: self.properties = copy(MetaProperties) @@ -76,10 +78,10 @@ def __init__(self, workflow_type: str | None = None, workflow: str | None = None self.properties = json.load(json_file) self.workflow_type = None return - if workflow_type.lower() in self.supported_train_type: + if workflow_type.lower() in self.supported_train_type: # type: ignore[union-attr] self.properties = {**TrainProperties, **MetaProperties} self.workflow_type = "train" - elif workflow_type.lower() in self.supported_infer_type: + elif workflow_type.lower() in self.supported_infer_type: # type: ignore[union-attr] self.properties = {**InferProperties, **MetaProperties} self.workflow_type = "infer" else: diff --git a/tests/test_bundle_workflow.py b/tests/test_bundle_workflow.py index f7da37acef..6ed33b085e 100644 --- a/tests/test_bundle_workflow.py +++ b/tests/test_bundle_workflow.py @@ -33,7 +33,10 @@ TEST_CASE_2 = [os.path.join(os.path.dirname(__file__), "testing_data", "inference.yaml")] -TEST_CASE_3 = [os.path.join(os.path.dirname(__file__), "testing_data", "config_fl_train.json")] +TEST_CASE_3 = [ + os.path.join(os.path.dirname(__file__), "testing_data", "config_fl_train.json"), + 
os.path.join(os.path.dirname(__file__), "testing_data", "fl_train_properties.json"), +] class TestBundleWorkflow(unittest.TestCase): @@ -101,10 +104,11 @@ def test_inference_config(self, config_file): logging_file=os.path.join(os.path.dirname(__file__), "testing_data", "logging.conf"), **override, ) + inferer.add_property(name="inferer", required=True, config_id="inferer") self._test_inferer(inferer) @parameterized.expand([TEST_CASE_3]) - def test_train_config(self, config_file): + def test_train_config(self, config_file, properties_path): # test standard MONAI model-zoo config workflow trainer = ConfigWorkflow( workflow_type="train", @@ -113,6 +117,7 @@ def test_train_config(self, config_file): init_id="initialize", run_id="run", final_id="finalize", + properties_path=properties_path, ) # should initialize before parsing any bundle content trainer.initialize() @@ -144,6 +149,7 @@ def test_train_config(self, config_file): def test_non_config(self): # test user defined python style workflow inferer = NonConfigWorkflow(self.filename, self.data_dir) + inferer.add_property(name="inferer", required=True) self._test_inferer(inferer) diff --git a/tests/testing_data/fl_train_properties.json b/tests/testing_data/fl_train_properties.json new file mode 100644 index 0000000000..1ca9319cd8 --- /dev/null +++ b/tests/testing_data/fl_train_properties.json @@ -0,0 +1,126 @@ +{ + "bundle_root": { + "description": "root path of the bundle.", + "required": true, + "id": "bundle_root" + }, + "device": { + "description": "target device to execute the bundle workflow.", + "required": true, + "id": "device" + }, + "dataset_dir": { + "description": "directory path of the dataset.", + "required": true, + "id": "dataset_dir" + }, + "trainer": { + "description": "training workflow engine.", + "required": true, + "id": "train::trainer" + }, + "network_def": { + "description": "network module for the training.", + "required": false, + "id": "network_def" + }, + "max_epochs": { + "description": "max number of epochs to execute the training.", + "required": true, + "id": "train::trainer::max_epochs" + }, + "train_dataset": { + "description": "PyTorch dataset object for the training logic.", + "required": true, + "id": "train::dataset" + }, + "train_inferer": { + "description": "MONAI Inferer object to execute the model computation in training.", + "required": true, + "id": "train::inferer" + }, + "train_dataset_data": { + "description": "data source for the training dataset.", + "required": false, + "id": "train::dataset::data", + "refer_id": null + }, + "train_handlers": { + "description": "event-handlers for the training logic.", + "required": false, + "id": "train::handlers", + "refer_id": "train::trainer::train_handlers" + }, + "train_preprocessing": { + "description": "preprocessing for the training input data.", + "required": false, + "id": "train::preprocessing", + "refer_id": "train::dataset::transform" + }, + "train_postprocessing": { + "description": "postprocessing for the training model output data.", + "required": false, + "id": "train::postprocessing", + "refer_id": "train::trainer::postprocessing" + }, + "train_key_metric": { + "description": "key metric to compute on the training data.", + "required": false, + "id": "train::key_metric", + "refer_id": "train::trainer::key_train_metric" + }, + "evaluator": { + "description": "validation workflow engine.", + "required": false, + "id": "validate::evaluator", + "refer_id": "validator" + }, + "val_interval": { + "description": "validation interval during the 
training.", + "required": false, + "id": "val_interval", + "refer_id": "interval" + }, + "val_handlers": { + "description": "event-handlers for the validation logic.", + "required": false, + "id": "validate::handlers", + "refer_id": "validate::evaluator::val_handlers" + }, + "val_dataset": { + "description": "PyTorch dataset object for the validation logic.", + "required": false, + "id": "validate::dataset", + "refer_id": "validate::dataloader::dataset" + }, + "val_dataset_data": { + "description": "data source for the validation dataset.", + "required": false, + "id": "validate::dataset::data", + "refer_id": null + }, + "val_inferer": { + "description": "MONAI Inferer object to execute the model computation in validation.", + "required": false, + "id": "validate::inferer", + "refer_id": "validate::evaluator::inferer" + }, + "val_preprocessing": { + "description": "preprocessing for the validation input data.", + "required": false, + "id": "validate::preprocessing", + "refer_id": "validate::dataset::transform" + }, + "val_postprocessing": { + "description": "postprocessing for the validation model output data.", + "required": false, + "id": "validate::postprocessing", + "refer_id": "validate::evaluator::postprocessing" + }, + "val_key_metric": { + "description": "key metric to compute on the validation data.", + "required": false, + "id": "validate::key_metric", + "refer_id": "validate::evaluator::key_val_metric" + } +} From e274826c04d66f0b3402823a473e1e05434ff111 Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Tue, 26 Mar 2024 23:01:44 +0800 Subject: [PATCH 08/16] Revert "Merge remote-tracking branch 'yliu/bundle' into bundle" This reverts commit e941dde725dc6ba2d669563a329c205498dd8dcc, reversing changes made to 5aa4f381251315d6fbbd2d672666cf8d6fcbf347. 
--- monai/bundle/workflows.py | 10 +- tests/test_bundle_workflow.py | 10 +- tests/testing_data/fl_train_properties.json | 126 -------------------- 3 files changed, 6 insertions(+), 140 deletions(-) delete mode 100644 tests/testing_data/fl_train_properties.json diff --git a/monai/bundle/workflows.py b/monai/bundle/workflows.py index 135ddcb1c2..006ec06ced 100644 --- a/monai/bundle/workflows.py +++ b/monai/bundle/workflows.py @@ -21,11 +21,11 @@ from pathlib import Path from typing import Any, Sequence +from monai.config import PathLike from monai.apps.utils import get_logger from monai.bundle.config_parser import ConfigParser from monai.bundle.properties import InferProperties, MetaProperties, TrainProperties from monai.bundle.utils import DEFAULT_EXP_MGMT_SETTINGS, EXPR_KEY, ID_REF_KEY, ID_SEP_KEY -from monai.config import PathLike from monai.utils import BundleProperty, BundlePropertyConfig, deprecated_arg, deprecated_arg_default, ensure_tuple __all__ = ["BundleWorkflow", "ConfigWorkflow"] @@ -62,9 +62,7 @@ class BundleWorkflow(ABC): new_name="workflow_type", msg_suffix="please use `workflow_type` instead.", ) - def __init__( - self, workflow_type: str | None = None, workflow: str | None = None, properties_path: PathLike | None = None - ): + def __init__(self, workflow_type: str | None = None, workflow: str | None = None, properties_path: PathLike | None = None): workflow_type = workflow if workflow is not None else workflow_type if workflow_type is None and properties_path is None: self.properties = copy(MetaProperties) @@ -78,10 +76,10 @@ def __init__( self.properties = json.load(json_file) self.workflow_type = None return - if workflow_type.lower() in self.supported_train_type: # type: ignore[union-attr] + if workflow_type.lower() in self.supported_train_type: self.properties = {**TrainProperties, **MetaProperties} self.workflow_type = "train" - elif workflow_type.lower() in self.supported_infer_type: # type: ignore[union-attr] + elif workflow_type.lower() in self.supported_infer_type: self.properties = {**InferProperties, **MetaProperties} self.workflow_type = "infer" else: diff --git a/tests/test_bundle_workflow.py b/tests/test_bundle_workflow.py index 6ed33b085e..f7da37acef 100644 --- a/tests/test_bundle_workflow.py +++ b/tests/test_bundle_workflow.py @@ -33,10 +33,7 @@ TEST_CASE_2 = [os.path.join(os.path.dirname(__file__), "testing_data", "inference.yaml")] -TEST_CASE_3 = [ - os.path.join(os.path.dirname(__file__), "testing_data", "config_fl_train.json"), - os.path.join(os.path.dirname(__file__), "testing_data", "fl_train_properties.json"), -] +TEST_CASE_3 = [os.path.join(os.path.dirname(__file__), "testing_data", "config_fl_train.json")] class TestBundleWorkflow(unittest.TestCase): @@ -104,11 +101,10 @@ def test_inference_config(self, config_file): logging_file=os.path.join(os.path.dirname(__file__), "testing_data", "logging.conf"), **override, ) - inferer.add_property(name="inferer", required=True, config_id="inferer") self._test_inferer(inferer) @parameterized.expand([TEST_CASE_3]) - def test_train_config(self, config_file, properties_path): + def test_train_config(self, config_file): # test standard MONAI model-zoo config workflow trainer = ConfigWorkflow( workflow_type="train", @@ -117,7 +113,6 @@ def test_train_config(self, config_file, properties_path): init_id="initialize", run_id="run", final_id="finalize", - properties_path=properties_path, ) # should initialize before parsing any bundle content trainer.initialize() @@ -149,7 +144,6 @@ def test_train_config(self, 
config_file, properties_path): def test_non_config(self): # test user defined python style workflow inferer = NonConfigWorkflow(self.filename, self.data_dir) - inferer.add_property(name="inferer", required=True) self._test_inferer(inferer) diff --git a/tests/testing_data/fl_train_properties.json b/tests/testing_data/fl_train_properties.json deleted file mode 100644 index 1ca9319cd8..0000000000 --- a/tests/testing_data/fl_train_properties.json +++ /dev/null @@ -1,126 +0,0 @@ -{ - "bundle_root": { - "description": "root path of the bundle.", - "required": true, - "id": "bundle_root" - }, - "device": { - "description": "target device to execute the bundle workflow.", - "required": true, - "id": "device" - }, - "dataset_dir": { - "description": "directory path of the dataset.", - "required": true, - "id": "dataset_dir" - }, - "trainer": { - "description": "training workflow engine.", - "required": true, - "id": "train::trainer" - }, - "network_def": { - "description": "network module for the training.", - "required": false, - "id": "network_def" - }, - "max_epochs": { - "description": "max number of epochs to execute the training.", - "required": true, - "id": "train::trainer::max_epochs" - }, - "train_dataset": { - "description": "PyTorch dataset object for the training logic.", - "required": true, - "id": "train::dataset" - }, - "train_inferer": { - "description": "MONAI Inferer object to execute the model computation in training.", - "required": true, - "id": "train::inferer" - }, - "train_dataset_data": { - "description": "data source for the training dataset.", - "required": false, - "id": "train::dataset::data", - "refer_id": null - }, - "train_handlers": { - "description": "event-handlers for the training logic.", - "required": false, - "id": "train::handlers", - "refer_id": "train::trainer::train_handlers" - }, - "train_preprocessing": { - "description": "preprocessing for the training input data.", - "required": false, - "id": "train::preprocessing", - "refer_id": "train::dataset::transform" - }, - "train_postprocessing": { - "description": "postprocessing for the training model output data.", - "required": false, - "id": "train::postprocessing", - "refer_id": "train::trainer::postprocessing" - }, - "train_key_metric": { - "description": "key metric to compute on the training data.", - "required": false, - "id": "train::key_metric", - "refer_id": "train::trainer::key_train_metric" - }, - "evaluator": { - "description": "validation workflow engine.", - "required": false, - "id": "validate::evaluator", - "refer_id": "validator" - }, - "val_interval": { - "description": "validation interval during the training.", - "required": false, - "id": "val_interval", - "refer_id": "interval" - }, - "val_handlers": { - "description": "event-handlers for the validation logic.", - "required": false, - "id": "validate::handlers", - "refer_id": "validate::evaluator::val_handlers" - }, - "val_dataset": { - "description": "PyTorch dataset object for the validation logic.", - "required": false, - "id": "validate::dataset", - "refer_id": "validate::dataloader::dataset" - }, - "val_dataset_data": { - "description": "data source for the validation dataset.", - "required": false, - "id": "validate::dataset::data", - "refer_id": null - }, - "val_inferer": { - "description": "MONAI Inferer object to execute the model computation in validation.", - "required": false, - "id": "validate::inferer", - "refer_id": "validate::evaluator::inferer" - }, - "val_preprocessing": { - "description": "preprocessing for the 
validation input data.", - "required": false, - "id": "validate::preprocessing", - "refer_id": "validate::dataset::transform" - }, - "val_postprocessing": { - "description": "postprocessing for the validation model output data.", - "required": false, - "id": "validate::postprocessing", - "refer_id": "validate::evaluator::postprocessing" - }, - "val_key_metric": { - "description": "key metric to compute on the validation data.", - "required": false, - "id": "validate::key_metric", - "refer_id": "validate::evaluator::key_val_metric" - } -} From d6715122805cd498179547829f5bfe573bfd3881 Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Tue, 26 Mar 2024 23:02:26 +0800 Subject: [PATCH 09/16] Revert "Merge branch 'bundle' of https://github.com/KumoLiu/MONAI into bundle" This reverts commit cca300e6c28f29909cce6f88f181d6b202e18c95, reversing changes made to 67670a1cb886113770550e52a6f433e25e723092. Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> From ca90a4b3c5fd3e71f1b51646eb346b0db466883c Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Tue, 26 Mar 2024 23:04:29 +0800 Subject: [PATCH 10/16] Revert "remove ignite based properties" This reverts commit 5aa4f381251315d6fbbd2d672666cf8d6fcbf347. --- monai/bundle/properties.py | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/monai/bundle/properties.py b/monai/bundle/properties.py index 8ebffa9de4..a75e862a84 100644 --- a/monai/bundle/properties.py +++ b/monai/bundle/properties.py @@ -43,6 +43,11 @@ BundleProperty.REQUIRED: True, BundlePropertyConfig.ID: "dataset_dir", }, + "trainer": { + BundleProperty.DESC: "training workflow engine.", + BundleProperty.REQUIRED: True, + BundlePropertyConfig.ID: f"train{ID_SEP_KEY}trainer", + }, "network_def": { BundleProperty.DESC: "network module for the training.", BundleProperty.REQUIRED: False, @@ -58,12 +63,23 @@ BundleProperty.REQUIRED: True, BundlePropertyConfig.ID: f"train{ID_SEP_KEY}dataset", }, + "train_inferer": { + BundleProperty.DESC: "MONAI Inferer object to execute the model computation in training.", + BundleProperty.REQUIRED: True, + BundlePropertyConfig.ID: f"train{ID_SEP_KEY}inferer", + }, "train_dataset_data": { BundleProperty.DESC: "data source for the training dataset.", BundleProperty.REQUIRED: False, BundlePropertyConfig.ID: f"train{ID_SEP_KEY}dataset{ID_SEP_KEY}data", BundlePropertyConfig.REF_ID: None, # no reference to this ID }, + "train_handlers": { + BundleProperty.DESC: "event-handlers for the training logic.", + BundleProperty.REQUIRED: False, + BundlePropertyConfig.ID: f"train{ID_SEP_KEY}handlers", + BundlePropertyConfig.REF_ID: f"train{ID_SEP_KEY}trainer{ID_SEP_KEY}train_handlers", + }, "train_preprocessing": { BundleProperty.DESC: "preprocessing for the training input data.", BundleProperty.REQUIRED: False, @@ -82,6 +98,12 @@ BundlePropertyConfig.ID: f"train{ID_SEP_KEY}key_metric", BundlePropertyConfig.REF_ID: f"train{ID_SEP_KEY}trainer{ID_SEP_KEY}key_train_metric", }, + "evaluator": { + BundleProperty.DESC: "validation workflow engine.", + BundleProperty.REQUIRED: False, + BundlePropertyConfig.ID: f"validate{ID_SEP_KEY}evaluator", + BundlePropertyConfig.REF_ID: "validator", # this REF_ID is the arg name of `ValidationHandler` + }, "val_interval": { BundleProperty.DESC: "validation interval during the training.", BundleProperty.REQUIRED: False, @@ -153,17 +175,33 @@ BundleProperty.REQUIRED: True, BundlePropertyConfig.ID: "dataset", }, + "evaluator": 
{ + BundleProperty.DESC: "inference / evaluation workflow engine.", + BundleProperty.REQUIRED: True, + BundlePropertyConfig.ID: "evaluator", + }, "network_def": { BundleProperty.DESC: "network module for the inference.", BundleProperty.REQUIRED: True, BundlePropertyConfig.ID: "network_def", }, + "inferer": { + BundleProperty.DESC: "MONAI Inferer object to execute the model computation in inference.", + BundleProperty.REQUIRED: True, + BundlePropertyConfig.ID: "inferer", + }, "dataset_data": { BundleProperty.DESC: "data source for the inference / evaluation dataset.", BundleProperty.REQUIRED: False, BundlePropertyConfig.ID: f"dataset{ID_SEP_KEY}data", BundlePropertyConfig.REF_ID: None, # no reference to this ID }, + "handlers": { + BundleProperty.DESC: "event-handlers for the inference / evaluation logic.", + BundleProperty.REQUIRED: False, + BundlePropertyConfig.ID: "handlers", + BundlePropertyConfig.REF_ID: f"evaluator{ID_SEP_KEY}val_handlers", + }, "preprocessing": { BundleProperty.DESC: "preprocessing for the input data.", BundleProperty.REQUIRED: False, From 6526f59673404d46386ae4eaf9cb4cd6fa01b211 Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Tue, 26 Mar 2024 23:09:24 +0800 Subject: [PATCH 11/16] DCO Remediation Commit for YunLiu <55491388+KumoLiu@users.noreply.github.com> I, YunLiu <55491388+KumoLiu@users.noreply.github.com>, hereby add my Signed-off-by to this commit: 06d9a3484717ead6680f32dea331e21a9d176b74 I, YunLiu <55491388+KumoLiu@users.noreply.github.com>, hereby add my Signed-off-by to this commit: 22735af496515943803fa4ed2a63655b2df38d3b I, YunLiu <55491388+KumoLiu@users.noreply.github.com>, hereby add my Signed-off-by to this commit: e274826c04d66f0b3402823a473e1e05434ff111 I, YunLiu <55491388+KumoLiu@users.noreply.github.com>, hereby add my Signed-off-by to this commit: ca90a4b3c5fd3e71f1b51646eb346b0db466883c Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> From 54ac3757c536140bfd7b8cf45b715fc1b32583b1 Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Tue, 26 Mar 2024 23:19:48 +0800 Subject: [PATCH 12/16] fix flake8 Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> --- monai/bundle/workflows.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/monai/bundle/workflows.py b/monai/bundle/workflows.py index 006ec06ced..0b2e1c7742 100644 --- a/monai/bundle/workflows.py +++ b/monai/bundle/workflows.py @@ -21,11 +21,11 @@ from pathlib import Path from typing import Any, Sequence -from monai.config import PathLike from monai.apps.utils import get_logger from monai.bundle.config_parser import ConfigParser from monai.bundle.properties import InferProperties, MetaProperties, TrainProperties from monai.bundle.utils import DEFAULT_EXP_MGMT_SETTINGS, EXPR_KEY, ID_REF_KEY, ID_SEP_KEY +from monai.config import PathLike from monai.utils import BundleProperty, BundlePropertyConfig, deprecated_arg, deprecated_arg_default, ensure_tuple __all__ = ["BundleWorkflow", "ConfigWorkflow"] @@ -62,7 +62,9 @@ class BundleWorkflow(ABC): new_name="workflow_type", msg_suffix="please use `workflow_type` instead.", ) - def __init__(self, workflow_type: str | None = None, workflow: str | None = None, properties_path: PathLike | None = None): + def __init__( + self, workflow_type: str | None = None, workflow: str | None = None, properties_path: PathLike | None = None + ): workflow_type = workflow if workflow is not None else workflow_type if workflow_type is None and 
properties_path is None: self.properties = copy(MetaProperties) From 40bb1300e8baccecbe05a61726ba508d27753eb2 Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Tue, 26 Mar 2024 23:29:31 +0800 Subject: [PATCH 13/16] add determinism for mixup Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> --- tests/test_regularization.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/tests/test_regularization.py b/tests/test_regularization.py index d381ea72ca..c6f727cb54 100644 --- a/tests/test_regularization.py +++ b/tests/test_regularization.py @@ -16,9 +16,16 @@ import torch from monai.transforms import CutMix, CutMixd, CutOut, MixUp, MixUpd +from monai.utils import set_determinism class TestMixup(unittest.TestCase): + def setUp(self) -> None: + set_determinism(seed=0) + + def tearDown(self) -> None: + set_determinism(None) + def test_mixup(self): for dims in [2, 3]: shape = (6, 3) + (32,) * dims @@ -52,6 +59,12 @@ def test_mixupd(self): class TestCutMix(unittest.TestCase): + def setUp(self) -> None: + set_determinism(seed=0) + + def tearDown(self) -> None: + set_determinism(None) + def test_cutmix(self): for dims in [2, 3]: shape = (6, 3) + (32,) * dims @@ -76,6 +89,12 @@ def test_cutmixd(self): class TestCutOut(unittest.TestCase): + def setUp(self) -> None: + set_determinism(seed=0) + + def tearDown(self) -> None: + set_determinism(None) + def test_cutout(self): for dims in [2, 3]: shape = (6, 3) + (32,) * dims From 5e6edaa5d7887200e6905fbf9064cafb2512373c Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Tue, 26 Mar 2024 23:32:21 +0800 Subject: [PATCH 14/16] fix mypy Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> --- monai/bundle/workflows.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/monai/bundle/workflows.py b/monai/bundle/workflows.py index 0b2e1c7742..135ddcb1c2 100644 --- a/monai/bundle/workflows.py +++ b/monai/bundle/workflows.py @@ -78,10 +78,10 @@ def __init__( self.properties = json.load(json_file) self.workflow_type = None return - if workflow_type.lower() in self.supported_train_type: + if workflow_type.lower() in self.supported_train_type: # type: ignore[union-attr] self.properties = {**TrainProperties, **MetaProperties} self.workflow_type = "train" - elif workflow_type.lower() in self.supported_infer_type: + elif workflow_type.lower() in self.supported_infer_type: # type: ignore[union-attr] self.properties = {**InferProperties, **MetaProperties} self.workflow_type = "infer" else: From 918cb7792dadea52e663c28fc8520e9c7b024710 Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Wed, 27 Mar 2024 22:30:04 +0800 Subject: [PATCH 15/16] add unittest Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> --- tests/test_bundle_workflow.py | 10 +++ tests/testing_data/fl_infer_properties.json | 67 +++++++++++++++++++++ 2 files changed, 77 insertions(+) create mode 100644 tests/testing_data/fl_infer_properties.json diff --git a/tests/test_bundle_workflow.py b/tests/test_bundle_workflow.py index f7da37acef..f6bfdc4a3d 100644 --- a/tests/test_bundle_workflow.py +++ b/tests/test_bundle_workflow.py @@ -103,6 +103,16 @@ def test_inference_config(self, config_file): ) self._test_inferer(inferer) + # test property path + inferer = ConfigWorkflow( + config_file=config_file, + properties_path=os.path.join(os.path.dirname(__file__), "testing_data", "fl_infer_properties.json"), + 
logging_file=os.path.join(os.path.dirname(__file__), "testing_data", "logging.conf"), + **override, + ) + self._test_inferer(inferer) + self.assertEqual(inferer.workflow_type, None) + @parameterized.expand([TEST_CASE_3]) def test_train_config(self, config_file): # test standard MONAI model-zoo config workflow diff --git a/tests/testing_data/fl_infer_properties.json b/tests/testing_data/fl_infer_properties.json new file mode 100644 index 0000000000..72e97cd2c6 --- /dev/null +++ b/tests/testing_data/fl_infer_properties.json @@ -0,0 +1,67 @@ +{ + "bundle_root": { + "description": "root path of the bundle.", + "required": true, + "id": "bundle_root" + }, + "device": { + "description": "target device to execute the bundle workflow.", + "required": true, + "id": "device" + }, + "dataset_dir": { + "description": "directory path of the dataset.", + "required": true, + "id": "dataset_dir" + }, + "dataset": { + "description": "PyTorch dataset object for the inference / evaluation logic.", + "required": true, + "id": "dataset" + }, + "evaluator": { + "description": "inference / evaluation workflow engine.", + "required": true, + "id": "evaluator" + }, + "network_def": { + "description": "network module for the inference.", + "required": true, + "id": "network_def" + }, + "inferer": { + "description": "MONAI Inferer object to execute the model computation in inference.", + "required": true, + "id": "inferer" + }, + "dataset_data": { + "description": "data source for the inference / evaluation dataset.", + "required": false, + "id": "dataset::data", + "refer_id": null + }, + "handlers": { + "description": "event-handlers for the inference / evaluation logic.", + "required": false, + "id": "handlers", + "refer_id": "evaluator::val_handlers" + }, + "preprocessing": { + "description": "preprocessing for the input data.", + "required": false, + "id": "preprocessing", + "refer_id": "dataset::transform" + }, + "postprocessing": { + "description": "postprocessing for the model output data.", + "required": false, + "id": "postprocessing", + "refer_id": "evaluator::postprocessing" + }, + "key_metric": { + "description": "the key metric during evaluation.", + "required": false, + "id": "key_metric", + "refer_id": "evaluator::key_val_metric" + } +} From b320035a0984d52617cc95a7e05a9deec17eddb8 Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Mon, 1 Apr 2024 17:03:07 +0800 Subject: [PATCH 16/16] fix ci Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> --- monai/bundle/workflows.py | 1 + 1 file changed, 1 insertion(+) diff --git a/monai/bundle/workflows.py b/monai/bundle/workflows.py index a98a468786..d876f6d7ae 100644 --- a/monai/bundle/workflows.py +++ b/monai/bundle/workflows.py @@ -108,6 +108,7 @@ def __init__( with open(properties_path) as json_file: self.properties = json.load(json_file) self.workflow_type = None + self.meta_file = meta_file return if workflow_type.lower() in self.supported_train_type: # type: ignore[union-attr] self.properties = {**TrainProperties, **MetaProperties}
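For reference, a minimal usage sketch of the `properties_path` argument introduced in this series, mirroring the calls in tests/test_bundle_workflow.py above. The config and properties file names below are placeholders, not files from this patch set; the properties JSON maps property names to {"description", "required", "id", "refer_id"} entries, as in tests/testing_data/fl_infer_properties.json added above.

    from monai.bundle import ConfigWorkflow

    # placeholder file names for illustration only
    workflow = ConfigWorkflow(
        config_file="inference.json",           # bundle config to execute
        properties_path="my_properties.json",   # custom property definitions loaded as JSON
    )
    workflow.initialize()
    # when properties_path is given, no workflow_type is inferred (see patch 01/16)
    assert workflow.workflow_type is None
    workflow.run()
    workflow.finalize()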