From adda4b72a9d78523cd3160bd4688b3e7ec52de22 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Mon, 28 Mar 2022 13:16:36 +0800 Subject: [PATCH 01/15] [DLMED] add tutorial Signed-off-by: Nic Ma --- modules/bundles/get_started.ipynb | 209 ++++++++++++++++++++++++++++++ 1 file changed, 209 insertions(+) create mode 100644 modules/bundles/get_started.ipynb diff --git a/modules/bundles/get_started.ipynb b/modules/bundles/get_started.ipynb new file mode 100644 index 0000000000..db0598073d --- /dev/null +++ b/modules/bundles/get_started.ipynb @@ -0,0 +1,209 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Overview\n", + "\n", + "MONAI bundle usually includes the stored weights of a model, TorchScript model, JSON files that include configs and metadata about the model, information for constructing training, inference, and post-processing transform sequences, plain-text description, legal information, and other data the model creator wishes to include.\n", + "\n", + "For more information about MONAI bundle description: https://docs.monai.io/en/latest/mb_specification.html.\n", + "\n", + "This notebook is step-by-step tutorial to help get started to develop a bundle package. It at least contains a `train.json` config file to construct the training pipeline and may also have a `metadata.json` file to define the metadata information.\n", + "\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Project-MONAI/tutorials/blob/master/modules/bundles/get_started.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup environment" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "!python -c \"import monai\" || pip install -q \"monai-weekly[nibabel]\"" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import os\n", + "\n", + "from monai.config import print_config\n", + "from monai.bundle import ConfigParser" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup imports" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Copyright (c) MONAI Consortium\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "# http://www.apache.org/licenses/LICENSE-2.0\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License.\n", + "\n", + "print_config()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup data directory\n", + "\n", + "Here specify a directory with the `MONAI_DATA_DIRECTORY` environment variable to save downloaded dataset and outputs." 
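The cell that follows only reads the variable, so `root_dir` ends up as `None` when `MONAI_DATA_DIRECTORY` is not set, and the later cleanup cell checks a `directory` name that this patch never defines. A minimal sketch of the usual fallback, assuming a temporary directory is acceptable when the variable is missing (the `tempfile` fallback is added here for illustration and is not part of the committed cell):

```python
import os
import tempfile

# use MONAI_DATA_DIRECTORY if set, otherwise fall back to a temporary directory
directory = os.environ.get("MONAI_DATA_DIRECTORY")
root_dir = tempfile.mkdtemp() if directory is None else directory
print(f"root dir is: {root_dir}")
```

Keeping the `directory` name around also makes the final cleanup step (`if directory is None: shutil.rmtree(root_dir)`) well defined.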
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "root dir is: /workspace/data/medical/\n" + ] + } + ], + "source": [ + "root_dir = os.environ.get(\"MONAI_DATA_DIRECTORY\")\n", + "print(f\"root dir is: {root_dir}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Download dataset\n", + "\n", + "Downloads and extracts the dataset. \n", + "The dataset comes from http://medicaldecathlon.com/." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "resource = \"https://msd-for-monai.s3-us-west-2.amazonaws.com/Task09_Spleen.tar\"\n", + "md5 = \"410d4a301da4e5b2f6f86ec3ddba524e\"\n", + "\n", + "compressed_file = os.path.join(root_dir, \"Task09_Spleen.tar\")\n", + "data_dir = os.path.join(root_dir, \"Task09_Spleen\")\n", + "if not os.path.exists(data_dir):\n", + " download_and_extract(resource, compressed_file, root_dir, md5)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Define train config - Set MSD Spleen dataset path\n", + "\n", + "The following groups images and labels from `Task09_Spleen/imagesTr` and `Task09_Spleen/labelsTr` into pairs." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "train_images = sorted(\n", + " glob.glob(os.path.join(data_dir, \"imagesTr\", \"*.nii.gz\")))\n", + "train_labels = sorted(\n", + " glob.glob(os.path.join(data_dir, \"labelsTr\", \"*.nii.gz\")))\n", + "data_dicts = [\n", + " {\"image\": image_name, \"label\": label_name}\n", + " for image_name, label_name in zip(train_images, train_labels)\n", + "]\n", + "train_data_dicts, val_data_dicts = data_dicts[:-9], data_dicts[-9:]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The image file names are organised into a list of dictionaries." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Cleanup data directory\n", + "\n", + "Remove directory if a temporary was used." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "if directory is None:\n", + " shutil.rmtree(root_dir)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.12" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 6d8c6199bcb603b5be801e28fb11120eb6b75d23 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Mon, 28 Mar 2022 20:17:46 +0800 Subject: [PATCH 02/15] [DLMED] add imports Signed-off-by: Nic Ma --- modules/bundles/get_started.ipynb | 41 +++++++++++++++++-------------- 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/modules/bundles/get_started.ipynb b/modules/bundles/get_started.ipynb index db0598073d..81c7c70843 100644 --- a/modules/bundles/get_started.ipynb +++ b/modules/bundles/get_started.ipynb @@ -134,9 +134,17 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Define train config - Set MSD Spleen dataset path\n", + "## Define train config - Set imports and input / output environments\n", "\n", - "The following groups images and labels from `Task09_Spleen/imagesTr` and `Task09_Spleen/labelsTr` into pairs." + "Now let's start to define the `train.json` config file for a regular training task.\n", + "\n", + "For more details about the syntax in bundle config, please check: https://github.com/wyli/MONAI/blob/3482-bundle-doc/docs/source/config_syntax.md.\n", + "\n", + "Please note that MONAI bundle doesn't require any hard-code logic in the config, so users can define the config content in any structure.\n", + "\n", + "For the first step, import `os` and `glob` to use in the expressions (start with `$`). Then define input / output environments and enable `cudnn.benchmark` for better performance.\n", + "\n", + "The train and validation image file names are organized into a list of dictionaries." ] }, { @@ -145,22 +153,19 @@ "metadata": {}, "outputs": [], "source": [ - "train_images = sorted(\n", - " glob.glob(os.path.join(data_dir, \"imagesTr\", \"*.nii.gz\")))\n", - "train_labels = sorted(\n", - " glob.glob(os.path.join(data_dir, \"labelsTr\", \"*.nii.gz\")))\n", - "data_dicts = [\n", - " {\"image\": image_name, \"label\": label_name}\n", - " for image_name, label_name in zip(train_images, train_labels)\n", - "]\n", - "train_data_dicts, val_data_dicts = data_dicts[:-9], data_dicts[-9:]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The image file names are organised into a list of dictionaries." 
+ "{\n", + " \"imports\": [\n", + " \"$import glob\",\n", + " \"$import os\"\n", + " ],\n", + " \"device\": \"$torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\",\n", + " \"cudnn_opt\": \"$setattr(torch.backends.cudnn, 'benchmark', True)\",\n", + " \"dataset_dir\": \"/workspace/data/Task09_Spleen\",\n", + " \"ckpt_path\": \"/workspace/data/models/model.pt\",\n", + " \"datalist\": \"$list(sorted(glob.glob(@dataset_dir + '/imagesTs/*.nii.gz')))\",\n", + " \"train_data\": \"$[{'image': i} for i in @datalist[:-9]]\",\n", + " \"val_data\": \"$[{'image': i} for i in @datalist[-9:]]\"\n", + "}" ] }, { From f808c0a2a1f2086ed19e5904109438e75870cfb5 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Mon, 28 Mar 2022 22:16:01 +0800 Subject: [PATCH 03/15] [DLMED] add train config Signed-off-by: Nic Ma --- .../configs/inference.json | 4 +- .../spleen_segmentation/configs/train.json | 225 ++++++++++++++++++ .../spleen_segmentation/docs/README.md | 7 + 3 files changed, 234 insertions(+), 2 deletions(-) create mode 100644 modules/bundles/spleen_segmentation/configs/train.json diff --git a/modules/bundles/spleen_segmentation/configs/inference.json b/modules/bundles/spleen_segmentation/configs/inference.json index 52a7c20e86..6c2f9ba1d4 100644 --- a/modules/bundles/spleen_segmentation/configs/inference.json +++ b/modules/bundles/spleen_segmentation/configs/inference.json @@ -4,10 +4,10 @@ "$import os" ], "cudnn_opt": "$setattr(torch.backends.cudnn, 'benchmark', True)", - "dataset_dir": "/workspace/data/Task09_Spleen", + "device": "$torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')", "ckpt_path": "/workspace/data/tutorials/modules/bundles/spleen_segmentation/models/model.pt", "download_ckpt": "$monai.apps.utils.download_url('https://huggingface.co/MONAI/example_spleen_segmentation/resolve/main/model.pt', @ckpt_path)", - "device": "$torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')", + "dataset_dir": "/workspace/data/Task09_Spleen", "datalist": "$list(sorted(glob.glob(@dataset_dir + '/imagesTs/*.nii.gz')))", "network_def": { "_target_": "UNet", diff --git a/modules/bundles/spleen_segmentation/configs/train.json b/modules/bundles/spleen_segmentation/configs/train.json new file mode 100644 index 0000000000..048478d06f --- /dev/null +++ b/modules/bundles/spleen_segmentation/configs/train.json @@ -0,0 +1,225 @@ +{ + "imports": [ + "$import glob", + "$import os" + ], + "determinism": "$monai.utils.set_determinism(seed=123)", + "cudnn_opt": "$setattr(torch.backends.cudnn, 'benchmark', True)", + "device": "$torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')", + "ckpt_path": "/workspace/data/tutorials/modules/bundles/spleen_segmentation/models/model.pt", + "dataset_dir": "/workspace/data/Task09_Spleen", + "images": "$list(sorted(glob.glob(@dataset_dir + '/imagesTr/*.nii.gz')))", + "labels": "$list(sorted(glob.glob(@dataset_dir + '/labelsTr/*.nii.gz')))", + "network_def": { + "_target_": "UNet", + "spatial_dims": 3, + "in_channels": 1, + "out_channels": 2, + "channels": [ + 16, + 32, + 64, + 128, + 256 + ], + "strides": [ + 2, + 2, + 2, + 2 + ], + "num_res_units": 2, + "norm": "batch" + }, + "network": "$@network_def.to(@device)", + "train": { + "preprocessing": { + "_target_": "Compose", + "transforms": [ + { + "_target_": "LoadImaged", + "keys": ["image", "label"] + }, + { + "_target_": "EnsureChannelFirstd", + "keys": ["image", "label"] + }, + { + "_target_": "Orientationd", + "keys": ["image", "label"], + "axcodes": "RAS" + }, + { + "_target_": "Spacingd", + "keys": 
["image", "label"], + "pixdim": [1.5, 1.5, 2.0], + "mode": "bilinear" + }, + { + "_target_": "ScaleIntensityRanged", + "keys": ["image", "label"], + "a_min": -57, + "a_max": 164, + "b_min": 0, + "b_max": 1, + "clip": true + }, + { + "_target_": "RandCropByPosNegLabeld", + "keys": ["image", "label"], + "label_key": "label", + "spatial_size": [96, 96, 96], + "pos": 1, + "neg": 1, + "num_samples": 4, + "image_key": "image", + "image_threshold": 0 + }, + { + "_target_": "EnsureTyped", + "keys": ["image", "label"] + } + ] + }, + "dataset": { + "_target_": "CacheDataset", + "data": "$[{'image': i, 'label': l} for i, l in zip(@images[:-9], @labels[:-9])]", + "transform": "@train#preprocessing", + "cache_rate": 1.0, + "num_workers": 4 + }, + "dataloader": { + "_target_": "DataLoader", + "dataset": "@train#dataset", + "batch_size": 1, + "shuffle": false, + "num_workers": 4 + } + }, + "validate": { + "preprocessing": { + "_target_": "Compose", + "transforms": [ + { + "_target_": "LoadImaged", + "keys": ["image", "label"] + }, + { + "_target_": "EnsureChannelFirstd", + "keys": ["image", "label"] + }, + { + "_target_": "Orientationd", + "keys": ["image", "label"], + "axcodes": "RAS" + }, + { + "_target_": "Spacingd", + "keys": ["image", "label"], + "pixdim": [1.5, 1.5, 2.0], + "mode": "bilinear" + }, + { + "_target_": "ScaleIntensityRanged", + "keys": ["image", "label"], + "a_min": -57, + "a_max": 164, + "b_min": 0, + "b_max": 1, + "clip": true + }, + { + "_target_": "RandCropByPosNegLabeld", + "keys": ["image", "label"], + "label_key": "label", + "spatial_size": [96, 96, 96], + "pos": 1, + "neg": 1, + "num_samples": 4, + "image_key": "image", + "image_threshold": 0 + }, + { + "_target_": "EnsureTyped", + "keys": ["image", "label"] + } + ] + }, + "dataset": { + "_target_": "Dataset", + "data": "$[{'image': i, 'label': l} for i, l in zip(@images[-9:], @labels[-9:])]", + "transform": "@preprocessing" + }, + "dataloader": { + "_target_": "DataLoader", + "dataset": "@dataset", + "batch_size": 1, + "shuffle": false, + "num_workers": 4 + }, + "inferer": { + "_target_": "SlidingWindowInferer", + "roi_size": [ + 96, + 96, + 96 + ], + "sw_batch_size": 4, + "overlap": 0.5 + }, + "postprocessing": { + "_target_": "Compose", + "transforms": [ + { + "_target_": "Activationsd", + "keys": "pred", + "softmax": true + }, + { + "_target_": "Invertd", + "keys": "pred", + "transform": "@preprocessing", + "orig_keys": "image", + "meta_key_postfix": "meta_dict", + "nearest_interp": false, + "to_tensor": true + }, + { + "_target_": "AsDiscreted", + "keys": "pred", + "argmax": true + }, + { + "_target_": "SaveImaged", + "keys": "pred", + "meta_keys": "pred_meta_dict", + "output_dir": "eval" + } + ] + }, + "handlers": [ + { + "_target_": "CheckpointLoader", + "_requires_": "@download_ckpt", + "_disabled_": "$not os.path.exists(@ckpt_path)", + "load_path": "@ckpt_path", + "load_dict": {"model": "@network"} + }, + { + "_target_": "StatsHandler", + "iteration_log": false + } + ] + }, + "trainer": { + "_target_": "SupervisedTrainer", + "_requires_": ["determinism", "@cudnn_opt"], + "device": "@device", + "val_data_loader": "@dataloader", + "network": "@network", + "inferer": "@inferer", + "postprocessing": "@postprocessing", + "val_handlers": "@handlers", + "amp": false + } +} diff --git a/modules/bundles/spleen_segmentation/docs/README.md b/modules/bundles/spleen_segmentation/docs/README.md index bab58e643c..2c286b4609 100644 --- a/modules/bundles/spleen_segmentation/docs/README.md +++ 
b/modules/bundles/spleen_segmentation/docs/README.md @@ -24,18 +24,25 @@ Mean Dice = 0.96 ## commands example Execute inference: + ``` python -m monai.bundle run evaluator --meta_file configs/metadata.json --config_file configs/inference.json --logging_file configs/logging.conf ``` + Verify the metadata format: + ``` python -m monai.bundle verify_metadata --meta_file configs/metadata.json --filepath eval/schema.json ``` + Verify the data shape of network: + ``` python -m monai.bundle verify_net_in_out network_def --meta_file configs/metadata.json --config_file configs/inference.json ``` + Export checkpoint to TorchScript file: + ``` python -m monai.bundle export network_def --filepath models/model.ts --ckpt_file models/model.pt --meta_file configs/metadata.json --config_file configs/inference.json ``` From 46a00e084f69b5b836d92c6c8b786ffaea2db98d Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Tue, 29 Mar 2022 13:32:30 +0800 Subject: [PATCH 04/15] [DLMED] add train config Signed-off-by: Nic Ma --- .../configs/inference.json | 23 +- .../spleen_segmentation/configs/train.json | 246 +++++++++--------- .../spleen_segmentation/docs/README.md | 6 + 3 files changed, 138 insertions(+), 137 deletions(-) diff --git a/modules/bundles/spleen_segmentation/configs/inference.json b/modules/bundles/spleen_segmentation/configs/inference.json index 6c2f9ba1d4..12b9d67eb9 100644 --- a/modules/bundles/spleen_segmentation/configs/inference.json +++ b/modules/bundles/spleen_segmentation/configs/inference.json @@ -14,19 +14,8 @@ "spatial_dims": 3, "in_channels": 1, "out_channels": 2, - "channels": [ - 16, - 32, - 64, - 128, - 256 - ], - "strides": [ - 2, - 2, - 2, - 2 - ], + "channels": [16, 32, 64, 128, 256], + "strides": [2, 2, 2, 2], "num_res_units": 2, "norm": "batch" }, @@ -82,11 +71,7 @@ }, "inferer": { "_target_": "SlidingWindowInferer", - "roi_size": [ - 96, - 96, - 96 - ], + "roi_size": [96, 96, 96], "sw_batch_size": 4, "overlap": 0.5 }, @@ -142,6 +127,6 @@ "inferer": "@inferer", "postprocessing": "@postprocessing", "val_handlers": "@handlers", - "amp": false + "amp": true } } diff --git a/modules/bundles/spleen_segmentation/configs/train.json b/modules/bundles/spleen_segmentation/configs/train.json index 048478d06f..9436fd50b9 100644 --- a/modules/bundles/spleen_segmentation/configs/train.json +++ b/modules/bundles/spleen_segmentation/configs/train.json @@ -1,13 +1,14 @@ { "imports": [ "$import glob", - "$import os" + "$import os", + "$import ignite" ], "determinism": "$monai.utils.set_determinism(seed=123)", "cudnn_opt": "$setattr(torch.backends.cudnn, 'benchmark', True)", "device": "$torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')", - "ckpt_path": "/workspace/data/tutorials/modules/bundles/spleen_segmentation/models/model.pt", - "dataset_dir": "/workspace/data/Task09_Spleen", + "ckpt_path": "/workspace/data/medical/tutorials/modules/bundles/spleen_segmentation/models/model.pt", + "dataset_dir": "/workspace/data/medical/Task09_Spleen", "images": "$list(sorted(glob.glob(@dataset_dir + '/imagesTr/*.nii.gz')))", "labels": "$list(sorted(glob.glob(@dataset_dir + '/labelsTr/*.nii.gz')))", "network_def": { @@ -15,23 +16,24 @@ "spatial_dims": 3, "in_channels": 1, "out_channels": 2, - "channels": [ - 16, - 32, - 64, - 128, - 256 - ], - "strides": [ - 2, - 2, - 2, - 2 - ], + "channels": [16, 32, 64, 128, 256], + "strides": [2, 2, 2, 2], "num_res_units": 2, "norm": "batch" }, "network": "$@network_def.to(@device)", + "loss": { + "_target_": "DiceCELoss", + "to_onehot_y": true, + "softmax": true, + 
"squared_pred": true, + "batch": true + }, + "optimizer": { + "_target_": "torch.optim.Adam", + "params": "$@network.parameters()", + "lr": 1e-4 + }, "train": { "preprocessing": { "_target_": "Compose", @@ -53,11 +55,11 @@ "_target_": "Spacingd", "keys": ["image", "label"], "pixdim": [1.5, 1.5, 2.0], - "mode": "bilinear" + "mode": ["bilinear", "nearest"] }, { "_target_": "ScaleIntensityRanged", - "keys": ["image", "label"], + "keys": "image", "a_min": -57, "a_max": 164, "b_min": 0, @@ -91,135 +93,143 @@ "dataloader": { "_target_": "DataLoader", "dataset": "@train#dataset", - "batch_size": 1, + "batch_size": 2, "shuffle": false, "num_workers": 4 - } - }, - "validate": { - "preprocessing": { + }, + "inferer": { + "_target_": "SimpleInferer" + }, + "postprocessing": { "_target_": "Compose", "transforms": [ { - "_target_": "LoadImaged", - "keys": ["image", "label"] - }, - { - "_target_": "EnsureChannelFirstd", - "keys": ["image", "label"] - }, - { - "_target_": "Orientationd", - "keys": ["image", "label"], - "axcodes": "RAS" - }, - { - "_target_": "Spacingd", - "keys": ["image", "label"], - "pixdim": [1.5, 1.5, 2.0], - "mode": "bilinear" - }, - { - "_target_": "ScaleIntensityRanged", - "keys": ["image", "label"], - "a_min": -57, - "a_max": 164, - "b_min": 0, - "b_max": 1, - "clip": true - }, - { - "_target_": "RandCropByPosNegLabeld", - "keys": ["image", "label"], - "label_key": "label", - "spatial_size": [96, 96, 96], - "pos": 1, - "neg": 1, - "num_samples": 4, - "image_key": "image", - "image_threshold": 0 + "_target_": "Activationsd", + "keys": "pred", + "softmax": true }, { - "_target_": "EnsureTyped", - "keys": ["image", "label"] + "_target_": "AsDiscreted", + "keys": ["pred", "label"], + "argmax": [true, false], + "to_onehot": 2 } ] }, + "handlers": [ + { + "_target_": "ValidationHandler", + "validator": "@validate#evaluator", + "epoch_level": true, + "interval": 5 + }, + { + "_target_": "StatsHandler", + "tag_name": "train_loss", + "output_transform": "$monai.handlers.from_engine(['loss'], first=True)" + }, + { + "_target_": "TensorBoardStatsHandler", + "log_dir": "eval", + "tag_name": "train_loss", + "output_transform": "$monai.handlers.from_engine(['loss'], first=True)" + } + ], + "key_metric": { + "train_accuracy": { + "_target_": "ignite.metrics.Accuracy", + "output_transform": "$monai.handlers.from_engine(['pred', 'label'])" + } + }, + "trainer": { + "_target_": "SupervisedTrainer", + "_requires_": ["@determinism", "@cudnn_opt"], + "max_epochs": 100, + "device": "@device", + "train_data_loader": "@train#dataloader", + "network": "@network", + "loss_function": "@loss", + "optimizer": "@optimizer", + "inferer": "@train#inferer", + "postprocessing": "@train#postprocessing", + "key_train_metric": "@train#key_metric", + "train_handlers": "@train#handlers", + "amp": true + } + }, + "validate": { + "preprocessing": { + "_target_": "Compose", + "transforms": [ + "%train#preprocessing#transforms#0", + "%train#preprocessing#transforms#1", + "%train#preprocessing#transforms#2", + "%train#preprocessing#transforms#3", + "%train#preprocessing#transforms#4", + "%train#preprocessing#transforms#6" + ] + }, "dataset": { - "_target_": "Dataset", + "_target_": "CacheDataset", "data": "$[{'image': i, 'label': l} for i, l in zip(@images[-9:], @labels[-9:])]", - "transform": "@preprocessing" + "transform": "@validate#preprocessing", + "cache_rate": 1.0 }, "dataloader": { "_target_": "DataLoader", - "dataset": "@dataset", + "dataset": "@validate#dataset", "batch_size": 1, "shuffle": false, "num_workers": 
4 }, "inferer": { "_target_": "SlidingWindowInferer", - "roi_size": [ - 96, - 96, - 96 - ], + "roi_size": [96, 96, 96], "sw_batch_size": 4, "overlap": 0.5 }, - "postprocessing": { - "_target_": "Compose", - "transforms": [ - { - "_target_": "Activationsd", - "keys": "pred", - "softmax": true - }, - { - "_target_": "Invertd", - "keys": "pred", - "transform": "@preprocessing", - "orig_keys": "image", - "meta_key_postfix": "meta_dict", - "nearest_interp": false, - "to_tensor": true - }, - { - "_target_": "AsDiscreted", - "keys": "pred", - "argmax": true - }, - { - "_target_": "SaveImaged", - "keys": "pred", - "meta_keys": "pred_meta_dict", - "output_dir": "eval" - } - ] - }, + "postprocessing": "%train#postprocessing", "handlers": [ { - "_target_": "CheckpointLoader", - "_requires_": "@download_ckpt", - "_disabled_": "$not os.path.exists(@ckpt_path)", - "load_path": "@ckpt_path", - "load_dict": {"model": "@network"} + "_target_": "StatsHandler", + "iteration_log": false }, { - "_target_": "StatsHandler", + "_target_": "TensorBoardStatsHandler", + "log_dir": "eval", "iteration_log": false + }, + { + "_target_": "CheckpointSaver", + "save_dir": "@ckpt_path", + "save_dict": {"model": "@network"}, + "save_key_metric": true } - ] - }, - "trainer": { - "_target_": "SupervisedTrainer", - "_requires_": ["determinism", "@cudnn_opt"], - "device": "@device", - "val_data_loader": "@dataloader", - "network": "@network", - "inferer": "@inferer", - "postprocessing": "@postprocessing", - "val_handlers": "@handlers", - "amp": false + ], + "key_metric": { + "val_mean_dice": { + "_target_": "MeanDice", + "include_background": false, + "output_transform": "$monai.handlers.from_engine(['pred', 'label'])" + } + }, + "additional_metrics": { + "val_accuracy": { + "_target_": "ignite.metrics.Accuracy", + "output_transform": "$monai.handlers.from_engine(['pred', 'label'])" + } + }, + "evaluator": { + "_target_": "SupervisedEvaluator", + "device": "@device", + "val_data_loader": "@validate#dataloader", + "network": "@network", + "inferer": "@validate#inferer", + "postprocessing": "@validate#postprocessing", + "key_val_metric": "@validate#key_metric", + "additional_metrics": "@validate#additional_metrics", + "val_handlers": "@validate#handlers", + "amp": true + } } } diff --git a/modules/bundles/spleen_segmentation/docs/README.md b/modules/bundles/spleen_segmentation/docs/README.md index 2c286b4609..0aa2e88c8a 100644 --- a/modules/bundles/spleen_segmentation/docs/README.md +++ b/modules/bundles/spleen_segmentation/docs/README.md @@ -23,6 +23,12 @@ This model achieves the following Dice score on the validation data (our own spl Mean Dice = 0.96 ## commands example +Execute training: + +``` +python -m monai.bundle run "'train#trainer'" --meta_file configs/metadata.json --config_file configs/train.json --logging_file configs/logging.conf +``` + Execute inference: ``` From 8f9a5b91742e799b4cd2476ef734a010853005af Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 29 Mar 2022 05:33:04 +0000 Subject: [PATCH 05/15] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- modules/bundles/spleen_segmentation/configs/train.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/bundles/spleen_segmentation/configs/train.json b/modules/bundles/spleen_segmentation/configs/train.json index 9436fd50b9..63b895f46f 100644 --- a/modules/bundles/spleen_segmentation/configs/train.json +++ 
b/modules/bundles/spleen_segmentation/configs/train.json @@ -112,7 +112,7 @@ "_target_": "AsDiscreted", "keys": ["pred", "label"], "argmax": [true, false], - "to_onehot": 2 + "to_onehot": 2 } ] }, @@ -230,6 +230,6 @@ "additional_metrics": "@validate#additional_metrics", "val_handlers": "@validate#handlers", "amp": true - } + } } } From fa52592ff3715491c5757319562d8ba210e641c4 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Tue, 29 Mar 2022 17:53:33 +0800 Subject: [PATCH 06/15] [DLMED] add training logic in tutorial Signed-off-by: Nic Ma --- modules/bundles/get_started.ipynb | 315 ++++++++++++++++++++++++++++-- 1 file changed, 303 insertions(+), 12 deletions(-) diff --git a/modules/bundles/get_started.ipynb b/modules/bundles/get_started.ipynb index 81c7c70843..1b794f7945 100644 --- a/modules/bundles/get_started.ipynb +++ b/modules/bundles/get_started.ipynb @@ -4,13 +4,23 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Overview\n", + "# Get started to MONAI bundle\n", "\n", "MONAI bundle usually includes the stored weights of a model, TorchScript model, JSON files that include configs and metadata about the model, information for constructing training, inference, and post-processing transform sequences, plain-text description, legal information, and other data the model creator wishes to include.\n", "\n", "For more information about MONAI bundle description: https://docs.monai.io/en/latest/mb_specification.html.\n", "\n", - "This notebook is step-by-step tutorial to help get started to develop a bundle package. It at least contains a `train.json` config file to construct the training pipeline and may also have a `metadata.json` file to define the metadata information.\n", + "This notebook is step-by-step tutorial to help get started to develop a bundle package. It contains a `train.json` config file to construct the training pipeline and also have a `metadata.json` file to define the metadata information.\n", + "\n", + "You can find the usage examples of MONAI bundle key features and syntax in this tutorial, like:\n", + "- Instantiate a python object from a dictionary config with `_target_` indicating class or function name or module path.\n", + "- Execute python expression from a string config with the `$` syntax.\n", + "- Refer to other object with the `@` syntax.\n", + "- Require other config items to execute or instantiate first with the `_requires_` syntax.\n", + "- Macro replacement with the `%` syntax to simplify the config content.\n", + "- Leverage the `_disabled_` syntax to tune or debug different components.\n", + "- Override config content at runtime.\n", + "- Hybrid programming with config and python code.\n", "\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Project-MONAI/tutorials/blob/master/modules/bundles/get_started.ipynb)" ] @@ -138,13 +148,11 @@ "\n", "Now let's start to define the `train.json` config file for a regular training task.\n", "\n", - "For more details about the syntax in bundle config, please check: https://github.com/wyli/MONAI/blob/3482-bundle-doc/docs/source/config_syntax.md.\n", + "According to the predefined syntax of MONAI bundle, `$` indicates an expression to evaluate, `@` refers to another object in the config content. 
For more details about the syntax in bundle config, please check: https://github.com/wyli/MONAI/blob/3482-bundle-doc/docs/source/config_syntax.md.\n", "\n", "Please note that MONAI bundle doesn't require any hard-code logic in the config, so users can define the config content in any structure.\n", "\n", - "For the first step, import `os` and `glob` to use in the expressions (start with `$`). Then define input / output environments and enable `cudnn.benchmark` for better performance.\n", - "\n", - "The train and validation image file names are organized into a list of dictionaries." + "For the first step, import `os` and `glob` to use in the expressions (start with `$`). Then define input / output environments and enable `cudnn.benchmark` for better performance." ] }, { @@ -156,15 +164,298 @@ "{\n", " \"imports\": [\n", " \"$import glob\",\n", - " \"$import os\"\n", + " \"$import os\",\n", + " \"$import ignite\"\n", " ],\n", - " \"device\": \"$torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\",\n", + " \"determinism\": \"$monai.utils.set_determinism(seed=123)\",\n", " \"cudnn_opt\": \"$setattr(torch.backends.cudnn, 'benchmark', True)\",\n", - " \"dataset_dir\": \"/workspace/data/Task09_Spleen\",\n", + " \"device\": \"$torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\",\n", " \"ckpt_path\": \"/workspace/data/models/model.pt\",\n", - " \"datalist\": \"$list(sorted(glob.glob(@dataset_dir + '/imagesTs/*.nii.gz')))\",\n", - " \"train_data\": \"$[{'image': i} for i in @datalist[:-9]]\",\n", - " \"val_data\": \"$[{'image': i} for i in @datalist[-9:]]\"\n", + " \"dataset_dir\": \"/workspace/data/Task09_Spleen\",\n", + " \"images\": \"$list(sorted(glob.glob(@dataset_dir + '/imagesTr/*.nii.gz')))\",\n", + " \"labels\": \"$list(sorted(glob.glob(@dataset_dir + '/labelsTr/*.nii.gz')))\"\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Define train config - Define network, optimizer, loss function\n", + "\n", + "Define `UNet` of MONAI as the training network, and use the `Adam` optimizer of PyTorch, `DiceCELoss` of MONAI.\n", + "\n", + "An instantiable config component uses `_target_` keyword to define the class / function name or module path, other keys are args for the component." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\"network_def\": {\n", + " \"_target_\": \"UNet\",\n", + " \"spatial_dims\": 3,\n", + " \"in_channels\": 1,\n", + " \"out_channels\": 2,\n", + " \"channels\": [16, 32, 64, 128, 256],\n", + " \"strides\": [2, 2, 2, 2],\n", + " \"num_res_units\": 2,\n", + " \"norm\": \"batch\"\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Move the network to the expected `device`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\"network\": \"$@network_def.to(@device)\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Define optimizer and loss function, for MONAI classes, we can use the class name directly, other classes should provide the module path (like `Adam`)." 
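For intuition, the `device` / `network_def` / `network` entries above and the `loss` / `optimizer` entries that follow correspond roughly to the plain Python below; this is only a sketch of what ends up being instantiated, not of how the config parser works internally:

```python
import torch
from monai.losses import DiceCELoss
from monai.networks.nets import UNet

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# "_target_": "UNet" resolves by class name because UNet is a MONAI class
network = UNet(
    spatial_dims=3,
    in_channels=1,
    out_channels=2,
    channels=(16, 32, 64, 128, 256),
    strides=(2, 2, 2, 2),
    num_res_units=2,
    norm="batch",
).to(device)

# MONAI classes can be referenced by name; others need the module path, e.g. torch.optim.Adam
loss = DiceCELoss(to_onehot_y=True, softmax=True, squared_pred=True, batch=True)
optimizer = torch.optim.Adam(network.parameters(), lr=1e-4)
```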
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\"loss\": {\n", + " \"_target_\": \"DiceCELoss\",\n", + " \"to_onehot_y\": true,\n", + " \"softmax\": true,\n", + " \"squared_pred\": true,\n", + " \"batch\": true\n", + "},\n", + "\"optimizer\": {\n", + " \"_target_\": \"torch.optim.Adam\",\n", + " \"params\": \"$@network.parameters()\",\n", + " \"lr\": 1e-4\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Define train config - Define data loading and preprocessing logic\n", + "\n", + "Define `transforms` and `dataset`, `dataloader` to generate training data for network.\n", + "\n", + "To make the config stucture clear, here we split the `train` and `validate` related components into 2 sections:\n", + "```\n", + "\"train\": {...},\n", + "\"validate\": {...}\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\"train\": {\n", + " \"preprocessing\": {\n", + " \"_target_\": \"Compose\",\n", + " \"transforms\": [\n", + " {\n", + " \"_target_\": \"LoadImaged\",\n", + " \"keys\": [\"image\", \"label\"]\n", + " },\n", + " {\n", + " \"_target_\": \"EnsureChannelFirstd\",\n", + " \"keys\": [\"image\", \"label\"]\n", + " },\n", + " {\n", + " \"_target_\": \"Orientationd\",\n", + " \"keys\": [\"image\", \"label\"],\n", + " \"axcodes\": \"RAS\"\n", + " },\n", + " {\n", + " \"_target_\": \"Spacingd\",\n", + " \"keys\": [\"image\", \"label\"],\n", + " \"pixdim\": [1.5, 1.5, 2.0],\n", + " \"mode\": [\"bilinear\", \"nearest\"]\n", + " },\n", + " {\n", + " \"_target_\": \"ScaleIntensityRanged\",\n", + " \"keys\": \"image\",\n", + " \"a_min\": -57,\n", + " \"a_max\": 164,\n", + " \"b_min\": 0,\n", + " \"b_max\": 1,\n", + " \"clip\": true\n", + " },\n", + " {\n", + " \"_target_\": \"RandCropByPosNegLabeld\",\n", + " \"keys\": [\"image\", \"label\"],\n", + " \"label_key\": \"label\",\n", + " \"spatial_size\": [96, 96, 96],\n", + " \"pos\": 1,\n", + " \"neg\": 1,\n", + " \"num_samples\": 4,\n", + " \"image_key\": \"image\",\n", + " \"image_threshold\": 0\n", + " },\n", + " {\n", + " \"_target_\": \"EnsureTyped\",\n", + " \"keys\": [\"image\", \"label\"]\n", + " }\n", + " ]\n", + " }\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The train and validation image file names are organized into a list of dictionaries." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\"dataset\": {\n", + " \"_target_\": \"CacheDataset\",\n", + " \"data\": \"$[{'image': i, 'label': l} for i, l in zip(@images[:-9], @labels[:-9])]\",\n", + " \"transform\": \"@train#preprocessing\",\n", + " \"cache_rate\": 1.0,\n", + " \"num_workers\": 4\n", + "},\n", + "\"dataloader\": {\n", + " \"_target_\": \"DataLoader\",\n", + " \"dataset\": \"@train#dataset\",\n", + " \"batch_size\": 2,\n", + " \"shuffle\": false,\n", + " \"num_workers\": 4\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Define train config - Define inference method, post-processing and event-handlers\n", + "\n", + "Here we use `SimpleInferer` to execute `forward()` computation for the network and add post-processing methods like `activation`, `argmax`, `one-hot`, etc. And logging into stdout and TensorBoard based on event handlers." 
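The `output_transform` entries below use `$monai.handlers.from_engine(...)` expressions; a small standalone sketch of what that helper does, applied to a toy decollated output rather than a real engine (the numbers are made up):

```python
from monai.handlers import from_engine

# engine.state.output after decollation is a list of per-item dictionaries
toy_output = [{"loss": 0.42}, {"loss": 0.37}]

# from_engine(["loss"], first=True) builds a callable that picks the "loss"
# value of the first item, which is what StatsHandler logs as train_loss
get_loss = from_engine(["loss"], first=True)
print(get_loss(toy_output))  # expected: 0.42
```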
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\"inferer\": {\n", + " \"_target_\": \"SimpleInferer\"\n", + "},\n", + "\"postprocessing\": {\n", + " \"_target_\": \"Compose\",\n", + " \"transforms\": [\n", + " {\n", + " \"_target_\": \"Activationsd\",\n", + " \"keys\": \"pred\",\n", + " \"softmax\": true\n", + " },\n", + " {\n", + " \"_target_\": \"AsDiscreted\",\n", + " \"keys\": [\"pred\", \"label\"],\n", + " \"argmax\": [true, false],\n", + " \"to_onehot\": 2\n", + " }\n", + " ]\n", + "},\n", + "\"handlers\": [\n", + " {\n", + " \"_target_\": \"StatsHandler\",\n", + " \"tag_name\": \"train_loss\",\n", + " \"output_transform\": \"$monai.handlers.from_engine(['loss'], first=True)\"\n", + " },\n", + " {\n", + " \"_target_\": \"TensorBoardStatsHandler\",\n", + " \"log_dir\": \"eval\",\n", + " \"tag_name\": \"train_loss\",\n", + " \"output_transform\": \"$monai.handlers.from_engine(['loss'], first=True)\"\n", + " }\n", + "]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Define train config - Define Accuracy metric for training data to avoid over-fitting\n", + "\n", + "Here we define the `Accuracy` metric to compute on training data to help check whether the converge is expected and avoid over-fitting. Note that it's not validation step during the training." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\"key_metric\": {\n", + " \"train_accuracy\": {\n", + " \"_target_\": \"ignite.metrics.Accuracy\",\n", + " \"output_transform\": \"$monai.handlers.from_engine(['pred', 'label'])\"\n", + " }\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Define train config - Define the trainer\n", + "\n", + "Here we use MONAI engine `SupervisedTrainer` to execute a regular training.\n", + "\n", + "`determinism` and `cudnn_opt` are not args of the trainer, but should execute them before training, so here mark them in the `_requires_` field.\n", + "\n", + "If users have customized logic, then can put the logic in the `iteration_update` arg or implement their own `trainer` in python code and set `_target_` to the class directly." 
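As a sketch of the second option, a customized trainer could subclass `SupervisedTrainer` in the user's own code and be referenced by its module path; the `my_project.trainers` module and class name below are made up for illustration:

```python
# my_project/trainers.py (hypothetical module)
from monai.engines import SupervisedTrainer


class MySupervisedTrainer(SupervisedTrainer):
    """Reuse the standard trainer but customize the per-iteration step."""

    def _iteration(self, engine, batchdata):
        # add customized logic before / after the default supervised step here
        return super()._iteration(engine, batchdata)
```

The config would then set `"_target_": "my_project.trainers.MySupervisedTrainer"` instead of `"SupervisedTrainer"`.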
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\"trainer\": {\n", + " \"_target_\": \"SupervisedTrainer\",\n", + " \"_requires_\": [\"@determinism\", \"@cudnn_opt\"],\n", + " \"max_epochs\": 100,\n", + " \"device\": \"@device\",\n", + " \"train_data_loader\": \"@train#dataloader\",\n", + " \"network\": \"@network\",\n", + " \"loss_function\": \"@loss\",\n", + " \"optimizer\": \"@optimizer\",\n", + " \"inferer\": \"@train#inferer\",\n", + " \"postprocessing\": \"@train#postprocessing\",\n", + " \"key_train_metric\": \"@train#key_metric\",\n", + " \"train_handlers\": \"@train#handlers\",\n", + " \"amp\": true\n", "}" ] }, From a987386f4390d67d96bb6dd4d732df58c5f980f4 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Tue, 29 Mar 2022 18:03:22 +0800 Subject: [PATCH 07/15] [DLMED] add validation section Signed-off-by: Nic Ma --- modules/bundles/get_started.ipynb | 42 ++++++++++++++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) diff --git a/modules/bundles/get_started.ipynb b/modules/bundles/get_started.ipynb index 1b794f7945..62c88cfb6b 100644 --- a/modules/bundles/get_started.ipynb +++ b/modules/bundles/get_started.ipynb @@ -10,7 +10,12 @@ "\n", "For more information about MONAI bundle description: https://docs.monai.io/en/latest/mb_specification.html.\n", "\n", - "This notebook is step-by-step tutorial to help get started to develop a bundle package. It contains a `train.json` config file to construct the training pipeline and also have a `metadata.json` file to define the metadata information.\n", + "This notebook is step-by-step tutorial to help get started to develop a bundle package, which contains a `train.json` config file to construct the training pipeline and also have a `metadata.json` file to define the metadata information.\n", + "\n", + "This notebook mainly contains below sections:\n", + "- Define a training config\n", + "- Execute training based on bundle scripts and configs\n", + "- Hybrid programming with config and python code\n", "\n", "You can find the usage examples of MONAI bundle key features and syntax in this tutorial, like:\n", "- Instantiate a python object from a dictionary config with `_target_` indicating class or function name or module path.\n", @@ -459,6 +464,41 @@ "}" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Define train config - Define the validation section\n", + "\n", + "Usually we need to execute validation for every N epochs during training to verify the model and save the best model.\n", + "\n", + "Here we don't define the `validate` section step by step as it's similar to the `train` section. The full config is available: \n", + "https://github.com/Project-MONAI/tutorials/blob/master/modules/bundles/spleen_segmentation/configs/train.json\n", + "\n", + "Just show an example of `macro replacement` to simplify the config content and avoid duplicated text. Please note that it's just token text replacement of the config content, not refer to the same objects." 
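A quick way to see the copy semantics, assuming the full config has been saved as `configs/train.json`: after parsing, the macro-replaced transform in `validate` is expected to be a separate object from the one in `train`.

```python
from monai.bundle import ConfigParser

parser = ConfigParser()
parser.read_config(f="configs/train.json")
parser.parse()

# "%train#preprocessing#transforms#0" was copied as text into the validate
# section, so it is instantiated again as an independent LoadImaged object
train_load = parser.get_parsed_content("train#preprocessing#transforms#0")
val_load = parser.get_parsed_content("validate#preprocessing#transforms#0")
print(train_load is val_load)  # expected: False
```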
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\"validate\": {\n", + " \"preprocessing\": {\n", + " \"_target_\": \"Compose\",\n", + " \"transforms\": [\n", + " \"%train#preprocessing#transforms#0\",\n", + " \"%train#preprocessing#transforms#1\",\n", + " \"%train#preprocessing#transforms#2\",\n", + " \"%train#preprocessing#transforms#3\",\n", + " \"%train#preprocessing#transforms#4\",\n", + " \"%train#preprocessing#transforms#6\"\n", + " ]\n", + " }\n", + "}" + ] + }, { "cell_type": "markdown", "metadata": {}, From 20efa230a38e3f24a5d77b14bbb7d37c0753ee59 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Tue, 29 Mar 2022 18:07:50 +0800 Subject: [PATCH 08/15] [DLMED] add metadata description Signed-off-by: Nic Ma --- modules/bundles/get_started.ipynb | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/modules/bundles/get_started.ipynb b/modules/bundles/get_started.ipynb index 62c88cfb6b..813e0685f7 100644 --- a/modules/bundles/get_started.ipynb +++ b/modules/bundles/get_started.ipynb @@ -499,6 +499,32 @@ "}" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Define metadata information\n", + "\n", + "Optinally, we can define a `metadata` file in the bundle, which contains the metadata information relating to the model, including what the shape and format of inputs and outputs are, what the meaning of the outputs are, what type of model is present, and other information. The structure is a dictionary containing a defined set of keys with additional user-specified keys.\n", + "\n", + "A typical `metadata` example is available: \n", + "https://github.com/Project-MONAI/tutorials/blob/master/modules/bundles/spleen_segmentation/configs/metadata.json" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, { "cell_type": "markdown", "metadata": {}, From 99096db3410a4bbd34d8485ec66b3aed418a9ac6 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Tue, 29 Mar 2022 18:49:27 +0800 Subject: [PATCH 09/15] [DLMED] add more scripts Signed-off-by: Nic Ma --- modules/bundles/get_started.ipynb | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/modules/bundles/get_started.ipynb b/modules/bundles/get_started.ipynb index 813e0685f7..f4be5c2c97 100644 --- a/modules/bundles/get_started.ipynb +++ b/modules/bundles/get_started.ipynb @@ -511,19 +511,43 @@ "https://github.com/Project-MONAI/tutorials/blob/master/modules/bundles/spleen_segmentation/configs/metadata.json" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Execute training with bundle script - `run`\n", + "\n", + "There are several predefined scripts in MONAI bundle module to help execute `regular training`, `metadata verification base on schema`, `network input / output verification`, `export to TorchScript model`, etc.\n", + "\n", + "Here we leverage the `run` script and specify the ID of trainer in the config." 
+ ] + }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], - "source": [] + "source": [ + "python -m monai.bundle run \"'train#trainer'\" --meta_file configs/metadata.json --config_file configs/train.json" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Execute training with bundle script - Override config at runtime\n", + "\n", + "To override some config items at runtime, users can specify the target `id` and `value` at command line, or override the `id` with some content in another config file." + ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], - "source": [] + "source": [ + "python -m monai.bundle run \"'train#trainer'\" --config_file configs/train.json " + ] }, { "cell_type": "markdown", From 31440771309f92aeda8f22e3af32ef84b4e7790a Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Tue, 29 Mar 2022 20:05:13 +0800 Subject: [PATCH 10/15] [DLMED] add hybrid programming Signed-off-by: Nic Ma --- modules/bundles/get_started.ipynb | 92 ++++++++++++++++++++++++++++++- 1 file changed, 89 insertions(+), 3 deletions(-) diff --git a/modules/bundles/get_started.ipynb b/modules/bundles/get_started.ipynb index f4be5c2c97..1db0248f9f 100644 --- a/modules/bundles/get_started.ipynb +++ b/modules/bundles/get_started.ipynb @@ -55,7 +55,9 @@ "outputs": [], "source": [ "import os\n", + "import shutil\n", "\n", + "from monai.apps import download_and_extract\n", "from monai.config import print_config\n", "from monai.bundle import ConfigParser" ] @@ -528,7 +530,7 @@ "metadata": {}, "outputs": [], "source": [ - "python -m monai.bundle run \"'train#trainer'\" --meta_file configs/metadata.json --config_file configs/train.json" + "python -m monai.bundle run \"'train#trainer'\" --config_file configs/train.json" ] }, { @@ -537,7 +539,7 @@ "source": [ "## Execute training with bundle script - Override config at runtime\n", "\n", - "To override some config items at runtime, users can specify the target `id` and `value` at command line, or override the `id` with some content in another config file." + "To override some config items at runtime, users can specify the target `id` and `value` at command line, or override the `id` with some content in another config file. Here we set the device to `cuda:1` at runtime." ] }, { @@ -546,7 +548,91 @@ "metadata": {}, "outputs": [], "source": [ - "python -m monai.bundle run \"'train#trainer'\" --config_file configs/train.json " + "python -m monai.bundle run \"'train#trainer'\" --config_file configs/train.json --device \"\\$torch.device('cuda:1')\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Override content from another config file." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "python -m monai.bundle run \"'train#trainer'\" --config_file configs/train.json --network \"%configs/test.json#network\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Hybrid programming with config and python code\n", + "\n", + "MONAI bundle is flexible to support customized logic, there are several ways to achieve that:\n", + "- If defining own components like transform, loss, trainer, etc. in a python file, just use its module path in `_target_`.\n", + "- Parse the config in your own python program and do lazy instantiation with customized logic.\n", + "\n", + "Here we show an example to parse the config in python code and execute the training." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "parser = ConfigParser()\n", + "parser.read_config(f=\"configs/train.json\")\n", + "parser.read_meta(f=\"configs/metadata.json\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`get`/`set` configuration content, the `set` method should happen before calling `parse()`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# original input channels 1\n", + "print(parser[\"network_def\"][\"in_channels\"])\n", + "# change input channels to 4\n", + "parser[\"network_def\"][\"in_channels\"] = 4\n", + "print(parser[\"network_def\"][\"in_channels\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Parse the config content and instantiate components." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# parse the structured config content\n", + "parser.parse()\n", + "# instantiate the network component and print the network structure\n", + "net = parser.get_parsed_content(\"network\")\n", + "print(net)\n", + "\n", + "# execute training\n", + "trainer = parser.get_parsed_content(\"train#trainer\")\n", + "trainer.run()" ] }, { From 3dba9a4a66a7ef4b4cf642277341f6c01cc59eaf Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Tue, 29 Mar 2022 20:19:00 +0800 Subject: [PATCH 11/15] [DLMED] fix doc Signed-off-by: Nic Ma --- README.md | 5 ++--- modules/bundles/get_started.ipynb | 28 ++++++++++------------------ runner.sh | 1 + 3 files changed, 13 insertions(+), 21 deletions(-) diff --git a/README.md b/README.md index 424c526d53..900a839eaf 100644 --- a/README.md +++ b/README.md @@ -178,17 +178,16 @@ Demonstrates the use of the `ThreadBuffer` class used to generate data batches d Illustrate reading NIfTI files and test speed of different transforms on different devices. **modules** +#### [engines](./modules/bundles) +Get started tutorial and concrete training / inference examples for MONAI bundle features. #### [engines](./modules/engines) Training and evaluation examples of 3D segmentation based on UNet3D and synthetic dataset with MONAI workflows, which contains engines, event-handlers, and post-transforms. And GAN training and evaluation example for a medical image generative adversarial network. Easy run training script uses `GanTrainer` to train a 2D CT scan reconstruction network. Evaluation script generates random samples from a trained network. The examples are built with MONAI workflows, mainly contain: trainer/evaluator, handlers, post_transforms, etc. #### [3d_image_transforms](./modules/3d_image_transforms.ipynb) This notebook demonstrates the transformations on volumetric images. - #### [2d_inference_3d_volume](./modules/2d_inference_3d_volume.ipynb) Tutorial that demonstrates how monai `SlidingWindowInferer` can be used when a 3D volume input needs to be provided slice-by-slice to a 2D model and finally, aggregated into a 3D volume. - - #### [autoencoder_mednist](./modules/autoencoder_mednist.ipynb) This tutorial uses the MedNIST hand CT scan dataset to demonstrate MONAI's autoencoder class. The autoencoder is used with an identity encode/decode (i.e., what you put in is what you should get back), as well as demonstrating its usage for de-blurring and de-noising. 
#### [batch_output_transform](./modules/batch_output_transform.py) diff --git a/modules/bundles/get_started.ipynb b/modules/bundles/get_started.ipynb index 1db0248f9f..7e1cb49f84 100644 --- a/modules/bundles/get_started.ipynb +++ b/modules/bundles/get_started.ipynb @@ -46,22 +46,6 @@ "!python -c \"import monai\" || pip install -q \"monai-weekly[nibabel]\"" ] }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "import os\n", - "import shutil\n", - "\n", - "from monai.apps import download_and_extract\n", - "from monai.config import print_config\n", - "from monai.bundle import ConfigParser" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -88,6 +72,14 @@ "# See the License for the specific language governing permissions and\n", "# limitations under the License.\n", "\n", + "import os\n", + "import shutil\n", + "\n", + "from monai.apps import download_and_extract\n", + "from monai.config import print_config\n", + "from monai.bundle import ConfigParser\n", + "\n", + "\n", "print_config()" ] }, @@ -102,7 +94,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 2, "metadata": { "tags": [] }, @@ -132,7 +124,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 3, "metadata": { "tags": [] }, diff --git a/runner.sh b/runner.sh index 2072f507b5..9290cbb3c6 100755 --- a/runner.sh +++ b/runner.sh @@ -38,6 +38,7 @@ doesnt_contain_max_epochs=("${doesnt_contain_max_epochs[@]}" tcia_csv_processing doesnt_contain_max_epochs=("${doesnt_contain_max_epochs[@]}" transform_visualization.ipynb) doesnt_contain_max_epochs=("${doesnt_contain_max_epochs[@]}" 2d_inference_3d_volume.ipynb) doesnt_contain_max_epochs=("${doesnt_contain_max_epochs[@]}" resample_benchmark.ipynb) +doesnt_contain_max_epochs=("${doesnt_contain_max_epochs[@]}" get_started.ipynb) # output formatting separator="" From 62da8d24ba4b77a7e311a1e7927de249abf39bb2 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Tue, 29 Mar 2022 22:20:19 +0800 Subject: [PATCH 12/15] [DLMED] test PEP8 Signed-off-by: Nic Ma --- modules/bundles/get_started.ipynb | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/bundles/get_started.ipynb b/modules/bundles/get_started.ipynb index 7e1cb49f84..7610ae9b52 100644 --- a/modules/bundles/get_started.ipynb +++ b/modules/bundles/get_started.ipynb @@ -155,11 +155,10 @@ ] }, { - "cell_type": "code", - "execution_count": 6, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ + "```\n", "{\n", " \"imports\": [\n", " \"$import glob\",\n", @@ -173,7 +172,8 @@ " \"dataset_dir\": \"/workspace/data/Task09_Spleen\",\n", " \"images\": \"$list(sorted(glob.glob(@dataset_dir + '/imagesTr/*.nii.gz')))\",\n", " \"labels\": \"$list(sorted(glob.glob(@dataset_dir + '/labelsTr/*.nii.gz')))\"\n", - "}" + "}\n", + "```" ] }, { From 44dccc29a05cd3c357ffb92db15c7f7e6714b2b8 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Tue, 29 Mar 2022 22:46:46 +0800 Subject: [PATCH 13/15] [DLMED] update according to comments Signed-off-by: Nic Ma --- modules/bundles/get_started.ipynb | 109 ++++++++++++------------------ 1 file changed, 44 insertions(+), 65 deletions(-) diff --git a/modules/bundles/get_started.ipynb b/modules/bundles/get_started.ipynb index 7610ae9b52..71cc62ce95 100644 --- a/modules/bundles/get_started.ipynb +++ b/modules/bundles/get_started.ipynb @@ -10,10 +10,10 @@ "\n", "For more information about MONAI bundle description: https://docs.monai.io/en/latest/mb_specification.html.\n", "\n", - 
"This notebook is step-by-step tutorial to help get started to develop a bundle package, which contains a `train.json` config file to construct the training pipeline and also have a `metadata.json` file to define the metadata information.\n", + "This notebook is step-by-step tutorial to help get started to develop a bundle package, which contains a config file to construct the training pipeline and also have a `metadata.json` file to define the metadata information.\n", "\n", "This notebook mainly contains below sections:\n", - "- Define a training config\n", + "- Define a training config `JSON` or `YAML` format\n", "- Execute training based on bundle scripts and configs\n", "- Hybrid programming with config and python code\n", "\n", @@ -73,7 +73,6 @@ "# limitations under the License.\n", "\n", "import os\n", - "import shutil\n", "\n", "from monai.apps import download_and_extract\n", "from monai.config import print_config\n", @@ -145,7 +144,7 @@ "source": [ "## Define train config - Set imports and input / output environments\n", "\n", - "Now let's start to define the `train.json` config file for a regular training task.\n", + "Now let's start to define the config file for a regular training task. MONAI bundle support both `JSON` and `YAML` format, here we use `JSON` as example.\n", "\n", "According to the predefined syntax of MONAI bundle, `$` indicates an expression to evaluate, `@` refers to another object in the config content. For more details about the syntax in bundle config, please check: https://github.com/wyli/MONAI/blob/3482-bundle-doc/docs/source/config_syntax.md.\n", "\n", @@ -188,11 +187,10 @@ ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ + "```\n", "\"network_def\": {\n", " \"_target_\": \"UNet\",\n", " \"spatial_dims\": 3,\n", @@ -202,7 +200,8 @@ " \"strides\": [2, 2, 2, 2],\n", " \"num_res_units\": 2,\n", " \"norm\": \"batch\"\n", - "}" + "}\n", + "```" ] }, { @@ -213,12 +212,12 @@ ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "\"network\": \"$@network_def.to(@device)\"" + "```\n", + "\"network\": \"$@network_def.to(@device)\"\n", + "```" ] }, { @@ -229,11 +228,10 @@ ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ + "```\n", "\"loss\": {\n", " \"_target_\": \"DiceCELoss\",\n", " \"to_onehot_y\": true,\n", @@ -245,7 +243,8 @@ " \"_target_\": \"torch.optim.Adam\",\n", " \"params\": \"$@network.parameters()\",\n", " \"lr\": 1e-4\n", - "}" + "}\n", + "```" ] }, { @@ -260,15 +259,15 @@ "```\n", "\"train\": {...},\n", "\"validate\": {...}\n", - "```" + "```\n", + "The composed transforms are for preprocessing." 
] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ + "```\n", "\"train\": {\n", " \"preprocessing\": {\n", " \"_target_\": \"Compose\",\n", @@ -318,7 +317,8 @@ " }\n", " ]\n", " }\n", - "}" + "}\n", + "```" ] }, { @@ -329,11 +329,10 @@ ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ + "```\n", "\"dataset\": {\n", " \"_target_\": \"CacheDataset\",\n", " \"data\": \"$[{'image': i, 'label': l} for i, l in zip(@images[:-9], @labels[:-9])]\",\n", @@ -347,7 +346,8 @@ " \"batch_size\": 2,\n", " \"shuffle\": false,\n", " \"num_workers\": 4\n", - "}" + "}\n", + "```" ] }, { @@ -360,11 +360,10 @@ ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ + "```\n", "\"inferer\": {\n", " \"_target_\": \"SimpleInferer\"\n", "},\n", @@ -396,7 +395,8 @@ " \"tag_name\": \"train_loss\",\n", " \"output_transform\": \"$monai.handlers.from_engine(['loss'], first=True)\"\n", " }\n", - "]" + "]\n", + "```" ] }, { @@ -409,17 +409,17 @@ ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ + "```\n", "\"key_metric\": {\n", " \"train_accuracy\": {\n", " \"_target_\": \"ignite.metrics.Accuracy\",\n", " \"output_transform\": \"$monai.handlers.from_engine(['pred', 'label'])\"\n", " }\n", - "}" + "}\n", + "```" ] }, { @@ -436,11 +436,10 @@ ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ + "```\n", "\"trainer\": {\n", " \"_target_\": \"SupervisedTrainer\",\n", " \"_requires_\": [\"@determinism\", \"@cudnn_opt\"],\n", @@ -455,7 +454,8 @@ " \"key_train_metric\": \"@train#key_metric\",\n", " \"train_handlers\": \"@train#handlers\",\n", " \"amp\": true\n", - "}" + "}\n", + "```" ] }, { @@ -473,11 +473,10 @@ ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ + "```\n", "\"validate\": {\n", " \"preprocessing\": {\n", " \"_target_\": \"Compose\",\n", @@ -490,7 +489,8 @@ " \"%train#preprocessing#transforms#6\"\n", " ]\n", " }\n", - "}" + "}\n", + "```" ] }, { @@ -522,7 +522,7 @@ "metadata": {}, "outputs": [], "source": [ - "python -m monai.bundle run \"'train#trainer'\" --config_file configs/train.json" + "%python -m monai.bundle run \"'train#trainer'\" --config_file configs/train.json" ] }, { @@ -540,7 +540,7 @@ "metadata": {}, "outputs": [], "source": [ - "python -m monai.bundle run \"'train#trainer'\" --config_file configs/train.json --device \"\\$torch.device('cuda:1')\"" + "%python -m monai.bundle run \"'train#trainer'\" --config_file configs/train.json --device \"\\$torch.device('cuda:1')\"" ] }, { @@ -556,7 +556,7 @@ "metadata": {}, "outputs": [], "source": [ - "python -m monai.bundle run \"'train#trainer'\" --config_file configs/train.json --network \"%configs/test.json#network\"" + "%python -m monai.bundle run \"'train#trainer'\" --config_file configs/train.json --network \"%configs/test.json#network\"" ] }, { @@ -626,27 +626,6 @@ "trainer = parser.get_parsed_content(\"train#trainer\")\n", "trainer.run()" ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Cleanup data directory\n", - "\n", - "Remove directory if a temporary was used." 
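The CLI override of `--device` shown above can also be expressed with the hybrid-programming approach; this is a hedged sketch assuming `ConfigParser` supports `read_config` and item assignment as in recent MONAI releases (the `configs/train.json` path follows this tutorial's bundle layout):

```python
from monai.bundle import ConfigParser

parser = ConfigParser()
parser.read_config("configs/train.json")  # the train config defined in this tutorial

# override a config item at runtime, the python-side equivalent of the CLI flag
# --device "$torch.device('cuda:1')" used above
parser["device"] = "$torch.device('cuda:1')"

trainer = parser.get_parsed_content("train#trainer")  # parsed with the override applied
trainer.run()
```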
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "if directory is None:\n", - " shutil.rmtree(root_dir)" - ] } ], "metadata": { From ead4c20d5390996b549d8f51f34d3199073a4c37 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Wed, 30 Mar 2022 19:39:55 +0800 Subject: [PATCH 14/15] [DLMED] update doc Signed-off-by: Nic Ma --- modules/bundles/get_started.ipynb | 32 +++++++++---------- .../spleen_segmentation/configs/metadata.json | 2 +- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/modules/bundles/get_started.ipynb b/modules/bundles/get_started.ipynb index 71cc62ce95..e318cb92df 100644 --- a/modules/bundles/get_started.ipynb +++ b/modules/bundles/get_started.ipynb @@ -13,16 +13,16 @@ "This notebook is step-by-step tutorial to help get started to develop a bundle package, which contains a config file to construct the training pipeline and also have a `metadata.json` file to define the metadata information.\n", "\n", "This notebook mainly contains below sections:\n", - "- Define a training config `JSON` or `YAML` format\n", + "- Define a training config with `JSON` or `YAML` format\n", "- Execute training based on bundle scripts and configs\n", "- Hybrid programming with config and python code\n", "\n", "You can find the usage examples of MONAI bundle key features and syntax in this tutorial, like:\n", "- Instantiate a python object from a dictionary config with `_target_` indicating class or function name or module path.\n", "- Execute python expression from a string config with the `$` syntax.\n", - "- Refer to other object with the `@` syntax.\n", - "- Require other config items to execute or instantiate first with the `_requires_` syntax.\n", - "- Macro replacement with the `%` syntax to simplify the config content.\n", + "- Refer to other python object with the `@` syntax.\n", + "- Require other independent config items to execute or instantiate first with the `_requires_` syntax.\n", + "- Macro text replacement with the `%` syntax to simplify the config content.\n", "- Leverage the `_disabled_` syntax to tune or debug different components.\n", "- Override config content at runtime.\n", "- Hybrid programming with config and python code.\n", @@ -157,7 +157,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "```\n", + "```json\n", "{\n", " \"imports\": [\n", " \"$import glob\",\n", @@ -190,7 +190,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "```\n", + "```json\n", "\"network_def\": {\n", " \"_target_\": \"UNet\",\n", " \"spatial_dims\": 3,\n", @@ -215,7 +215,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "```\n", + "```json\n", "\"network\": \"$@network_def.to(@device)\"\n", "```" ] @@ -231,7 +231,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "```\n", + "```json\n", "\"loss\": {\n", " \"_target_\": \"DiceCELoss\",\n", " \"to_onehot_y\": true,\n", @@ -256,7 +256,7 @@ "Define `transforms` and `dataset`, `dataloader` to generate training data for network.\n", "\n", "To make the config stucture clear, here we split the `train` and `validate` related components into 2 sections:\n", - "```\n", + "```json\n", "\"train\": {...},\n", "\"validate\": {...}\n", "```\n", @@ -267,7 +267,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "```\n", + "```json\n", "\"train\": {\n", " \"preprocessing\": {\n", " \"_target_\": \"Compose\",\n", @@ -332,7 +332,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "```\n", + "```json\n", "\"dataset\": {\n", " 
\"_target_\": \"CacheDataset\",\n", " \"data\": \"$[{'image': i, 'label': l} for i, l in zip(@images[:-9], @labels[:-9])]\",\n", @@ -363,7 +363,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "```\n", + "```json\n", "\"inferer\": {\n", " \"_target_\": \"SimpleInferer\"\n", "},\n", @@ -412,7 +412,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "```\n", + "```json\n", "\"key_metric\": {\n", " \"train_accuracy\": {\n", " \"_target_\": \"ignite.metrics.Accuracy\",\n", @@ -439,7 +439,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "```\n", + "```json\n", "\"trainer\": {\n", " \"_target_\": \"SupervisedTrainer\",\n", " \"_requires_\": [\"@determinism\", \"@cudnn_opt\"],\n", @@ -469,14 +469,14 @@ "Here we don't define the `validate` section step by step as it's similar to the `train` section. The full config is available: \n", "https://github.com/Project-MONAI/tutorials/blob/master/modules/bundles/spleen_segmentation/configs/train.json\n", "\n", - "Just show an example of `macro replacement` to simplify the config content and avoid duplicated text. Please note that it's just token text replacement of the config content, not refer to the same objects." + "Just show an example of `macro text replacement` to simplify the config content and avoid duplicated text. Please note that it's just token text replacement of the config content, not refer to the instantiated python objects." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "```\n", + "```json\n", "\"validate\": {\n", " \"preprocessing\": {\n", " \"_target_\": \"Compose\",\n", diff --git a/modules/bundles/spleen_segmentation/configs/metadata.json b/modules/bundles/spleen_segmentation/configs/metadata.json index 7a53694758..c12ae411f1 100644 --- a/modules/bundles/spleen_segmentation/configs/metadata.json +++ b/modules/bundles/spleen_segmentation/configs/metadata.json @@ -1,5 +1,5 @@ { - "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_202203171008.json", + "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_20220324.json", "version": "0.1.0", "changelog": { "0.1.0": "complete the model package", From f540112b630fa86f7922af97fbd4b4314c9c45b2 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Wed, 30 Mar 2022 23:37:01 +0800 Subject: [PATCH 15/15] [DLMED] update links Signed-off-by: Nic Ma --- modules/bundles/get_started.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/bundles/get_started.ipynb b/modules/bundles/get_started.ipynb index e318cb92df..3e878aa2d9 100644 --- a/modules/bundles/get_started.ipynb +++ b/modules/bundles/get_started.ipynb @@ -8,7 +8,7 @@ "\n", "MONAI bundle usually includes the stored weights of a model, TorchScript model, JSON files that include configs and metadata about the model, information for constructing training, inference, and post-processing transform sequences, plain-text description, legal information, and other data the model creator wishes to include.\n", "\n", - "For more information about MONAI bundle description: https://docs.monai.io/en/latest/mb_specification.html.\n", + "For more information about MONAI bundle description: https://docs.monai.io/en/latest/bundle_intro.html.\n", "\n", "This notebook is step-by-step tutorial to help get started to develop a bundle package, which contains a config file to construct the training pipeline and also have a `metadata.json` file to define the metadata information.\n", "\n", @@ -146,7 +146,7 
@@ "\n", "Now let's start to define the config file for a regular training task. MONAI bundle support both `JSON` and `YAML` format, here we use `JSON` as example.\n", "\n", - "According to the predefined syntax of MONAI bundle, `$` indicates an expression to evaluate, `@` refers to another object in the config content. For more details about the syntax in bundle config, please check: https://github.com/wyli/MONAI/blob/3482-bundle-doc/docs/source/config_syntax.md.\n", + "According to the predefined syntax of MONAI bundle, `$` indicates an expression to evaluate, `@` refers to another object in the config content. For more details about the syntax in bundle config, please check: https://docs.monai.io/en/latest/config_syntax.html.\n", "\n", "Please note that MONAI bundle doesn't require any hard-code logic in the config, so users can define the config content in any structure.\n", "\n",