diff --git a/.readthedocs.yml b/.readthedocs.yml index 0fa357b9ca..9a68f0626e 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -7,7 +7,7 @@ version: 2 # Build documentation in the docs/ directory with Sphinx sphinx: - configuration: docs/conf.py + configuration: docs/source/conf.py # Build documentation with MkDocs #mkdocs: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 043d6c20dd..fa7527db87 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -57,6 +57,15 @@ License information: all source code files should start with this paragraph: ``` +### Building the documentation +To build the documentation with Sphinx from the `docs/` folder: +```bash +cd docs/ +make html +``` +The above commands build the HTML documentation. Type `make help` for all supported formats, +and `make clean` to remove the current build files. + ## Unit testing MONAI tests are located under `tests/`. diff --git a/README.md b/README.md index ac077c5ebd..3fc9c31f87 100644 --- a/README.md +++ b/README.md @@ -1,243 +1,38 @@ # Project MONAI -**M**edical **O**pen **N**etwork for **AI** - _Toolkit for Healthcare Imaging_ +**M**edical **O**pen **N**etwork for **AI** -_Contact: _ +[![License](https://img.shields.io/badge/License-Apache%202.0-green.svg)](https://opensource.org/licenses/Apache-2.0) [![pipeline status](https://gitlab.com/project-monai/MONAI/badges/master/pipeline.svg)](https://github.com/Project-MONAI/MONAI/commits/master) [![Documentation Status](https://readthedocs.org/projects/monai/badge/?version=latest)](https://monai.readthedocs.io/en/latest/?badge=latest) [![coverage report](https://gitlab.com/project-monai/MONAI/badges/master/coverage.svg)](https://gitlab.com/project-monai/MONAI/pipelines/) -This document identifies key concepts of project MONAI at a high level, the goal is to facilitate further technical discussions of requirements,roadmap, feasibility and trade-offs. -## Vision - * Develop a community of academic, industrial and clinical researchers collaborating and working on a common foundation of standardized tools. - * Create a state-of-the-art, end-to-end training toolkit for healthcare imaging. - * Provide academic and industrial researchers with the optimized and standardized way to create and evaluate models +MONAI is a [PyTorch](https://pytorch.org/)-based, [open-source](https://github.com/Project-MONAI/MONAI/blob/master/LICENSE) platform for deep learning in healthcare imaging. Its ambitions are: +- developing a community of academic, industrial and clinical researchers collaborating on a common foundation; +- creating state-of-the-art, end-to-end training workflows for healthcare imaging; +- providing researchers with optimized and standardized ways to create and evaluate deep learning models. -## Targeted users - * Primarily focused on the healthcare researchers who develop DL models for medical imaging -## Goals - * Deliver domain-specific workflow capabilities - * Address the end-end “Pain points” when creating medical imaging deep learning workflows. - * Provide a robust foundation with a performance optimized system software stack that allows researchers to focus on the research and not worry about software development principles. +## Features > _The codebase is currently under active development._ -## Guiding principles ### Modularity - * Pythonic -- object oriented components - * Compositional -- can combine components to create workflows - * Extensible -- easy to create new components and extend existing components - * Easy to debug -- loosely coupled, easy to follow code (e.g.
in eager or graph mode) - * Flexible -- interfaces for easy integration of external modules -### User friendly - * Portable -- use components/workflows via Python “import” - * Run well-known baseline workflows in a few commands - * Access to the well-known public datasets in a few lines of code -### Standardisation - * Unified/consistent component APIs with documentation specifications - * Unified/consistent data and model formats, compatible with other existing standards -### High quality - * Consistent coding style - extensive documentation - tutorials - contributors’ guidelines - * Reproducibility -- e.g. system-specific deterministic training -### Future proof - * Task scalability -- both in datasets and computational resources - * Support for advanced data structures -- e.g. graphs/structured text documents -### Leverage existing high-quality software packages whenever possible - * E.g. low-level medical image format reader, image preprocessing with external packages - * Rigorous risk analysis of choice of foundational software dependencies -### Compatible with external software - * E.g. data visualisation, experiments tracking, management, orchestration +- flexible pre-processing for multi-dimensional medical imaging data; +- compositional & portable APIs for ease of integration in existing workflows; +- domain-specific implementations for networks, losses, evaluation metrics and more; +- customizable design for varying user expertise; +- multi-GPU data parallelism support. -## Key capabilities +## Getting Started - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Basic features - Example - Notes -
Ready-to-use workflows - Volumetric image segmentation - “Bring your own dataset” -
Baseline/reference network architectures - Provide an option to use “U-Net” - -
Intuitive command-line interfaces - - -
Multi-gpu training - Configure the workflow to run data parallel training - -
+Tutorials & examples are located at [monai/examples](https://github.com/Project-MONAI/MONAI/tree/master/examples). +Technical documentation is available via [Read the Docs](https://monai.readthedocs.io/en/latest/). +## Contributing +For guidance on making a contribution to MONAI, see the [contributing guidelines](https://github.com/Project-MONAI/MONAI/blob/master/CONTRIBUTING.md). - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Customisable Python interfaces - Example - Notes -
Training/validation strategies - Schedule a strategy of alternating between generator and discriminator model training - -
Network architectures - Define new networks w/ the recent “Squeeze-and-Excitation” blocks - “Bring your own model” -
Data preprocessors - Define a new reader to read training data from a database system - -
Adaptive training schedule - Stop training when the loss becomes “NaN” - “Callbacks” -
Configuration-driven workflow assembly - Making workflow instances from configuration file - Convenient for managing hyperparameters -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Model sharing & transfer learning - Example - Notes -
Sharing model parameters, hyperparameter configurations - Standardisation of model archiving format - -
Model optimisation for deployment - - -
Fine-tuning from pre-trained models - Model compression, TensorRT - -
Model interpretability - Visualising feature maps of a trained model - -
Experiment tracking & management - - https://polyaxon.com/ -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Advanced features - Example - Notes -
Compatibility with external toolkits - XNAT as data source, ITK as preprocessor - -
Advanced learning strategies - Semi-supervised, active learning - -
High performance preprocessors - Smart caching, multi-process - -
Multi-node distributed training - - -
- +## Links +- Website: _(coming soon)_ +- API documentation: https://monai.readthedocs.io/en/latest/ +- Code: https://github.com/Project-MONAI/MONAI +- Project tracker: https://github.com/Project-MONAI/MONAI/projects +- Issue tracker: https://github.com/Project-MONAI/MONAI/issues +- Wiki: https://github.com/Project-MONAI/MONAI/wiki +- Test status: https://gitlab.com/project-monai/MONAI/pipelines diff --git a/docs/Makefile b/docs/Makefile index e3e3658fe5..bea205e654 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -6,7 +6,7 @@ SPHINXOPTS ?= SPHINXBUILD ?= sphinx-build SOURCEDIR = source -BUILDDIR = ../docs +BUILDDIR = build # Put it first so that "make" without argument is like "make help". help: @@ -17,12 +17,8 @@ help: # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile - sphinx-apidoc -f -o "$(SOURCEDIR)"/apidocs ../monai - rm -rf ../docs/* @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - mv ../docs/html/* ../docs/ - rm -rf ../docs/html ../docs/doctrees clean: - rm -rf ../docs/* + rm -rf build/ rm -rf source/apidocs diff --git a/docs/images/end_to_end_process.png b/docs/images/end_to_end_process.png new file mode 100644 index 0000000000..e837f64a93 Binary files /dev/null and b/docs/images/end_to_end_process.png differ diff --git a/docs/images/sliding_window.png b/docs/images/sliding_window.png new file mode 100644 index 0000000000..3cd3aeea10 Binary files /dev/null and b/docs/images/sliding_window.png differ diff --git a/docs/index.rst b/docs/index.rst deleted file mode 100644 index 9abd415f49..0000000000 --- a/docs/index.rst +++ /dev/null @@ -1,21 +0,0 @@ -.. MONAI documentation master file, created by - sphinx-quickstart on Wed Feb 5 09:40:29 2020. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -Welcome to MONAI's documentation! -================================= - -.. toctree:: - :maxdepth: 2 - :caption: Contents: - - MONAI API reference - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/docs/conf.py b/docs/source/conf.py similarity index 73% rename from docs/conf.py rename to docs/source/conf.py index 5cebe24689..338e9661b4 100644 --- a/docs/conf.py +++ b/docs/source/conf.py @@ -14,6 +14,7 @@ import sys import subprocess sys.path.insert(0, os.path.abspath('..')) +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))) print(sys.path) @@ -21,16 +22,30 @@ # -- Project information ----------------------------------------------------- project = 'MONAI' -copyright = '2020, MONAI Consortium' -author = 'MONAI Consortium' +copyright = '2020, MONAI Contributors' +author = 'MONAI Contributors' # The full version, including alpha/beta/rc tags -release = 'v0.1' -version = 'v0.1' +release = 'public alpha' +version = 'public alpha' + + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. 
+exclude_patterns = [os.path.join('transforms', 'compose.py'), + os.path.join('transforms', 'adaptors.py'), + os.path.join('transforms', 'composables.py'), + os.path.join('transforms', 'transforms.py'), + os.path.join('networks', 'blocks'), + os.path.join('networks', 'layers'), + os.path.join('networks', 'nets'), + 'metrics', 'engine', 'data', 'handlers', 'losses', 'visualize', 'utils', 'tests'] + def generate_apidocs(*args): """Generate API docs automatically by trawling the available modules""" - module_path = os.path.abspath('..') + module_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'monai')) output_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'apidocs')) apidoc_command_path = 'sphinx-apidoc' if hasattr(sys, 'real_prefix'): # called from a virtualenv @@ -39,9 +54,10 @@ def generate_apidocs(*args): print('output_path {}'.format(output_path)) print('module_path {}'.format(module_path)) subprocess.check_call( - [apidoc_command_path, '-f'] + + [apidoc_command_path, '-f', '-e'] + ['-o', output_path] + - [module_path]) + [module_path] + + [os.path.join(module_path, p) for p in exclude_patterns]) def setup(app): @@ -77,11 +93,6 @@ def setup(app): # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This pattern also affects html_static_path and html_extra_path. -exclude_patterns = [] - # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for @@ -96,11 +107,18 @@ def setup(app): 'sticky_navigation': True, # Set to False to disable the sticky nav while scrolling. # 'logo_only': True, # if we have a html_logo below, this shows /only/ the logo with no title text } +html_context = { + 'display_github': True, + 'github_user': 'Project-MONAI', + 'github_repo': 'MONAI', + 'github_version': 'master', + 'conf_py_path': '/docs/source/', +} html_scaled_image_link = False -html_show_sourcelink = True -html_favicon = 'favicon.ico' +html_show_sourcelink = True +# html_favicon = 'favicon.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +# html_static_path = ['_static'] diff --git a/docs/source/data.rst b/docs/source/data.rst new file mode 100644 index 0000000000..8cd41dc58b --- /dev/null +++ b/docs/source/data.rst @@ -0,0 +1,55 @@ +:github_url: https://github.com/Project-MONAI/MONAI + +.. _data: + +Data +==== + +Generic Interfaces +------------------ +.. automodule:: monai.data.dataset +.. currentmodule:: monai.data.dataset + +`Dataset` +~~~~~~~~~ +.. autoclass:: Dataset + :members: + :special-members: __getitem__ + + +Patch-based dataset +------------------- + +`GridPatchDataset` +~~~~~~~~~~~~~~~~~~ +.. automodule:: monai.data.grid_dataset +.. currentmodule:: monai.data.grid_dataset +.. autoclass:: GridPatchDataset + :members: + + +NIfTI format handling +--------------------- + +Reading +~~~~~~~ +.. automodule:: monai.data.nifti_reader + :members: + +Writing +~~~~~~~ +.. automodule:: monai.data.nifti_writer + :members: + + +Synthetic +--------- +.. automodule:: monai.data.synthetic + :members: + + +Utilities +--------- +..
automodule:: monai.data.utils + :members: + diff --git a/docs/source/engines.rst b/docs/source/engines.rst new file mode 100644 index 0000000000..e5cbc64b02 --- /dev/null +++ b/docs/source/engines.rst @@ -0,0 +1,12 @@ +:github_url: https://github.com/Project-MONAI/MONAI + +.. _engines: + +Engines +======= + +Multi-GPU data parallel +----------------------- + +.. automodule:: monai.engine.multi_gpu_supervised_trainer + :members: diff --git a/docs/source/handlers.rst b/docs/source/handlers.rst new file mode 100644 index 0000000000..811a7e2d6a --- /dev/null +++ b/docs/source/handlers.rst @@ -0,0 +1,41 @@ +:github_url: https://github.com/Project-MONAI/MONAI + +.. _handlers: + +Event handlers +============== + +Checkpoint loader +----------------- +.. automodule:: monai.handlers.checkpoint_loader + :members: + +CSV saver +--------- +.. automodule:: monai.handlers.classification_saver + :members: + +Mean Dice metrics handler +------------------------- +.. automodule:: monai.handlers.mean_dice + :members: + +Metrics logger +--------------- +.. automodule:: monai.handlers.metric_logger + :members: + +Segmentation saver +------------------ +.. automodule:: monai.handlers.segmentation_saver + :members: + +Training stats handler +---------------------- +.. automodule:: monai.handlers.stats_handler + :members: + +TensorBoard handler +-------------------- +.. automodule:: monai.handlers.tensorboard_handlers + :members: diff --git a/docs/source/highlights.md b/docs/source/highlights.md new file mode 100644 index 0000000000..d14a776a3d --- /dev/null +++ b/docs/source/highlights.md @@ -0,0 +1,79 @@ +# Modules for public alpha + +MONAI aims to support deep learning in medical image analysis at multiple levels of granularity. +This figure shows modules currently available in the codebase. +![image](../images/end_to_end_process.png) +The rest of this page provides more details for each module. + +* [Image transformations](#image-transformations) +* [Loss functions](#losses) +* [Network architectures](#network-architectures) +* [Evaluation](#evaluation) +* [Visualization](#visualization) +* [Result writing](#result-writing) + +## Image transformations +Medical image data pre-processing is challenging. Data are often in specialized formats with rich meta information, and the data volumes are often high-dimensional, requiring carefully designed manipulation procedures. As an important part of MONAI, powerful and flexible image transformations are provided to enable user-friendly, reproducible, optimized medical data pre-processing pipelines. + +### 1. Transforms support both Dictionary and Array format data +1. The widely used computer vision packages (such as ``torchvision``) focus on spatially 2D array image processing. MONAI provides more domain-specific transformations for both spatially 2D and 3D data, and retains the flexible transformation "compose" feature. +2. As medical image preprocessing often requires additional fine-grained system parameters, MONAI provides transforms for input data encapsulated in a Python dictionary. Users are able to specify the keys corresponding to the expected data fields and system parameters to compose complex transformations, as sketched below.
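+
+A minimal sketch of composing dictionary-based transforms (the keys `'img'`/`'seg'` and the constructor arguments here are illustrative only):
+```py
+from monai.transforms.compose import Compose
+from monai.transforms.composables import LoadNiftid, AddChanneld, RandRotate90d
+
+# each transform reads and writes only the dictionary entries named in `keys`;
+# other entries (e.g. 'extra') are passed through untouched
+train_transforms = Compose([
+    LoadNiftid(keys=['img', 'seg']),
+    AddChanneld(keys=['img', 'seg']),
+    RandRotate90d(keys=['img', 'seg'], prob=0.5, spatial_axes=(0, 1)),
+])
+
+data = {'img': 'image1.nii.gz', 'seg': 'label1.nii.gz', 'extra': 123}
+transformed = train_transforms(data)
+```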
+### 2. Medical specific transforms +MONAI aims to provide a rich set of popular, medical image-specific transformations. These currently include, for example: + + +- `LoadNifti`: Load a NIfTI format file from the provided path +- `Spacing`: Resample input image into the specified `pixdim` +- `Orientation`: Change the image's orientation to the specified `axcodes` +- `GaussianNoise`: Perturb image intensities by adding statistical noise +- `IntensityNormalizer`: Intensity normalization based on mean and standard deviation +- `Affine`: Transform image based on the affine parameters +- `Rand2DElastic`: Random elastic deformation and affine in 2D +- `Rand3DElastic`: Random elastic deformation and affine in 3D + +### 3. Fused spatial transforms and GPU acceleration +As medical image volumes are usually large (in multi-dimensional arrays), pre-processing performance significantly affects the overall pipeline speed. MONAI provides affine transforms to execute fused spatial operations, and supports GPU acceleration via native PyTorch for high performance. +Example code: +```py +# create an Affine transform +affine = Affine( + rotate_params=np.pi/4, + scale_params=(1.2, 1.2), + translate_params=(200, 40), + padding_mode='zeros', + device=torch.device('cuda:0') +) +# apply the transform to an image using bilinear interpolation +new_img = affine(image, spatial_size=(300, 400), mode='bilinear') +``` ### 4. Randomly crop out batch images based on positive/negative ratio Medical image data volumes may be too large to fit into GPU memory. A widely-used approach is to randomly draw small-size data samples during training. MONAI currently provides a uniform random sampling strategy as well as class-balanced fixed-ratio sampling, which may help stabilize the patch-based training process. + ## Losses There are domain-specific loss functions in the medical imaging research area that differ from the generic computer vision ones. As an important module of MONAI, these loss functions are implemented in PyTorch, such as the Dice loss and the generalized Dice loss.
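+
+A minimal usage sketch for the Dice loss (tensor shapes follow the `DiceLoss` docstring; the default constructor arguments are assumed):
+```py
+import torch
+from monai.losses.dice import DiceLoss
+
+# binary case: `pred` has a single logit channel, `ground` a single label channel
+pred = torch.rand(2, 1, 32, 32, 32)                       # (B, N=1, H, W, D) logits
+ground = torch.randint(0, 2, (2, 1, 32, 32, 32)).float()  # (B, 1, H, W, D) labels
+loss = DiceLoss()(pred, ground)
+```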
+ +## Network architectures +Some deep neural network architectures have been shown to be particularly effective for medical imaging analysis tasks. MONAI implements reference networks with the aims of both flexibility and code readability. + +## Evaluation +To run model inferences and evaluate the model quality, MONAI provides reference implementations of the relevant widely-used approaches. Currently, several popular evaluation metrics and inference patterns are included: + +### 1. Sliding window inference +When executing inference on large medical images, the sliding window approach is a popular method to achieve high performance with flexible memory requirements (a code sketch follows the figure below). +1. Select contiguous windows on the original image. +2. Iteratively run batches of windows through the model until all windows have been processed. +3. Stitch all the model outputs together to construct one segmentation corresponding to the original image. +4. Save the segmentation result to file or compute metrics. +![image](../images/sliding_window.png)
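+
+A minimal sketch of this pattern (the exact function name and signature inside `monai.utils.sliding_window_inference` are assumed here; see the API reference for the actual arguments):
+```py
+import torch
+from monai.utils.sliding_window_inference import sliding_window_inference
+
+net = torch.nn.Conv3d(1, 2, kernel_size=1)   # stand-in for a trained segmentation network
+val_image = torch.rand(1, 1, 160, 160, 160)  # one large volume: (batch, channel, H, W, D)
+with torch.no_grad():
+    # run 64^3 windows through the network, 4 windows per forward pass,
+    # then stitch the window outputs back into a full-size output
+    seg = sliding_window_inference(val_image, roi_size=(64, 64, 64),
+                                   sw_batch_size=4, predictor=net)
+```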
+ +### 2. Metrics for medical tasks +There are many useful metrics for measuring medical-specific tasks; MONAI already implements mean Dice and AUC, and will integrate more soon. + +## Visualization +Besides the common statistics curves on TensorBoard, MONAI can visualize 3D data as GIF animations on TensorBoard, providing a straightforward way to check a 3D image together with the corresponding label and segmentation output, so that users can quickly inspect the model output. + +## Result writing +For the segmentation task, MONAI supports saving the model output as a NIfTI format image, carrying over the affine information from the corresponding input image. + +For the classification task, MONAI supports saving the classification results as a CSV file. diff --git a/docs/source/index.rst b/docs/source/index.rst new file mode 100644 index 0000000000..240ea57ec4 --- /dev/null +++ b/docs/source/index.rst @@ -0,0 +1,80 @@ +:github_url: https://github.com/Project-MONAI/MONAI + +.. MONAI documentation master file, created by + sphinx-quickstart on Wed Feb 5 09:40:29 2020. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Project MONAI +============= + + +*Medical Open Network for AI* + +MONAI is a `PyTorch <https://pytorch.org/>`_-based, `open-source <https://github.com/Project-MONAI/MONAI/blob/master/LICENSE>`_ platform +for deep learning in healthcare imaging. Its ambitions are: + +- developing a community of academic, industrial and clinical researchers collaborating on a common foundation; +- creating state-of-the-art, end-to-end training workflows for healthcare imaging; +- providing researchers with optimized and standardized ways to create and evaluate deep learning models. + +Features +-------- +*The codebase is currently under active development* + +- flexible pre-processing for multi-dimensional medical imaging data; +- compositional & portable APIs for ease of integration in existing workflows; +- domain-specific implementations for networks, losses, evaluation metrics and more; +- customizable design for varying user expertise; +- multi-GPU data parallelism support. + + +Getting started +--------------- + +Tutorials & examples are located at `monai/examples <https://github.com/Project-MONAI/MONAI/tree/master/examples>`_. + +Technical documentation is available via `Read the Docs <https://monai.readthedocs.io/en/latest/>`_. + + +Technical highlights +-------------------- +- `public alpha `_ + +.. toctree:: + :maxdepth: 1 + :caption: APIs + + transform_api + losses + networks + metrics + data + engines + handlers + visualize + utils + + +Contributing +------------ +For guidance on making a contribution to MONAI, see the `contributing guidelines <https://github.com/Project-MONAI/MONAI/blob/master/CONTRIBUTING.md>`_. + +Links +----- +- Website: *(coming soon)* +- API documentation: https://monai.readthedocs.io/en/latest/ +- Code: https://github.com/Project-MONAI/MONAI +- Project tracker: https://github.com/Project-MONAI/MONAI/projects +- Issue tracker: https://github.com/Project-MONAI/MONAI/issues +- Wiki: https://github.com/Project-MONAI/MONAI/wiki +- Test status: https://gitlab.com/project-monai/MONAI/pipelines + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` + diff --git a/docs/source/losses.rst b/docs/source/losses.rst new file mode 100644 index 0000000000..50e4563ca1 --- /dev/null +++ b/docs/source/losses.rst @@ -0,0 +1,23 @@ +:github_url: https://github.com/Project-MONAI/MONAI + +.. _losses: + +Loss functions +============== + +Segmentation Losses +------------------- + +.. automodule:: monai.losses.dice +.. currentmodule:: monai.losses.dice + + +`DiceLoss` +~~~~~~~~~~~ +.. autoclass:: DiceLoss + :members: + +`GeneralizedDiceLoss` +~~~~~~~~~~~~~~~~~~~~~ +.. autoclass:: GeneralizedDiceLoss + :members: diff --git a/docs/source/metrics.rst b/docs/source/metrics.rst new file mode 100644 index 0000000000..9f2de95f7d --- /dev/null +++ b/docs/source/metrics.rst @@ -0,0 +1,17 @@ +:github_url: https://github.com/Project-MONAI/MONAI + +.. _metrics: + +Metrics +======== + +Segmentation metrics +-------------------- + +.. automodule:: monai.metrics.compute_meandice +.. currentmodule:: monai.metrics.compute_meandice + + +`compute_meandice` +~~~~~~~~~~~~~~~~~~~ +.. autofunction:: monai.metrics.compute_meandice.compute_meandice diff --git a/docs/source/networks.rst b/docs/source/networks.rst new file mode 100644 index 0000000000..5d456c1528 --- /dev/null +++ b/docs/source/networks.rst @@ -0,0 +1,94 @@ +:github_url: https://github.com/Project-MONAI/MONAI + +.. _networks: + +Network architectures +===================== + + +Blocks +------ +.. automodule:: monai.networks.blocks.convolutions +.. currentmodule:: monai.networks.blocks.convolutions + + +`Convolution` +~~~~~~~~~~~~~ +.. autoclass:: Convolution + :members: + +`ResidualUnit` +~~~~~~~~~~~~~~ +.. autoclass:: ResidualUnit + :members: + + +Layers +------ + +`get_conv_type` +~~~~~~~~~~~~~~~ +.. autofunction:: monai.networks.layers.factories.get_conv_type + +`get_dropout_type` +~~~~~~~~~~~~~~~~~~ +.. autofunction:: monai.networks.layers.factories.get_dropout_type + +`get_normalize_type` +~~~~~~~~~~~~~~~~~~~~ +.. autofunction:: monai.networks.layers.factories.get_normalize_type + +`get_maxpooling_type` +~~~~~~~~~~~~~~~~~~~~~ +.. autofunction:: monai.networks.layers.factories.get_maxpooling_type + +`get_avgpooling_type` +~~~~~~~~~~~~~~~~~~~~~ +.. autofunction:: monai.networks.layers.factories.get_avgpooling_type + + +.. automodule:: monai.networks.layers.simplelayers +.. currentmodule:: monai.networks.layers.simplelayers + +`SkipConnection` +~~~~~~~~~~~~~~~~ +.. autoclass:: SkipConnection + :members: + +`Flatten` +~~~~~~~~~~ +.. autoclass:: Flatten + :members: + +`GaussianFilter` +~~~~~~~~~~~~~~~~ +.. autoclass:: GaussianFilter + :members: + :special-members: __call__ + + +Nets +---- + +.. automodule:: monai.networks.nets +.. currentmodule:: monai.networks.nets + + +`Densenet3D` +~~~~~~~~~~~~ +.. automodule:: monai.networks.nets.densenet3d + :members: +.. autofunction:: monai.networks.nets.densenet3d.densenet121 +.. autofunction:: monai.networks.nets.densenet3d.densenet169 +.. autofunction:: monai.networks.nets.densenet3d.densenet201 +.. autofunction:: monai.networks.nets.densenet3d.densenet264 + +`Highresnet` +~~~~~~~~~~~~ +.. automodule:: monai.networks.nets.highresnet + :members: + +`Unet` +~~~~~~ +.. automodule:: monai.networks.nets.unet + :members: diff --git a/docs/source/transform_api.rst b/docs/source/transform_api.rst new file mode 100644 index 0000000000..68608c7148 --- /dev/null +++ b/docs/source/transform_api.rst @@ -0,0 +1,354 @@ +:github_url: https://github.com/Project-MONAI/MONAI + +.. _transform_api: + +Transforms +========== + + +Generic Interfaces +------------------ + +.. automodule:: monai.transforms.compose +.. currentmodule:: monai.transforms.compose + + +`Transform` +~~~~~~~~~~~ +.. autoclass:: Transform + :members: + :special-members: __call__ + + +`MapTransform` +~~~~~~~~~~~~~~ +.. autoclass:: MapTransform + :members: + :special-members: __call__ + + +`Randomizable` +~~~~~~~~~~~~~~ +.. autoclass:: Randomizable + :members: + +`Compose` +~~~~~~~~~ +.. autoclass:: Compose + :members: + :special-members: __call__ + + +Vanilla Transforms +------------------ + +.. automodule:: monai.transforms.transforms +.. currentmodule:: monai.transforms.transforms + +`Spacing` +~~~~~~~~~ +.. autoclass:: Spacing + :members: + :special-members: __call__ + +`Orientation` +~~~~~~~~~~~~~ +.. autoclass:: Orientation + :members: + :special-members: __call__ + +`LoadNifti` +~~~~~~~~~~~ +.. autoclass:: LoadNifti + :members: + :special-members: __call__ + +`AsChannelFirst` +~~~~~~~~~~~~~~~~ +.. autoclass:: AsChannelFirst + :members: + :special-members: __call__ + +`AddChannel` +~~~~~~~~~~~~ +..
autoclass:: AddChannel + :members: + :special-members: __call__ + +`Transpose` +~~~~~~~~~~~ +.. autoclass:: Transpose + :members: + :special-members: __call__ + +`Rescale` +~~~~~~~~~ +.. autoclass:: Rescale + :members: + :special-members: __call__ + +`GaussianNoise` +~~~~~~~~~~~~~~~ +.. autoclass:: GaussianNoise + :members: + :special-members: __call__ + +`Flip` +~~~~~~ +.. autoclass:: Flip + :members: + :special-members: __call__ + +`Resize` +~~~~~~~~ +.. autoclass:: Resize + :members: + :special-members: __call__ + +`Rotate` +~~~~~~~~ +.. autoclass:: Rotate + :members: + :special-members: __call__ + +`Zoom` +~~~~~~ +.. autoclass:: Zoom + :members: + :special-members: __call__ + +`ToTensor` +~~~~~~~~~~ +.. autoclass:: ToTensor + :members: + :special-members: __call__ + +`UniformRandomPatch` +~~~~~~~~~~~~~~~~~~~~ +.. autoclass:: UniformRandomPatch + :members: + :special-members: __call__ + +`IntensityNormalizer` +~~~~~~~~~~~~~~~~~~~~~ +.. autoclass:: IntensityNormalizer + :members: + :special-members: __call__ + +`ImageEndPadder` +~~~~~~~~~~~~~~~~ +.. autoclass:: ImageEndPadder + :members: + :special-members: __call__ + +`Rotate90` +~~~~~~~~~~ +.. autoclass:: Rotate90 + :members: + :special-members: __call__ + +`RandRotate90` +~~~~~~~~~~~~~~ +.. autoclass:: RandRotate90 + :members: + :special-members: __call__ + +`SpatialCrop` +~~~~~~~~~~~~~ +.. autoclass:: SpatialCrop + :members: + :special-members: __call__ + +`RandRotate` +~~~~~~~~~~~~ +.. autoclass:: RandRotate + :members: + :special-members: __call__ + +`RandFlip` +~~~~~~~~~~ +.. autoclass:: RandFlip + :members: + :special-members: __call__ + +`RandZoom` +~~~~~~~~~~ +.. autoclass:: RandZoom + :members: + :special-members: __call__ + +`Affine` +~~~~~~~~ +.. autoclass:: Affine + :members: + :special-members: __call__ + +`RandAffine` +~~~~~~~~~~~~ +.. autoclass:: RandAffine + :members: + :special-members: __call__ + +`Rand2DElastic` +~~~~~~~~~~~~~~~ +.. autoclass:: Rand2DElastic + :members: + :special-members: __call__ + +`Rand3DElastic` +~~~~~~~~~~~~~~~ +.. autoclass:: Rand3DElastic + :members: + :special-members: __call__ + + +Dictionary-based Composables +---------------------------- + +.. automodule:: monai.transforms.composables +.. currentmodule:: monai.transforms.composables + +`Spacingd` +~~~~~~~~~~ +.. autoclass:: Spacingd + :members: + :special-members: __call__ + +`Orientationd` +~~~~~~~~~~~~~~ +.. autoclass:: Orientationd + :members: + :special-members: __call__ + +`LoadNiftid` +~~~~~~~~~~~~ +.. autoclass:: LoadNiftid + :members: + :special-members: __call__ + +`AsChannelFirstd` +~~~~~~~~~~~~~~~~~ +.. autoclass:: AsChannelFirstd + :members: + :special-members: __call__ + +`AddChanneld` +~~~~~~~~~~~~~ +.. autoclass:: AddChanneld + :members: + :special-members: __call__ + +`Rotate90d` +~~~~~~~~~~~ +.. autoclass:: Rotate90d + :members: + :special-members: __call__ + +`Rescaled` +~~~~~~~~~~ +.. autoclass:: Rescaled + :members: + :special-members: __call__ + +`Resized` +~~~~~~~~~ +.. autoclass:: Resized + :members: + :special-members: __call__ + +`UniformRandomPatchd` +~~~~~~~~~~~~~~~~~~~~~ +.. autoclass:: UniformRandomPatchd + :members: + :special-members: __call__ + +`RandRotate90d` +~~~~~~~~~~~~~~~ +.. autoclass:: RandRotate90d + :members: + :special-members: __call__ + +`RandCropByPosNegLabeld` +~~~~~~~~~~~~~~~~~~~~~~~~ +.. autoclass:: RandCropByPosNegLabeld + :members: + :special-members: __call__ + +`RandAffined` +~~~~~~~~~~~~~ +.. 
autoclass:: RandAffined + :members: + :special-members: __call__ + +`Rand2DElasticd` +~~~~~~~~~~~~~~~~ +.. autoclass:: Rand2DElasticd + :members: + :special-members: __call__ + +`Rand3DElasticd` +~~~~~~~~~~~~~~~~ +.. autoclass:: Rand3DElasticd + :members: + :special-members: __call__ + +`Flipd` +~~~~~~~ +.. autoclass:: Flipd + :members: + :special-members: __call__ + +`RandFlipd` +~~~~~~~~~~~ +.. autoclass:: RandFlipd + :members: + :special-members: __call__ + +`Rotated` +~~~~~~~~~ +.. autoclass:: Rotated + :members: + :special-members: __call__ + +`RandRotated` +~~~~~~~~~~~~~ +.. autoclass:: RandRotated + :members: + :special-members: __call__ + +`Zoomd` +~~~~~~~ +.. autoclass:: Zoomd + :members: + :special-members: __call__ + +`RandZoomd` +~~~~~~~~~~~ +.. autoclass:: RandZoomd + :members: + :special-members: __call__ + +`DeleteKeysd` +~~~~~~~~~~~~~ +.. autoclass:: DeleteKeysd + :members: + :special-members: __call__ + +Transform Adaptors +------------------ + +.. automodule:: monai.transforms.adaptors +.. currentmodule:: monai.transforms.adaptors + +`adaptor` +~~~~~~~~~ +.. autofunction:: monai.transforms.adaptors.adaptor + +`apply_alias` +~~~~~~~~~~~~~ +.. autofunction:: monai.transforms.adaptors.apply_alias + + +`to_kwargs` +~~~~~~~~~~~ +.. autofunction:: monai.transforms.adaptors.to_kwargs diff --git a/docs/source/utils.rst b/docs/source/utils.rst new file mode 100644 index 0000000000..0d37d15dc5 --- /dev/null +++ b/docs/source/utils.rst @@ -0,0 +1,20 @@ +:github_url: https://github.com/Project-MONAI/MONAI + +.. _utils: + +Utils +===== + +Sliding window inference +------------------------ + +.. automodule:: monai.utils.sliding_window_inference + :members: + +Module utils +------------ +.. automodule:: monai.utils.module + +Aliases +------- +.. automodule:: monai.utils.aliases diff --git a/docs/source/visualize.rst b/docs/source/visualize.rst new file mode 100644 index 0000000000..e6506f6849 --- /dev/null +++ b/docs/source/visualize.rst @@ -0,0 +1,12 @@ +:github_url: https://github.com/Project-MONAI/MONAI + +.. _visualize: + +Visualizations +============== + +TensorBoard visuals +------------------- + +.. automodule:: monai.visualize.img2tensorboard + :members: diff --git a/monai/data/dataset.py b/monai/data/dataset.py index 4b3221d19d..8e5bb7b0a6 100644 --- a/monai/data/dataset.py +++ b/monai/data/dataset.py @@ -16,13 +16,14 @@ @export("monai.data") class Dataset(torch.utils.data.Dataset): """ - General Dataset to handle dictionary format data, it can operate transforms for specific fields. - For example, typical input data can be a list of dictionaries: - [{ { { - 'img': 'image1.nii.gz', 'img': 'image2.nii.gz', 'img': 'image3.nii.gz', - 'seg': 'label1.nii.gz', 'seg': 'label2.nii.gz', 'seg': 'label3.nii.gz', - 'extra': 123 'extra': 456 'extra': 789 - }, }, }] + Generic dataset to handle dictionary format data; it can apply transforms to specific fields.
+ For example, typical input data can be a list of dictionaries:: + + [{ { { + 'img': 'image1.nii.gz', 'img': 'image2.nii.gz', 'img': 'image3.nii.gz', + 'seg': 'label1.nii.gz', 'seg': 'label2.nii.gz', 'seg': 'label3.nii.gz', + 'extra': 123 'extra': 456 'extra': 789 + }, }, }]
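+
+    A typical usage is then the following (a sketch; ``xform`` can be any callable,
+    e.g. a composed chain of dictionary-based transforms)::
+
+        dataset = Dataset(data=input_data, transform=xform)
+        item = dataset[0]  # the transformed version of the first dictionary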
""" def __init__(self, data, transform=None): diff --git a/monai/data/synthetic.py b/monai/data/synthetic.py index 063c16a965..1c49454c8e 100644 --- a/monai/data/synthetic.py +++ b/monai/data/synthetic.py @@ -16,10 +16,10 @@ def create_test_image_2d(width, height, num_objs=12, rad_max=30, noise_max=0.0, num_seg_classes=5, channel_dim=None): """ - Return a noisy 2D image with `numObj' circles and a 2D mask image. The maximum radius of the circles is given as - `radMax'. The mask will have `numSegClasses' number of classes for segmentations labeled sequentially from 1, plus a - background class represented as 0. If `noiseMax' is greater than 0 then noise will be added to the image taken from - the uniform distribution on range [0,noiseMax). If `channel_dim' is None, will create an image without channel + Return a noisy 2D image with `num_objs` circles and a 2D mask image. The maximum radius of the circles is given as + `rad_max`. The mask will have `num_seg_classes` number of classes for segmentations labeled sequentially from 1, plus a + background class represented as 0. If `noise_max` is greater than 0 then noise will be added to the image taken from + the uniform distribution on range `[0,noise_max)`. If `channel_dim` is None, will create an image without channel dimension, otherwise create an image with channel dimension as first dim or last dim. """ image = np.zeros((width, height)) @@ -55,7 +55,7 @@ def create_test_image_3d(height, width, depth, num_objs=12, rad_max=30, Return a noisy 3D image and segmentation. See also: - ``create_test_image_2d`` + :py:func:`~create_test_image_2d` """ image = np.zeros((width, height, depth)) diff --git a/monai/data/utils.py b/monai/data/utils.py index 81f9ac8c56..a19916ac7c 100644 --- a/monai/data/utils.py +++ b/monai/data/utils.py @@ -121,21 +121,21 @@ def dense_patch_slices(image_size, patch_size, scan_interval): def iter_patch(arr, patch_size, start_pos=(), copy_back=True, pad_mode="wrap", **pad_opts): """ - Yield successive patches from `arr' of size `patchSize'. The iteration can start from position `startPos' in `arr' - but drawing from a padded array extended by the `patchSize' in each dimension (so these coordinates can be negative - to start in the padded region). If `copyBack' is True the values from each patch are written back to `arr'. + Yield successive patches from `arr` of size `patch_size`. The iteration can start from position `start_pos` in `arr` + but drawing from a padded array extended by the `patch_size` in each dimension (so these coordinates can be negative + to start in the padded region). If `copy_back` is True the values from each patch are written back to `arr`. Args: arr (np.ndarray): array to iterate over patch_size (tuple of int or None): size of patches to generate slices for, 0 or None selects whole dimension start_pos (tuple of it, optional): starting position in the array, default is 0 for each dimension copy_back (bool): if True data from the yielded patches is copied back to `arr` once the generator completes - pad_mode (str, optional): padding mode, see numpy.pad - pad_opts (dict, optional): padding options, see numpy.pad + pad_mode (str, optional): padding mode, see `numpy.pad` + pad_opts (dict, optional): padding options, see `numpy.pad` Yields: Patches of array data from `arr` which are views into a padded array which can be modified, if `copy_back` is - True these changes will be reflected in `arr` once the iteration completes + True these changes will be reflected in `arr` once the iteration completes.
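+
+    For example (a sketch)::
+
+        import numpy as np
+        arr = np.zeros((4, 4))
+        for patch in iter_patch(arr, patch_size=(2, 2)):
+            patch += 1.0  # modifies a padded view; copied back to `arr` when iteration ends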
""" # ensure patchSize and startPos are the right length patch_size = get_valid_patch_size(arr.shape, patch_size) diff --git a/monai/engine/multi_gpu_supervised_trainer.py b/monai/engine/multi_gpu_supervised_trainer.py index 12d7605d0e..ea9fb2044d 100644 --- a/monai/engine/multi_gpu_supervised_trainer.py +++ b/monai/engine/multi_gpu_supervised_trainer.py @@ -53,13 +53,14 @@ def _default_eval_transform(x, y, y_pred): def create_multigpu_supervised_trainer(net, optimizer, loss_fn, devices=None, non_blocking=False, prepare_batch=_prepare_batch, output_transform=_default_transform): """ - ***Derived from `create_supervised_trainer` in Ignite. + Derived from `create_supervised_trainer` in Ignite. Factory function for creating a trainer for supervised models. + Args: net (`torch.nn.Module`): the network to train. optimizer (`torch.optim.Optimizer`): the optimizer to use. - loss_fn (torch.nn loss function): the loss function to use. + loss_fn (`torch.nn` loss function): the loss function to use. devices (list, optional): device(s) type specification (default: None). Applies to both model and batches. None is all devices used, empty list is CPU only. non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously @@ -68,10 +69,13 @@ def create_multigpu_supervised_trainer(net, optimizer, loss_fn, devices=None, no tuple of tensors `(batch_x, batch_y)`. output_transform (callable, optional): function that receives 'x', 'y', 'y_pred', 'loss' and returns value to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`. - Note: `engine.state.output` for this engine is defind by `output_transform` parameter and is the loss - of the processed batch by default. + Returns: Engine: a trainer engine with supervised update function. + + Note: + `engine.state.output` for this engine is defined by the `output_transform` parameter and is the loss + of the processed batch by default. """ devices = get_devices_spec(devices) @@ -86,9 +90,10 @@ def create_multigpu_supervised_trainer(net, optimizer, loss_fn, devices=None, no def create_multigpu_supervised_evaluator(net, metrics=None, devices=None, non_blocking=False, prepare_batch=_prepare_batch, output_transform=_default_eval_transform): """ - ***Derived from `create_supervised_evaluator` in Ignite. + Derived from `create_supervised_evaluator` in Ignite. Factory function for creating an evaluator for supervised models. + Args: net (`torch.nn.Module`): the model to train. metrics (dict of str - :class:`~ignite.metrics.Metric`): a map of metric names to Metrics. @@ -101,8 +106,11 @@ def create_multigpu_supervised_evaluator(net, metrics=None, devices=None, non_bl output_transform (callable, optional): function that receives 'x', 'y', 'y_pred' and returns value to be assigned to engine's state.output after each iteration. Default is returning `(y_pred, y,)` which fits output expected by metrics. If you change it you should use `output_transform` in metrics. - Note: `engine.state.output` for this engine is defind by `output_transform` parameter and is + + Note: + `engine.state.output` for this engine is defined by the `output_transform` parameter and is a tuple of `(batch_pred, batch_y)` by default. + Returns: Engine: an evaluator engine with supervised inference function. """ diff --git a/monai/handlers/checkpoint_loader.py b/monai/handlers/checkpoint_loader.py index bbf1323a17..82d1d67e04 100644 --- a/monai/handlers/checkpoint_loader.py +++ b/monai/handlers/checkpoint_loader.py @@ -26,8 +26,9 @@ class CheckpointLoader: Args: load_path (string): the file path of checkpoint, it should be a PyTorch pth file. - load_dict (dict): target objects that load checkpoint to. examples: - {'network': net, 'optimizer': optimizer, 'engine', engine} + load_dict (dict): target objects that load checkpoint to. examples:: + + {'network': net, 'optimizer': optimizer, 'engine': engine} """ diff --git a/monai/handlers/stats_handler.py b/monai/handlers/stats_handler.py index 9d2e518919..1dd4cf4e8a 100644 --- a/monai/handlers/stats_handler.py +++ b/monai/handlers/stats_handler.py @@ -27,7 +27,8 @@ class StatsHandler(object): Default behaviors: - When EPOCH_COMPLETED, logs ``engine.state.metrics`` using ``self.logger``. - When ITERATION_COMPELTED, logs - ``self.output_transform(engine.state.output)`` using ``self.logger``. + ``self.output_transform(engine.state.output)`` using ``self.logger``. + """ def __init__(self, @@ -39,6 +40,7 @@ tag_name=DEFAULT_TAG, key_var_format=DEFAULT_KEY_VAL_FORMAT): """ + Args: epoch_print_logger (Callable): customized callable printer for epoch level logging. must accept parameter "engine", use default printer if None. diff --git a/monai/handlers/tensorboard_handlers.py b/monai/handlers/tensorboard_handlers.py index 411a53084b..fb5eb34e4e 100644 --- a/monai/handlers/tensorboard_handlers.py +++ b/monai/handlers/tensorboard_handlers.py @@ -163,11 +163,12 @@ class TensorBoardImageHandler(object): Default behavior: - Show y_pred as images (GIF for 3D) on TensorBoard when Event triggered, - need to use ``batch_transform`` and ``output_transform`` to specify - how many images to show and show which channel. + how many images to show and show which channel. - Expects ``batch_transform(engine.state.batch)`` to return data - format: (image[N, channel, ...], label[N, channel, ...]). + format: (image[N, channel, ...], label[N, channel, ...]). - Expects ``output_transform(engine.state.output)`` to return a torch - tensor in format (y_pred[N, channel, ...], loss). + tensor in format (y_pred[N, channel, ...], loss). + """ def __init__(self, @@ -189,7 +190,6 @@ def __init__(self, For example, in evaluation, the evaluator engine needs to know current epoch from trainer. max_channels (int): number of channels to plot. max_frames (int): number of frames for 2D-t plot.
- """ self._writer = SummaryWriter() if summary_writer is None else summary_writer self.batch_transform = batch_transform diff --git a/monai/losses/dice.py b/monai/losses/dice.py index 808c3c65d3..10dc15ad77 100644 --- a/monai/losses/dice.py +++ b/monai/losses/dice.py @@ -23,11 +23,11 @@ @alias("dice", "Dice") class DiceLoss(_Loss): """ - Multiclass dice loss. Input logits 'pred' (BNHW[D] where N is number of classes) is compared with ground truth - `ground' (B1HW[D]). Axis N of `pred' is expected to have logit predictions for each class rather than being image - channels, while the same axis of `ground' should be 1. If the N channel of `pred' is 1 binary dice loss will be - calculated. The `smooth' parameter is a value added to the intersection and union components of the inter-over-union - calculation to smooth results and prevent divide-by-0, this value should be small. The `include_background' class + Multiclass dice loss. Input logits `pred` (BNHW[D] where N is number of classes) is compared with ground truth + `ground' (B1HW[D]). Axis N of `pred` is expected to have logit predictions for each class rather than being image + channels, while the same axis of `ground` should be 1. If the N channel of `pred` is 1 binary dice loss will be + calculated. The `smooth` parameter is a value added to the intersection and union components of the inter-over-union + calculation to smooth results and prevent divide-by-0, this value should be small. The `include_background` class attribute can be set to False for an instance of DiceLoss to exclude the first category (channel index 0) which is by convention assumed to be background. If the non-background segmentations are small compared to the total image size they can get overwhelmed by the signal from the background so excluding it in such cases helps convergence. @@ -86,6 +86,7 @@ def forward(self, pred, ground, smooth=1e-5): class GeneralizedDiceLoss(_Loss): """ Compute the generalised Dice loss defined in: + Sudre, C. et. al. (2017) Generalised Dice overlap as a deep learning loss function for highly unbalanced segmentations. DLMIA 2017. diff --git a/monai/metrics/compute_meandice.py b/monai/metrics/compute_meandice.py index 37bf95c646..d88ec32490 100644 --- a/monai/metrics/compute_meandice.py +++ b/monai/metrics/compute_meandice.py @@ -44,10 +44,10 @@ def compute_meandice(y_pred, Dice scores per batch and per class (shape: [batch_size, n_classes]). Note: - This method provide two options to convert `y_pred` into a binary matrix: - (1) when `mutually_exclusive` is True, it uses a combination of argmax and to_onehot, - (2) when `mutually_exclusive` is False, it uses a threshold `logit_thresh` - (optionally with a sigmoid function before thresholding). + This method provides two options to convert `y_pred` into a binary matrix + (1) when `mutually_exclusive` is True, it uses a combination of ``argmax`` and ``to_onehot``, + (2) when `mutually_exclusive` is False, it uses a threshold ``logit_thresh`` + (optionally with a ``sigmoid`` function before thresholding). 
""" n_classes = y_pred.shape[1] diff --git a/monai/networks/layers/convutils.py b/monai/networks/layers/convutils.py index 0a1f8ff0b2..96781448f7 100644 --- a/monai/networks/layers/convutils.py +++ b/monai/networks/layers/convutils.py @@ -26,8 +26,8 @@ def same_padding(kernel_size, dilation=1): def calculate_out_shape(in_shape, kernel_size, stride, padding): """ - Calculate the output tensor shape when applying a convolution to a tensor of shape `inShape' with kernel size - 'kernel_size', stride value `stride', and input padding value `padding'. All arguments can be scalars or multiple + Calculate the output tensor shape when applying a convolution to a tensor of shape `inShape` with kernel size + `kernel_size`, stride value `stride`, and input padding value `padding`. All arguments can be scalars or multiple values, return value is a scalar if all inputs are scalars. """ in_shape = np.atleast_1d(in_shape) @@ -38,6 +38,16 @@ def calculate_out_shape(in_shape, kernel_size, stride, padding): def gaussian_1d(sigma, truncated=4.): + """ + one dimensional gaussian kernel. + + Args: + sigma: std of the kernel + truncated: tail length + + Returns: + 1D numpy array + """ if sigma <= 0: raise ValueError('sigma must be positive') diff --git a/monai/networks/layers/factories.py b/monai/networks/layers/factories.py index b295453bbd..139de92655 100644 --- a/monai/networks/layers/factories.py +++ b/monai/networks/layers/factories.py @@ -9,6 +9,10 @@ # See the License for the specific language governing permissions and # limitations under the License. +""" +handles spatial 1D, 2D, 3D network components with a factory pattern. +""" + from torch import nn as nn diff --git a/monai/networks/utils.py b/monai/networks/utils.py index bca9922374..628e4ea762 100644 --- a/monai/networks/utils.py +++ b/monai/networks/utils.py @@ -9,7 +9,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """ -Utilities and types for defining networks, these depend on Pytorch. +Utilities and types for defining networks, these depend on PyTorch. """ import torch @@ -18,10 +18,11 @@ def one_hot(labels, num_classes): """ - For a tensor `labels' of dimensions B1[spatial_dims], return a tensor of dimensions BN[spatial_dims] - for `num_classes' N number of classes. + For a tensor `labels` of dimensions B1[spatial_dims], return a tensor of dimensions `BN[spatial_dims]` + for `num_classes` N number of classes. Example: + For every value v = labels[b,1,h,w], the value in the result at [b,v,h,w] will be 1 and all others 0. Note that this will include the background label, thus a binary mask should be treated as having 2 classes. """ @@ -47,10 +48,9 @@ def slice_channels(tensor, *slicevals): def predict_segmentation(logits): """ - Given the logits from a network, computing the segmentation by thresholding all values above 0 if `logits' has one - channel, or computing the argmax along the channel axis otherwise. 
""" @@ -47,10 +48,9 @@ def slice_channels(tensor, *slicevals): def predict_segmentation(logits): """ - Given the logits from a network, computing the segmentation by thresholding all values above 0 if `logits' has one - channel, or computing the argmax along the channel axis otherwise. + Given the logits from a network, compute the segmentation by thresholding all values above 0 if `logits` has one + channel, or by taking the `argmax` along the channel axis otherwise. `logits` is expected to have shape `BCHW[D]`. """ - # generate prediction outputs, logits has shape BCHW[D] if logits.shape[1] == 1: return (logits >= 0).int() # for binary segmentation threshold on channel 0 else: diff --git a/monai/transforms/adaptors.py b/monai/transforms/adaptors.py index b0a8571574..183085e5db 100644 --- a/monai/transforms/adaptors.py +++ b/monai/transforms/adaptors.py @@ -9,10 +9,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import monai - """ How to use the adaptor function +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The key to using 'adaptor' lies in understanding the function that want to adapt. The 'inputs' and 'outputs' parameters take either strings, lists/tuples of strings or a dictionary mapping strings, depending on the function being adapted. @@ -25,23 +24,27 @@ only necessary to name the dictionary keyword to which that value is assigned. Use of `outputs` +---------------- `outputs` can take either a string, a list/tuple of string or a dict of string to string, depending on what the transform being adapted returns: -. If the transform returns a single argument, then outputs can be supplied a - string that indicates what key to assign the return value to in the - dictionary -. If the transform returns a list/tuple of values, then outputs can be supplied - a list/tuple of the same length. The strings in outputs map the return value - at the corresponding position to a key in the dictionary -. If the transform returns a dictionary of values, then outputs must be supplied - a dictionary that maps keys in the function's return dictionary to the - dictionary being passed between functions + + - If the transform returns a single argument, then outputs can be supplied a + string that indicates what key to assign the return value to in the + dictionary + - If the transform returns a list/tuple of values, then outputs can be supplied + a list/tuple of the same length. The strings in outputs map the return value + at the corresponding position to a key in the dictionary + - If the transform returns a dictionary of values, then outputs must be supplied + a dictionary that maps keys in the function's return dictionary to the + dictionary being passed between functions Note, the caller is free to use a more complex way of specifying the outputs parameter than is required. The following are synonymous and will be treated identically: + +.. code-block:: python + # single argument adaptor(MyTransform(), 'image') adaptor(MyTransform(), ['image']) @@ -50,47 +53,51 @@ # multiple arguments adaptor(MyTransform(), ['image', 'label']) adaptor(MyTransform(), {'image': 'image', 'label': 'label'}) -``` Use of `inputs` +--------------- `inputs` can usually be omitted when using `adaptor`. It is only required when a the function's parameter names do not match the names in the dictionary that is used to chain transform calls. -``` -class MyTransform1: - ... - def __call__(image): - return '''do stuff to image''' +.. code-block:: python -class MyTransform2: - ... - def __call__(img): - return '''do stuff to image''' + class MyTransform1: + ... + def __call__(image): + return '''do stuff to image''' -d = {'image': i} + class MyTransform2: + ...
+ def __call__(img): + return '''do stuff to image''' -Compose([ - adaptor(MyTransform1(), 'image'), - adaptor(MyTransform2(), 'image', {'img':'image'}) -]) -``` + d = {'image': i} + + Compose([ + adaptor(MyTransform1(), 'image'), + adaptor(MyTransform2(), 'image', {'img':'image'}) + ]) Inputs: -dictionary in: None | Name maps -params in (match): None | Name list | Name maps -params in (mismatch): Name maps -params & **kwargs (match) : None | Name maps -params & **kwargs (mismatch) : Name maps + +- dictionary in: None | Name maps +- params in (match): None | Name list | Name maps +- params in (mismatch): Name maps +- params & `**kwargs` (match) : None | Name maps +- params & `**kwargs` (mismatch) : Name maps Outputs: -dictionary out: None | Name maps -list/tuple out: list/tuple -variable out: string + +- dictionary out: None | Name maps +- list/tuple out: list/tuple +- variable out: string """ +import monai + @monai.utils.export('monai.transforms') def adaptor(function, outputs, inputs=None): diff --git a/monai/transforms/composables.py b/monai/transforms/composables.py index d8cc708af7..ffb47fc13c 100644 --- a/monai/transforms/composables.py +++ b/monai/transforms/composables.py @@ -10,16 +10,17 @@ # limitations under the License. """ A collection of dictionary-based wrappers around the "vanilla" transforms -defined in `monai.transforms.transforms`. +defined in :py:mod:`monai.transforms.transforms`. + +Class names ending with 'd' denote dictionary-based transforms. """ import torch import numpy as np -from collections.abc import Hashable import monai from monai.data.utils import get_random_patch, get_valid_patch_size from monai.networks.layers.simplelayers import GaussianFilter -from monai.transforms.compose import Randomizable, Transform +from monai.transforms.compose import Randomizable, MapTransform from monai.transforms.transforms import (LoadNifti, AsChannelFirst, Orientation, AddChannel, Spacing, Rotate90, SpatialCrop, RandAffine, Rand2DElastic, Rand3DElastic, @@ -31,40 +32,11 @@ export = monai.utils.export("monai.transforms") -@export -class MapTransform(Transform): - """ - A subclass of ``monai.transforms.compose.Transform`` with an assumption - that the ``data`` input of ``self.__call__`` is a MutableMapping such as ``dict``. - - The ``keys`` parameter will be used to get and set the actual data - item to transform. That is, the callable of this transform should - follow the pattern: - .. code-block:: python - - def __call__(self, data): - for key in self.keys: - if key in data: - # update output data with some_transform_function(data[key]). - else: - # do nothing or some exceptions handling. - return data - """ - - def __init__(self, keys): - self.keys = ensure_tuple(keys) - if not self.keys: - raise ValueError('keys unspecified') - for key in self.keys: - if not isinstance(key, Hashable): - raise ValueError('keys should be a hashable or a sequence of hashables, got {}'.format(type(key))) - - @export @alias('SpacingD', 'SpacingDict') class Spacingd(MapTransform): """ - dictionary-based wrapper of :class: `monai.transforms.transforms.Spacing`. + dictionary-based wrapper of :py:class:`monai.transforms.transforms.Spacing`. """ def __init__(self, keys, affine_key, pixdim, interp_order=2, keep_shape=False, output_key='spacing'): @@ -81,7 +53,6 @@ def __init__(self, keys, affine_key, pixdim, interp_order=2, keep_shape=False, o after resampling. Defaults to False. output_key (hashable): key to be added to the output dictionary to track the pixdim status.
- """ MapTransform.__init__(self, keys) self.affine_key = affine_key @@ -105,7 +76,7 @@ def __call__(self, data): @alias('OrientationD', 'OrientationDict') class Orientationd(MapTransform): """ - dictionary-based wrapper of :class: `monai.transforms.transforms.Orientation`. + dictionary-based wrapper of :py:class:`monai.transforms.transforms.Orientation`. """ def __init__(self, keys, affine_key, axcodes, labels=None, output_key='orientation'): @@ -115,12 +86,14 @@ def __init__(self, keys, affine_key, axcodes, labels=None, output_key='orientati The affine will be used to compute input data's orientation. axcodes (N elements sequence): for spatial ND input's orientation. e.g. axcodes='RAS' represents 3D orientation: - (Left, Right), (Posterior, Anterior), (Inferior, Superior). + (Left, Right), (Posterior, Anterior), (Inferior, Superior). default orientation labels options are: 'L' and 'R' for the first dimension, 'P' and 'A' for the second, 'I' and 'S' for the third. labels : optional, None or sequence of (2,) sequences (2,) sequences are labels for (beginning, end) of output axis. - see: ``nibabel.orientations.ornt2axcodes``. + + See Also: + `nibabel.orientations.ornt2axcodes`. """ MapTransform.__init__(self, keys) self.affine_key = affine_key @@ -148,7 +121,7 @@ def __init__(self, keys, as_closest_canonical=False, dtype=None, meta_key_format """ Args: keys (hashable items): keys of the corresponding items to be transformed. - See also: monai.transform.composables.MapTransform + See also: :py:class:`monai.transforms.compose.MapTransform` as_closest_canonical (bool): if True, load the image as closest to canonical axis format. dtype (np.dtype, optional): if not None convert the loaded image to this data type. meta_key_format (str): key format to store meta data of the nifti image. @@ -187,7 +160,7 @@ def __init__(self, keys, channel_dim=-1): """ Args: keys (hashable items): keys of the corresponding items to be transformed. - See also: monai.transform.composables.MapTransform + See also: :py:class:`monai.transforms.compose.MapTransform` channel_dim (int): which dimension of input image is the channel, default is the last dimension. """ MapTransform.__init__(self, keys) @@ -211,7 +184,7 @@ def __init__(self, keys): """ Args: keys (hashable items): keys of the corresponding items to be transformed. - See also: monai.transform.composables.MapTransform + See also: :py:class:`monai.transforms.compose.MapTransform` """ MapTransform.__init__(self, keys) self.adder = AddChannel() @@ -273,9 +246,10 @@ def __call__(self, data): class Resized(MapTransform): """ dictionary-based wrapper of Resize. + Args: keys (hashable items): keys of the corresponding items to be transformed. - See also: monai.transform.composables.MapTransform + See also: :py:class:`monai.transforms.compose.MapTransform` output_spatial_shape (tuple or list): expected shape of spatial dimensions after resize operation. order (int): Order of spline interpolation. Default=1. mode (str): Points outside boundaries are filled according to given mode. @@ -285,8 +259,7 @@ class Resized(MapTransform): preserve_range (bool): Whether to keep original range of values. Default is True. If False, input is converted according to conventions of img_as_float. See https://scikit-image.org/docs/dev/user_guide/data_types.html. - anti_aliasing (bool): Whether to apply a gaussian filter to image before down-scaling. - Default is True. + anti_aliasing (bool): Whether to apply a gaussian filter to image before down-scaling. Default is True. 
         anti_aliasing_sigma (float, tuple of floats): Standard deviation for gaussian filtering.
     """

@@ -346,7 +319,7 @@ def __init__(self, keys, prob=0.1, max_k=3, spatial_axes=(0, 1)):
         """
         Args:
             keys (hashable items): keys of the corresponding items to be transformed.
-                See also: monai.transform.composables.MapTransform
+                See also: :py:class:`monai.transforms.compose.MapTransform`
             prob (float): probability of rotating.
                 (Default 0.1, with 10% probability it returns a rotated array.)
             max_k (int): number of rotations will be sampled from `np.random.randint(max_k) + 1`.
@@ -439,7 +412,7 @@ def __call__(self, data):
 @alias('RandAffineD', 'RandAffineDict')
 class RandAffined(Randomizable, MapTransform):
     """
-    A dictionary-based wrapper of ``monai.transforms.transforms.RandAffine``.
+    A dictionary-based wrapper of :py:class:`monai.transforms.transforms.RandAffine`.
     """

     def __init__(self, keys,
@@ -464,8 +437,8 @@ def __init__(self, keys,
             device (torch.device): device on which the tensor will be allocated.

         See also:
-            - ``monai.transform.composables.MapTransform``
-            - ``RandAffineGrid`` for the random affine paramters configurations.
+            - :py:class:`monai.transforms.compose.MapTransform`
+            - :py:class:`RandAffineGrid` for the random affine parameters configurations.
         """
         MapTransform.__init__(self, keys)
         default_mode = 'bilinear' if isinstance(mode, (tuple, list)) else mode
@@ -509,7 +482,7 @@ def __call__(self, data):
 @alias('Rand2DElasticD', 'Rand2DElasticDict')
 class Rand2DElasticd(Randomizable, MapTransform):
     """
-    A dictionary-based wrapper of ``monai.transforms.transforms.Rand2DElastic``.
+    A dictionary-based wrapper of :py:class:`monai.transforms.transforms.Rand2DElastic`.
     """

     def __init__(self, keys,
@@ -535,8 +508,8 @@ def __init__(self, keys,
                 whether to convert it back to numpy arrays.
             device (torch.device): device on which the tensor will be allocated.
         See also:
-            - ``RandAffineGrid`` for the random affine paramters configurations.
-            - ``Affine`` for the affine transformation parameters configurations.
+            - :py:class:`RandAffineGrid` for the random affine parameters configurations.
+            - :py:class:`Affine` for the affine transformation parameters configurations.
         """
         MapTransform.__init__(self, keys)
         default_mode = 'bilinear' if isinstance(mode, (tuple, list)) else mode
@@ -582,7 +555,7 @@ def __call__(self, data):
 @alias('Rand3DElasticD', 'Rand3DElasticDict')
 class Rand3DElasticd(Randomizable, MapTransform):
     """
-    A dictionary-based wrapper of ``monai.transforms.transforms.Rand3DElastic``.
+    A dictionary-based wrapper of :py:class:`monai.transforms.transforms.Rand3DElastic`.
     """

     def __init__(self, keys,
@@ -609,8 +582,8 @@ def __init__(self, keys,
                 whether to convert it back to numpy arrays.
             device (torch.device): device on which the tensor will be allocated.
         See also:
-            - ``RandAffineGrid`` for the random affine paramters configurations.
-            - ``Affine`` for the affine transformation parameters configurations.
+            - :py:class:`RandAffineGrid` for the random affine parameters configurations.
+            - :py:class:`Affine` for the affine transformation parameters configurations.
         """
         MapTransform.__init__(self, keys)
         default_mode = 'bilinear' if isinstance(mode, (tuple, list)) else mode
@@ -656,7 +629,8 @@ def __call__(self, data):
 @alias('FlipD', 'FlipDict')
 class Flipd(MapTransform):
     """Dictionary-based wrapper of Flip.
-    See numpy.flip for additional details.
+
+    See `numpy.flip` for additional details.
     https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html

     Args:
@@ -679,7 +653,8 @@ def __call__(self, data):
 @alias('RandFlipD', 'RandFlipDict')
 class RandFlipd(Randomizable, MapTransform):
     """Dict-based wrapper of RandFlip.
-    See numpy.flip for additional details.
+
+    See `numpy.flip` for additional details.
     https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html

     Args:
@@ -748,18 +723,18 @@ class RandRotated(Randomizable, MapTransform):
     Args:
         prob (float): Probability of rotation.
         degrees (tuple of float or float): Range of rotation in degrees. If single number,
-            angle is picked from (-degrees, degrees).
+            angle is picked from (-degrees, degrees).
         spatial_axes (tuple of 2 ints): Spatial axes of rotation. Default: (0, 1).
             This is the first two axis in spatial dimensions.
         reshape (bool): If true, output shape is made same as input. Default: True.
         order (int): Order of spline interpolation. Range 0-5. Default: 1. This is
             different from scipy where default interpolation is 3.
-        mode (str): Points outside boundary filled according to this mode. Options are
+        mode (str): Points outside boundary filled according to this mode. Options are
             'constant', 'nearest', 'reflect', 'wrap'. Default: 'constant'.
         cval (scalar): Value to fill outside boundary. Default: 0.
         prefilter (bool): Apply spline_filter before interpolation. Default: True.
     """

-    def __init__(self, keys, degrees, prob=0.1, spatial_axes=(0, 1), reshape=True, order=1,
+    def __init__(self, keys, degrees, prob=0.1, spatial_axes=(0, 1), reshape=True, order=1,
                  mode='constant', cval=0, prefilter=True):
         MapTransform.__init__(self, keys)
         self.prob = prob
@@ -847,7 +822,7 @@ class RandZoomd(Randomizable, MapTransform):
         keep_size (bool): Should keep original size (pad if needed).
     """

-    def __init__(self, keys, prob=0.1, min_zoom=0.9,
+    def __init__(self, keys, prob=0.1, min_zoom=0.9,
                  max_zoom=1.1, order=3, mode='constant',
                  cval=0, prefilter=True, use_gpu=False, keep_size=False):
         MapTransform.__init__(self, keys)
@@ -897,11 +872,9 @@ def __init__(self, keys):
         """
         Args:
             keys (hashable items): keys of the corresponding items to be transformed.
-                See also: monai.transform.composables.MapTransform
+                See also: :py:class:`monai.transforms.compose.MapTransform`
         """
         MapTransform.__init__(self, keys)

     def __call__(self, data):
-        for key in self.keys:
-            del data[key]
-        return dict(data)
+        return {key: val for key, val in data.items() if key not in self.keys}
diff --git a/monai/transforms/compose.py b/monai/transforms/compose.py
index d6e5e4aa29..51ff49923a 100644
--- a/monai/transforms/compose.py
+++ b/monai/transforms/compose.py
@@ -8,11 +8,17 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+"""
+A collection of generic interfaces for MONAI transforms.
+"""

 import warnings
+from typing import Hashable

 import numpy as np

+from monai.utils.misc import ensure_tuple
+

 class Transform:
     """
@@ -21,19 +27,27 @@ class Transform:

     It could be stateful and may modify ``data`` in place,
     the implementation should be aware of:
-        - thread safety when mutating its own states.
-          When used from a multi-process context, transform's instance variables are read-only.
-        - ``data`` content unused by this transform may still be used in the
-          subsequent transforms in a composed transform.
-          see also: `monai.transforms.compose.Compose`.
-        - storing too much information in ``data`` may not scale.
+
+        #. thread safety when mutating its own states.
+           When used from a multi-process context, transform's instance variables are read-only.
+        #. ``data`` content unused by this transform may still be used in the
+           subsequent transforms in a composed transform.
+        #. storing too much information in ``data`` may not scale.
+
+    See Also
+
+        :py:class:`monai.transforms.compose.Compose`
     """

     def __call__(self, data):
         """
         ``data`` is an element which often comes from an iteration over an
-        iterable, such as``torch.utils.data.Dataset``. This method should
+        iterable, such as :py:class:`torch.utils.data.Dataset`. This method should
         return an updated version of ``data``.
+        To simplify the input validations, most of the transforms assume that
+
+        - ``data`` component is a "channel-first" array,
+        - the channel dimension is not omitted even if number of channels is one.
         """
         raise NotImplementedError
@@ -48,7 +62,7 @@ class Randomizable:
     def set_random_state(self, seed=None, state=None):
         """
         Set the random state locally, to control the randomness, the derived
-        classes should use `self.R` instead of `np.random` to introduce random
+        classes should use :py:attr:`self.R` instead of `np.random` to introduce random
         factors.

         Args:
@@ -76,63 +90,70 @@ def set_random_state(self, seed=None, state=None):

     def randomize(self):
         """
-        all self.R calls happen here so that we have a better chance to identify errors of sync the random state.
+        Within this method, :py:attr:`self.R` should be used, instead of `np.random`, to introduce random factors.
+
+        all :py:attr:`self.R` calls happen here so that we have a better chance to
+        identify errors of syncing the random state.
         """
         raise NotImplementedError


 class Compose(Randomizable):
     """
-    `Compose` provides the ability to chain a series of calls together in a
+    ``Compose`` provides the ability to chain a series of calls together in a
     sequence. Each transform in the sequence must take a single argument and
     return a single value, so that the transforms can be called in a chain.

-    `Compose` can be used in two ways:
-    1. With a series of transforms that accept and return a single ndarray /
-       / tensor / tensor-like parameter
-    2. With a series of transforms that accept and return a dictionary that
-       contains one or more parameters. Such transforms must have pass-through
-       semantics; unused values in the dictionary must be copied to the return
-       dictionary. It is required that the dictionary is copied between input
-       and output of each transform.
+    ``Compose`` can be used in two ways:
+
+    #. With a series of transforms that accept and return a single
+       ndarray / tensor / tensor-like parameter.
+    #. With a series of transforms that accept and return a dictionary that
+       contains one or more parameters. Such transforms must have pass-through
+       semantics; unused values in the dictionary must be copied to the return
+       dictionary. It is required that the dictionary is copied between input
+       and output of each transform.
+
     If some transform generates a list batch of data in the transform chain,
     every item in the list is still a dictionary, and all the following
     transforms will apply to every item of the list, for example:
-    (1) transformA normalizes the intensity of 'img' field in the dict data.
-    (2) transformB crops out a list batch of images on 'img' and 'seg' field.
-        And constructs a list of dict data, other fields are copied:
-            {                          [{                   {
-             'img': [1, 2],            'img': [1],          'img': [2],
-             'seg': [1, 2],            'seg': [1],          'seg': [2],
-             'extra': 123,    --->     'extra': 123,        'extra': 123,
-             'shape': 'CHWD'           'shape': 'CHWD'      'shape': 'CHWD'
-            }                          },                   }]
-    (3) transformC then randomly rotates or flips 'img' and 'seg' fields of
-        every dictionary item in the list.
+
+    #. transformA normalizes the intensity of 'img' field in the dict data.
+    #. transformB crops out a list batch of images on 'img' and 'seg' field.
+       And constructs a list of dict data, other fields are copied::
+
+           {                          [{                   {
+            'img': [1, 2],            'img': [1],          'img': [2],
+            'seg': [1, 2],            'seg': [1],          'seg': [2],
+            'extra': 123,    -->      'extra': 123,        'extra': 123,
+            'shape': 'CHWD'           'shape': 'CHWD'      'shape': 'CHWD'
+           }                          },                   }]
+
+    #. transformC then randomly rotates or flips 'img' and 'seg' fields of
+       every dictionary item in the list.
+
     When using the pass-through dictionary operation, you can make use of
-    `monai.data.transforms.adaptor` to wrap transforms that don't conform
+    :class:`monai.transforms.adaptors.adaptor` to wrap transforms that don't conform
     to the requirements. This approach allows you to use transforms from
     otherwise incompatible libraries with minimal additional work.

     Note:
-    In many cases, Compose is not the best way to create pre-processing
-    pipelines. Pre-processing is often not a strictly sequential series of
-    operations, and much of the complexity arises when a not-sequential
-    set of functions must be called as if it were a sequence.
-
-    Example: images and labels
-    Images typically require some kind of normalisation that labels do not.
-    Both are then typically augmented through the use of random rotations,
-    flips, and deformations.
-    Compose can be used with a series of transforms that take a dictionary
-    that contains 'image' and 'label' entries. This might require wrapping
-    `torchvision` transforms before passing them to compose.
-    Alternatively, one can create a class with a __call__ function that
-    calls your pre-processing functions taking into account that not all of
-    them are called on the labels
-
-    TODO: example / links to alternative approaches
+        In many cases, Compose is not the best way to create pre-processing
+        pipelines. Pre-processing is often not a strictly sequential series of
+        operations, and much of the complexity arises when a not-sequential
+        set of functions must be called as if it were a sequence.
+
+        Example: images and labels
+        Images typically require some kind of normalisation that labels do not.
+        Both are then typically augmented through the use of random rotations,
+        flips, and deformations.
+        Compose can be used with a series of transforms that take a dictionary
+        that contains 'image' and 'label' entries. This might require wrapping
+        `torchvision` transforms before passing them to compose.
+        Alternatively, one can create a class with a `__call__` function that
+        calls your pre-processing functions taking into account that not all of
+        them are called on the labels.
     """

     def __init__(self, transforms=None):
@@ -169,3 +190,33 @@ def __call__(self, input_):
         else:
             input_ = transform(input_)
         return input_
+
+
+class MapTransform(Transform):
+    """
+    A subclass of :py:class:`monai.transforms.compose.Transform` with an assumption
+    that the ``data`` input of ``self.__call__`` is a MutableMapping such as ``dict``.
+
+    The ``keys`` parameter will be used to get and set the actual data
+    item to transform. That is, the callable of this transform should
+    follow the pattern:
+
+        .. code-block:: python
+
+            def __call__(self, data):
+                for key in self.keys:
+                    if key in data:
+                        # update output data with some_transform_function(data[key]).
+                    else:
+                        # do nothing or some exceptions handling.
+                return data
+
+    """
+
+    def __init__(self, keys):
+        self.keys = ensure_tuple(keys)
+        if not self.keys:
+            raise ValueError('keys unspecified')
+        for key in self.keys:
+            if not isinstance(key, Hashable):
+                raise ValueError('keys should be a hashable or a sequence of hashables, got {}'.format(type(key)))
diff --git a/monai/transforms/transforms.py b/monai/transforms/transforms.py
index 0e326c6f73..7427187512 100644
--- a/monai/transforms/transforms.py
+++ b/monai/transforms/transforms.py
@@ -95,12 +95,13 @@ def __init__(self, axcodes, labels=None):
         Args:
             axcodes (N elements sequence): for spatial ND input's orientation.
                 e.g. axcodes='RAS' represents 3D orientation:
-                (Left, Right), (Posterior, Anterior), (Inferior, Superior).
+                (Left, Right), (Posterior, Anterior), (Inferior, Superior).
                 default orientation labels options are: 'L' and 'R' for the first dimension,
                 'P' and 'A' for the second, 'I' and 'S' for the third.
             labels : optional, None or sequence of (2,) sequences
                 (2,) sequences are labels for (beginning, end) of output axis.
-                see: ``nibabel.orientations.ornt2axcodes``.
+
+        See Also: `nibabel.orientations.ornt2axcodes`.
         """
         self.axcodes = axcodes
         self.labels = labels
@@ -520,8 +521,8 @@ def __call__(self, img):
 class IntensityNormalizer:
     """Normalize input based on provided args, using calculated mean and std if not provided
     (shape of subtrahend and divisor must match. if 0, entire volume uses same subtrahend and
-    divisor, otherwise the shape can have dimension 1 for channels).
-    Current implementation can only support 'channel_last' format data.
+    divisor, otherwise the shape can have dimension 1 for channels).
+    Current implementation can only support 'channel_last' format data.

     Args:
         subtrahend (ndarray): the amount to subtract by (usually the mean)
@@ -897,7 +898,10 @@ def __init__(self,
                 N-th parameter.

         See also:
-            `from monai.transforms.utils import (create_rotate, create_shear, create_translate, create_scale)`
+            - :py:meth:`monai.transforms.utils.create_rotate`
+            - :py:meth:`monai.transforms.utils.create_shear`
+            - :py:meth:`monai.transforms.utils.create_translate`
+            - :py:meth:`monai.transforms.utils.create_scale`
         """
         self.rotate_range = ensure_tuple(rotate_range)
         self.shear_range = ensure_tuple(shear_range)
@@ -944,8 +948,8 @@ def __init__(self, spacing, magnitude_range, as_tensor_output=True, device=None)
         Args:
             spacing (2 or 3 ints): spacing of the grid in 2D or 3D.
                 e.g., spacing=(1, 1) indicates pixel-wise deformation in 2D,
-                spacing=(1, 1, 1) indicates voxel-wise deformation in 3D,
-                spacing=(2, 2) indicates deformation field defined on every other pixel in 2D.
+                spacing=(1, 1, 1) indicates voxel-wise deformation in 3D,
+                spacing=(2, 2) indicates deformation field defined on every other pixel in 2D.
             magnitude_range (2 ints): the random offsets will be generated from
                 `uniform[magnitude[0], magnitude[1])`.
             as_tensor_output (bool): whether to output tensor instead of numpy array.
@@ -1112,8 +1116,8 @@ def __init__(self,
             device (torch.device): device on which the tensor will be allocated.

         See also:
-            RandAffineGrid for the random affine paramters configurations.
-            Affine for the affine transformation parameters configurations.
+            - :py:class:`RandAffineGrid` for the random affine parameters configurations.
+            - :py:class:`Affine` for the affine transformation parameters configurations.
         """
         self.rand_affine_grid = RandAffineGrid(rotate_range=rotate_range,
                                                shear_range=shear_range,
@@ -1191,8 +1195,8 @@ def __init__(self,
             device (torch.device): device on which the tensor will be allocated.

         See also:
-            RandAffineGrid for the random affine paramters configurations.
-            Affine for the affine transformation parameters configurations.
+            - :py:class:`RandAffineGrid` for the random affine parameters configurations.
+            - :py:class:`Affine` for the affine transformation parameters configurations.
         """
         self.deform_grid = RandDeformGrid(spacing=spacing, magnitude_range=magnitude_range,
                                           as_tensor_output=True, device=device)
@@ -1273,8 +1277,8 @@ def __init__(self,
             device (torch.device): device on which the tensor will be allocated.

         See also:
-            - ``RandAffineGrid`` for the random affine paramters configurations.
-            - ``Affine`` for the affine transformation parameters configurations.
+            - :py:class:`RandAffineGrid` for the random affine parameters configurations.
+            - :py:class:`Affine` for the affine transformation parameters configurations.
         """
         self.rand_affine_grid = RandAffineGrid(rotate_range, shear_range, translate_range, scale_range, True, device)
         self.resampler = Resample(padding_mode=padding_mode, as_tensor_output=as_tensor_output, device=device)
diff --git a/monai/transforms/utils.py b/monai/transforms/utils.py
index cc1de277fb..d3ecc1dc13 100644
--- a/monai/transforms/utils.py
+++ b/monai/transforms/utils.py
@@ -17,35 +17,35 @@

 def rand_choice(prob=0.5):
-    """Returns True if a randomly chosen number is less than or equal to `prob', by default this is a 50/50 chance."""
+    """Returns True if a randomly chosen number is less than or equal to `prob`, by default this is a 50/50 chance."""
     return random.random() <= prob


 def img_bounds(img):
-    """Returns the minimum and maximum indices of non-zero lines in axis 0 of `img', followed by that for axis 1."""
+    """Returns the minimum and maximum indices of non-zero lines in axis 0 of `img`, followed by that for axis 1."""
     ax0 = np.any(img, axis=0)
     ax1 = np.any(img, axis=1)
     return np.concatenate((np.where(ax0)[0][[0, -1]], np.where(ax1)[0][[0, -1]]))


 def in_bounds(x, y, margin, maxx, maxy):
-    """Returns True if (x,y) is within the rectangle (margin,margin,maxx-margin,maxy-margin)."""
+    """Returns True if (x,y) is within the rectangle (margin, margin, maxx-margin, maxy-margin)."""
     return margin <= x < (maxx - margin) and margin <= y < (maxy - margin)


 def is_empty(img):
-    """Returns True if `img' is empty, that is its maximum value is not greater than its minimum."""
+    """Returns True if `img` is empty, that is its maximum value is not greater than its minimum."""
     return not (img.max() > img.min())  # use > instead of <= so that an image full of NaNs will result in True


 def ensure_tuple_size(tup, dim):
-    """Returns a copy of `tup' with `dim' values by either shortened or padded with zeros as necessary."""
+    """Returns a copy of `tup` with `dim` values by either shortened or padded with zeros as necessary."""
     tup = tuple(tup) + (0,) * dim
     return tup[:dim]


 def zero_margins(img, margin):
-    """Returns True if the values within `margin' indices of the edges of `img' in dimensions 1 and 2 are 0."""
+    """Returns True if the values within `margin` indices of the edges of `img` in dimensions 1 and 2 are 0."""
     if np.any(img[:, :, :margin]) or np.any(img[:, :, -margin:]):
         return False

@@ -56,7 +56,7 @@ def zero_margins(img, margin):

 def rescale_array(arr, minv=0.0, maxv=1.0, dtype=np.float32):
-    """Rescale the values of numpy array `arr' to be from `minv' to `maxv'."""
+    """Rescale the values of numpy array `arr` to be from `minv` to `maxv`."""
     if dtype is not None:
         arr = arr.astype(dtype)

@@ -71,7 +71,7 @@ def rescale_array(arr, minv=0.0, maxv=1.0, dtype=np.float32):

 def rescale_instance_array(arr, minv=0.0, maxv=1.0, dtype=np.float32):
-    """Rescale each array slice along the first dimension of `arr' independently."""
+    """Rescale each array slice along the first dimension of `arr` independently."""
     out = np.zeros(arr.shape, dtype)
     for i in range(arr.shape[0]):
         out[i] = rescale_array(arr[i], minv, maxv, dtype)
@@ -80,24 +80,27 @@ def rescale_instance_array(arr, minv=0.0, maxv=1.0, dtype=np.float32):

 def rescale_array_int_max(arr, dtype=np.uint16):
-    """Rescale the array `arr' to be between the minimum and maximum values of the type `dtype'."""
+    """Rescale the array `arr` to be between the minimum and maximum values of the type `dtype`."""
     info = np.iinfo(dtype)
     return rescale_array(arr, info.min, info.max).astype(dtype)


 def copypaste_arrays(src, dest, srccenter, destcenter, dims):
     """
-    Calculate the slices to copy a sliced area of array `src' into array `dest'. The area has dimensions `dims' (use 0
-    or None to copy everything in that dimension), the source area is centered at `srccenter' index in `src' and copied
-    into area centered at `destcenter' in `dest'. The dimensions of the copied area will be clipped to fit within the
+    Calculate the slices to copy a sliced area of array `src` into array `dest`. The area has dimensions `dims` (use 0
+    or None to copy everything in that dimension), the source area is centered at `srccenter` index in `src` and copied
+    into area centered at `destcenter` in `dest`. The dimensions of the copied area will be clipped to fit within the
     source and destination arrays so a smaller area may be copied than expected. Return value is the tuples of slice
-    objects indexing the copied area in `src', and those indexing the copy area in `dest'.
+    objects indexing the copied area in `src`, and those indexing the copy area in `dest`.

-    Example:
-        src=np.random.randint(0,10,(6,6))
-        dest=np.zeros_like(src)
-        srcslices,destslices=copypasteArrays(src,dest,(3,2),(2,1),(3,4))
-        dest[destslices]=src[srcslices]
+    Example
+
+    .. code-block:: python
+
+        src = np.random.randint(0,10,(6,6))
+        dest = np.zeros_like(src)
+        srcslices, destslices = copypaste_arrays(src, dest, (3, 2), (2, 1), (3, 4))
+        dest[destslices] = src[srcslices]
         print(src)
         print(dest)

@@ -113,6 +116,7 @@ def copypaste_arrays(src, dest, srccenter, destcenter, dims):
          [4 7 1 8 0 0]
          [0 0 0 0 0 0]
          [0 0 0 0 0 0]]
+
     """
     srcslices = [slice(None)] * src.ndim
     destslices = [slice(None)] * dest.ndim
@@ -132,10 +136,10 @@ def copypaste_arrays(src, dest, srccenter, destcenter, dims):

 def resize_center(img, *resize_dims, fill_value=0):
     """
-    Resize `img' by cropping or expanding the image from the center. The `resizeDims' values are the output dimensions
-    (or None to use original dimension of `img'). If a dimension is smaller than that of `img' then the result will be
-    cropped and if larger padded with zeros, in both cases this is done relative to the center of `img'. The result is
-    a new image with the specified dimensions and values from `img' copied into its center.
+    Resize `img` by cropping or expanding the image from the center. The `resize_dims` values are the output dimensions
+    (or None to use original dimension of `img`). If a dimension is smaller than that of `img` then the result will be
+    cropped and if larger padded with zeros, in both cases this is done relative to the center of `img`. The result is
+    a new image with the specified dimensions and values from `img` copied into its center.
     """
     resize_dims = tuple(resize_dims[i] or img.shape[i] for i in range(len(resize_dims)))

@@ -151,7 +155,7 @@ def resize_center(img, *resize_dims, fill_value=0):

 def one_hot(labels, num_classes):
     """
-    Converts label image `labels' to a one-hot vector with `num_classes' number of channels as last dimension.
+    Converts label image `labels` to a one-hot vector with `num_classes` number of channels as last dimension.
     """
     labels = labels % num_classes
     y = np.eye(num_classes)
@@ -163,6 +167,7 @@

 def generate_pos_neg_label_crop_centers(label, size, num_samples, pos_ratio, rand_state=np.random):
     """Generate valid sample locations based on image with option for specifying foreground ratio
     Valid: samples sitting entirely within image, expected input shape: [C, H, W, D] or [C, H, W]
+
     Args:
         label (numpy.ndarray): use the label data to get the foreground/background information.
         size (list or tuple): size of the ROIs to be sampled.
@@ -246,11 +251,12 @@ def create_control_grid(spatial_shape, spacing, homogeneous=True, dtype=float):
 def create_rotate(spatial_dims, radians):
     """
     create a 2D or 3D rotation matrix
+
     Args:
         spatial_dims (2|3): spatial rank
         radians (float or a sequence of floats): rotation radians
-        when spatial_dims == 3, the `radians` sequence corresponds to
-        rotation in the 1st, 2nd, and 3rd dim respectively.
+            when spatial_dims == 3, the `radians` sequence corresponds to
+            rotation in the 1st, 2nd, and 3rd dim respectively.
     """
     radians = ensure_tuple(radians)
     if spatial_dims == 2:
diff --git a/monai/utils/misc.py b/monai/utils/misc.py
index 261e521adb..d1cbed9f87 100644
--- a/monai/utils/misc.py
+++ b/monai/utils/misc.py
@@ -17,7 +17,7 @@

 def zip_with(op, *vals, mapfunc=map):
     """
-    Map `op`, using `mapfunc`, to each tuple derived from zipping the iterables in `vals'.
+    Map `op`, using `mapfunc`, to each tuple derived from zipping the iterables in `vals`.
     """
     return mapfunc(op, zip(*vals))
diff --git a/monai/visualize/img2tensorboard.py b/monai/visualize/img2tensorboard.py
index 8fce996685..7498ce1c85 100644
--- a/monai/visualize/img2tensorboard.py
+++ b/monai/visualize/img2tensorboard.py
@@ -20,11 +20,10 @@ def _image3_animated_gif(imp, scale_factor=1):
     Function to actually create the animated gif.
     Args:
         imp: tuple of tag and a list of image tensors
-        scale_factor: amount to multiply values by (if the image data is between 0 and 1, using 255 for this value will
-            scale it to displayable range)
+        scale_factor: amount to multiply values by. if the image data is between 0 and 1, using 255 for this value will
+            scale it to displayable range
     """

-    # x=numpy.random.randint(0,256,[10,10,10],numpy.uint8)
     (tag, ims) = imp
     ims = [
         (np.asarray((ims[:, :, i])) * scale_factor).astype(np.uint8)
@@ -63,8 +62,8 @@ def make_animated_gif_summary(tag,
         animation_axes: axis to animate on (not currently used)
         image_axes: axes of image (not currently used)
         other_indices: (not currently used)
-        scale_factor: amount to multiply values by (if the image data is between 0 and 1, using 255 for this value will
-            scale it to displayable range)
+        scale_factor: amount to multiply values by.
+            if the image data is between 0 and 1, using 255 for this value will scale it to displayable range
     """

     if max_out == 1:
@@ -101,8 +100,8 @@ def add_animated_gif(writer, tag, image_tensor, max_out, scale_factor, global_st
         tag: Data identifier
         image_tensor: tensor for the image to add, expected to be in CDHW format
         max_out: maximum number of slices to animate through
-        scale_factor: amount to multiply values by (if the image data is between 0 and 1, using 255 for this value will
-            scale it to displayable range)
+        scale_factor: amount to multiply values by. If the image data is between 0 and 1, using 255 for this value will
+            scale it to displayable range
         global_step: Global step value to record
     """
     writer._get_file_writer().add_summary(make_animated_gif_summary(tag, image_tensor, max_out=max_out,
@@ -119,8 +118,8 @@ def add_animated_gif_no_channels(writer, tag, image_tensor, max_out, scale_facto
         tag: Data identifier
         image_tensor: tensor for the image to add, expected to be in DHW format
         max_out: maximum number of slices to animate through
-        scale_factor: amount to multiply values by (if the image data is between 0 and 1, using 255 for this value will
-            scale it to displayable range)
+        scale_factor: amount to multiply values by. If the image data is between 0 and 1, using 255 for this value will
+            scale it to displayable range
         global_step: Global step value to record
     """
     writer._get_file_writer().add_summary(make_animated_gif_summary(tag, image_tensor.unsqueeze(0),
diff --git a/tests/test_delete_keys.py b/tests/test_delete_keys.py
index 35917e36f5..3bc1d0f11a 100644
--- a/tests/test_delete_keys.py
+++ b/tests/test_delete_keys.py
@@ -18,20 +18,21 @@

 TEST_CASE_1 = [
     {'keys': [str(i) for i in range(30)]},
     20,
-    648,
 ]


 class TestDeleteKeysd(unittest.TestCase):

     @parameterized.expand([TEST_CASE_1])
-    def test_memory(self, input_param, expected_key_size, expected_mem_size):
+    def test_memory(self, input_param, expected_key_size):
         input_data = dict()
         for i in range(50):
             input_data[str(i)] = [time.time()] * 100000

         result = DeleteKeysd(**input_param)(input_data)
         self.assertEqual(len(result.keys()), expected_key_size)
-        self.assertEqual(sys.getsizeof(result), expected_mem_size)
+        self.assertGreaterEqual(
+            sys.getsizeof(input_data) * float(expected_key_size) / len(input_data),
+            sys.getsizeof(result))


 if __name__ == '__main__':
diff --git a/tests/test_map_transform.py b/tests/test_map_transform.py
index bfddfa37b2..10878aa8f9 100644
--- a/tests/test_map_transform.py
+++ b/tests/test_map_transform.py
@@ -13,7 +13,7 @@

 from parameterized import parameterized

-from monai.transforms.composables import MapTransform
+from monai.transforms.compose import MapTransform

 TEST_CASES = [
     ['item', ('item',)],
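The input/output matching rules listed in the `adaptors` docstring above are easiest to see in runnable form. The sketch below reuses the docstring's hypothetical `MyTransform1`/`MyTransform2` names and the `'image'` key, and assumes that `adaptor` merges each callable's output back into the data dictionary under the given output key while passing unused entries through:

```python
# a minimal sketch of the adaptor name-mapping rules described above;
# MyTransform1/MyTransform2 are hypothetical, taken from the docstring example
from monai.transforms.adaptors import adaptor
from monai.transforms.compose import Compose

class MyTransform1:
    # positional parameter name matches the dictionary key: no name map needed
    def __call__(self, image):
        return image * 2.0

class MyTransform2:
    # parameter name 'img' mismatches the key 'image': a name map is required
    def __call__(self, img):
        return img + 1.0

transform = Compose([
    adaptor(MyTransform1(), 'image'),
    adaptor(MyTransform2(), 'image', {'img': 'image'}),
])

# assuming pass-through semantics, 'extra' should survive untouched
print(transform({'image': 1.0, 'extra': 123}))  # expected: {'image': 3.0, 'extra': 123}
```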
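The `MapTransform` pattern that this patch moves into `monai/transforms/compose.py` can be followed directly when writing a custom dictionary-based transform. `AddOned` below is a hypothetical example following that documented pattern, not part of the patch:

```python
# a minimal sketch of the documented MapTransform __call__ pattern
from monai.transforms.compose import MapTransform

class AddOned(MapTransform):
    def __call__(self, data):
        d = dict(data)  # copy the dictionary, honouring the Compose pass-through contract
        for key in self.keys:
            if key in d:
                d[key] = d[key] + 1  # the 'some_transform_function' step
            # else: do nothing, as the docstring suggests
        return d

print(AddOned(keys=['img'])({'img': 41, 'seg': 0}))  # expected: {'img': 42, 'seg': 0}
```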
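The `Randomizable` contract documented above (derived classes draw randomness from `self.R` only inside `randomize()`, and `set_random_state` makes runs reproducible) can likewise be sketched. `RandAddNoise` and its `prob` parameter are hypothetical, and the sketch assumes `self.R` behaves like a `numpy.random.RandomState`:

```python
# a minimal sketch of the self.R / randomize() contract; RandAddNoise is hypothetical
import numpy as np
from monai.transforms.compose import Randomizable, Transform

class RandAddNoise(Randomizable, Transform):
    def __init__(self, prob=0.5):
        self.prob = prob
        self._do_transform = False

    def randomize(self):
        # all self.R calls happen here, as the docstring requires
        self._do_transform = self.R.random_sample() < self.prob

    def __call__(self, img):
        self.randomize()
        if not self._do_transform:
            return img
        return img + self.R.normal(size=img.shape)

t = RandAddNoise(prob=1.0)
t.set_random_state(seed=0)  # local random state makes the output reproducible
print(t(np.zeros((1, 2, 2))))  # channel-first array, per the Transform docstring
```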
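Finally, the rewritten `DeleteKeysd.__call__` returns a filtered copy instead of deleting keys from its input in place, which is what the relaxed memory assertion in `tests/test_delete_keys.py` now bounds-checks. A quick check of that behaviour, with hypothetical sample keys:

```python
# a minimal check of the non-mutating DeleteKeysd introduced by this patch
from monai.transforms.composables import DeleteKeysd

data = {'img': [1, 2], 'seg': [3, 4], 'extra': 123}
out = DeleteKeysd(keys=['extra'])(data)
print(out)              # expected: {'img': [1, 2], 'seg': [3, 4]}
print('extra' in data)  # expected: True, the input dictionary is left intact
```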