99 changes: 74 additions & 25 deletions .github/workflows/integration.yml
@@ -6,10 +6,10 @@ on:
type: [integration-test-command]

jobs:
integration-py3:
integration-auto3dseg:
container:
image: nvcr.io/nvidia/pytorch:22.04-py3 # CUDA 11.6 py38
options: --gpus "device=0" --ipc host # shm-size 4g works fine
options: --gpus "device=1" --ipc host # shm-size 4g works fine
runs-on: [self-hosted, linux, x64, command]
steps:
# checkout the pull request branch
@@ -20,8 +20,7 @@ jobs:
ref: ${{ github.event.client_payload.pull_request.head.ref }}
- name: cache weekly timestamp
id: pip-cache
run: |
echo "datew=$(date '+%Y-%V')" >> $GITHUB_OUTPUT
run: echo "datew=$(date '+%Y-%V')" >> $GITHUB_OUTPUT
- name: cache for pip
uses: actions/cache@v3
id: cache
@@ -32,20 +31,20 @@
key: docker-py3-pip-${{ steps.pip-cache.outputs.datew }}
- name: Install the dependencies
run: |
which python
pwd && git log -1 && which python
python -m pip install --upgrade pip wheel
pip uninstall -y monai
pip uninstall -y monai
pip uninstall -y monai-weekly
pip uninstall -y monai-weekly
python -m pip install --upgrade torch torchvision torchaudio
python -m pip install --upgrade torch torchvision torchaudio torchtext
python -m pip install -r requirements-dev.txt
rm -rf /github/home/.cache/torch/hub/mmars/
- name: Clean directory
run: |
python -m pip list
git config --global --add safe.directory /__w/MONAI/MONAI
git clean -ffdx
git clean -ffdx && git reset --hard HEAD
nvidia-smi
export CUDA_VISIBLE_DEVICES=$(python -m tests.utils -c 1 | tail -n 1)
echo $CUDA_VISIBLE_DEVICES
@@ -57,19 +56,79 @@ jobs:
env:
BUILD_MONAI: 0
run: |
BUILD_MONAI=0 ./runtests.sh --build
pwd && git log -1 && which python
./runtests.sh -b
python -m tests.test_auto3dseg_ensemble
python -m tests.test_auto3dseg_hpo
python -m tests.test_integration_autorunner
python -m tests.test_integration_gpu_customization
- name: Integration tests
shell: bash
env:
BUILD_MONAI: 1
run: ./runtests.sh --build --net

- name: Add reaction
uses: peter-evans/create-or-update-comment@v2
if: github.event.pull_request.number != ''
with:
token: ${{ secrets.PR_MAINTAIN }}
repository: ${{ github.event.client_payload.github.payload.repository.full_name }}
comment-id: ${{ github.event.client_payload.github.payload.comment.id }}
reaction-type: rocket


integration-unit:
container:
image: nvcr.io/nvidia/pytorch:22.04-py3 # CUDA 11.6 py38
options: --gpus "device=2" --ipc host # shm-size 4g works fine
runs-on: [self-hosted, linux, x64, command1]
steps:
# checkout the pull request branch
- uses: actions/checkout@v3
with:
token: ${{ secrets.PR_MAINTAIN }}
repository: ${{ github.event.client_payload.pull_request.head.repo.full_name }}
ref: ${{ github.event.client_payload.pull_request.head.ref }}
- name: cache weekly timestamp
id: pip-cache
run: echo "datew=$(date '+%Y-%V')" >> $GITHUB_OUTPUT
- name: cache for pip
uses: actions/cache@v3
id: cache
with:
path: |
~/.cache/pip
~/.cache/torch
key: docker-py3-pip-${{ steps.pip-cache.outputs.datew }}
- name: Install the dependencies
run: |
pwd && git log -1 && which python
python -m pip install --upgrade pip wheel
pip uninstall -y monai
pip uninstall -y monai
pip uninstall -y monai-weekly
pip uninstall -y monai-weekly
python -m pip install --upgrade torch torchvision torchaudio torchtext
python -m pip install -r requirements-dev.txt
rm -rf /github/home/.cache/torch/hub/mmars/
- name: Clean directory
run: |
python -m pip list
git config --global --add safe.directory /__w/MONAI/MONAI
git clean -ffdx && git reset --hard HEAD
nvidia-smi
export CUDA_VISIBLE_DEVICES=$(python -m tests.utils -c 1 | tail -n 1)
echo $CUDA_VISIBLE_DEVICES
python -c "import torch; print(torch.__version__); print('{} of GPUs available'.format(torch.cuda.device_count()))"
python -c 'import torch; print(torch.rand(5,3, device=torch.device("cuda:0")))'

- name: Auto3dseg latest algo
shell: bash
env:
BUILD_MONAI: 0
run: |
# test latest template
echo "test latest algo"
pwd
cd ../
rm -rf research-contributions
rm -rf algorithm_templates
@@ -82,28 +141,18 @@ jobs:
export OMP_NUM_THREADS=4
export MKL_NUM_THREADS=4
export MONAI_TESTING_ALGO_TEMPLATE=algorithm_templates
pwd && git log -1 && which python
./runtests.sh -b
python -m tests.test_auto3dseg_ensemble
python -m tests.test_auto3dseg_hpo
python -m tests.test_integration_autorunner
python -m tests.test_integration_gpu_customization

- name: Integration tests
shell: bash
env:
BUILD_MONAI: 1
run: |
pwd
ls -ll
./runtests.sh --build --net

- name: Unit tests
shell: bash
env:
BUILD_MONAI: 1
run: |
pwd
ls -ll
./runtests.sh --unittests
QUICKTEST: True
run: ./runtests.sh --build --unittests

- name: Add reaction
uses: peter-evans/create-or-update-comment@v2
@@ -112,4 +161,4 @@ jobs:
token: ${{ secrets.PR_MAINTAIN }}
repository: ${{ github.event.client_payload.github.payload.repository.full_name }}
comment-id: ${{ github.event.client_payload.github.payload.comment.id }}
reaction-type: rocket
reaction-type: +1
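Note on the cache step above: the key `docker-py3-pip-<datew>` rotates once per ISO week, so the pip and torch caches are reused within a week and rebuilt when the week changes. A minimal Python sketch of the same key scheme (illustrative only, not part of the workflow):

import datetime

# `date '+%Y-%V'` in the workflow yields the year plus ISO week number, e.g. "2023-21";
# caches sharing this suffix are reused within the week and recreated afterwards.
datew = datetime.date.today().strftime("%Y-%V")
print(f"docker-py3-pip-{datew}")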
2 changes: 1 addition & 1 deletion monai/apps/auto3dseg/bundle_gen.py
@@ -36,7 +36,7 @@
from monai.utils.enums import AlgoKeys

logger = get_logger(module_name=__name__)
ALGO_HASH = os.environ.get("MONAI_ALGO_HASH", "5b4e379")
ALGO_HASH = os.environ.get("MONAI_ALGO_HASH", "e619d26")

__all__ = ["BundleAlgo", "BundleGen"]

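The default Auto3dSeg algorithm-template revision moves from 5b4e379 to e619d26. A minimal sketch of overriding it (assumed usage; the variable must be set before the module is imported, because the default is read at import time):

import os

os.environ["MONAI_ALGO_HASH"] = "e619d26"  # pin the algorithm templates to a specific revision
from monai.apps.auto3dseg.bundle_gen import ALGO_HASH  # noqa: E402  (import intentionally after env setup)

print(ALGO_HASH)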
4 changes: 1 addition & 3 deletions monai/apps/detection/transforms/box_ops.py
@@ -21,12 +21,10 @@
from monai.data.box_utils import COMPUTE_DTYPE, TO_REMOVE, get_spatial_dims
from monai.transforms import Resize
from monai.transforms.utils import create_scale
from monai.utils import look_up_option, optional_import
from monai.utils import look_up_option
from monai.utils.misc import ensure_tuple, ensure_tuple_rep
from monai.utils.type_conversion import convert_data_type, convert_to_dst_type

scipy, _ = optional_import("scipy")


def _apply_affine_to_points(points: torch.Tensor, affine: torch.Tensor, include_shift: bool = True) -> torch.Tensor:
"""
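The module-level scipy import was unused in this file, so it is dropped along with the optional_import reference. For context, a minimal sketch of MONAI's optional_import pattern (not taken from this diff):

from monai.utils import optional_import

# optional_import returns the module plus an availability flag instead of raising,
# so callers can degrade gracefully when an optional dependency is missing.
scipy, has_scipy = optional_import("scipy")
if has_scipy:
    print("scipy", scipy.__version__, "is available")
else:
    print("scipy is not installed; scipy-backed code paths are skipped")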
1 change: 0 additions & 1 deletion monai/apps/pathology/transforms/post/dictionary.py
@@ -35,7 +35,6 @@
from monai.utils.enums import HoVerNetBranch

find_contours, _ = optional_import("skimage.measure", name="find_contours")
moments, _ = optional_import("skimage.measure", name="moments")

__all__ = [
"WatershedD",
4 changes: 2 additions & 2 deletions monai/inferers/utils.py
@@ -280,7 +280,7 @@ def sliding_window_inference(
else:
sw_device_buffer[ss] *= w_t
sw_device_buffer[ss] = sw_device_buffer[ss].to(device)
_compute_coords(sw_batch_size, unravel_slice, z_scale, output_image_list[ss], sw_device_buffer[ss])
_compute_coords(unravel_slice, z_scale, output_image_list[ss], sw_device_buffer[ss])
sw_device_buffer = []
if buffered:
b_s += 1
@@ -342,7 +342,7 @@ def _create_buffered_slices(slices, batch_size, sw_batch_size, buffer_dim, buffe
return slices, n_per_batch, b_slices, windows_range


def _compute_coords(sw, coords, z_scale, out, patch):
def _compute_coords(coords, z_scale, out, patch):
"""sliding window batch spatial scaling indexing for multi-resolution outputs."""
for original_idx, p in zip(coords, patch):
idx_zm = list(original_idx) # 4D for 2D image, 5D for 3D image
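The first parameter of _compute_coords was unused, so the helper and its call site drop it; the public API is unchanged. A minimal usage sketch of sliding_window_inference (assumed example values, for orientation only):

import torch
from monai.inferers import sliding_window_inference

# NCDHW input; the identity-like predictor stands in for a real network.
image = torch.rand(1, 1, 64, 64, 64)
output = sliding_window_inference(
    inputs=image, roi_size=(32, 32, 32), sw_batch_size=2, predictor=lambda x: x + 1.0
)
print(output.shape)  # torch.Size([1, 1, 64, 64, 64])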
7 changes: 3 additions & 4 deletions monai/networks/layers/factories.py
@@ -67,10 +67,9 @@ def use_factory(fact_args):

import torch.nn as nn

from monai.networks.utils import has_nvfuser_instance_norm
from monai.utils import look_up_option, optional_import

InstanceNorm3dNVFuser, has_nvfuser = optional_import("apex.normalization", name="InstanceNorm3dNVFuser")

__all__ = ["LayerFactory", "Dropout", "Norm", "Act", "Conv", "Pool", "Pad", "split_args"]


@@ -267,12 +266,12 @@ def instance_nvfuser_factory(dim):
warnings.warn(f"`InstanceNorm3dNVFuser` only supports 3d cases, use {types[dim - 1]} instead.")
return types[dim - 1]

if not has_nvfuser:
if not has_nvfuser_instance_norm():
warnings.warn(
"`apex.normalization.InstanceNorm3dNVFuser` is not installed properly, use nn.InstanceNorm3d instead."
)
return nn.InstanceNorm3d
return InstanceNorm3dNVFuser
return optional_import("apex.normalization", name="InstanceNorm3dNVFuser")[0]


Act.add_factory_callable("elu", lambda: nn.modules.ELU)
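With this change apex is only imported when the nvfuser factory is actually used, and availability is checked through the new has_nvfuser_instance_norm helper. A minimal sketch of resolving the norm type (assumed usage):

from monai.networks.layers.factories import Norm

# Resolves via instance_nvfuser_factory(dim=3); when apex's InstanceNorm3dNVFuser
# or its compiled extension is missing, the factory warns and falls back to nn.InstanceNorm3d.
norm_type = Norm["instance_nvfuser", 3]
layer = norm_type(num_features=8)
print(type(layer).__name__)  # InstanceNorm3dNVFuser if available, otherwise InstanceNorm3d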
28 changes: 26 additions & 2 deletions monai/networks/utils.py
@@ -27,10 +27,10 @@
import torch
import torch.nn as nn

from monai.apps.utils import get_logger, optional_import
from monai.apps.utils import get_logger
from monai.config import PathLike
from monai.utils.misc import ensure_tuple, save_obj, set_determinism
from monai.utils.module import look_up_option, pytorch_after
from monai.utils.module import look_up_option, optional_import, pytorch_after
from monai.utils.type_conversion import convert_to_dst_type, convert_to_tensor

onnx, _ = optional_import("onnx")
@@ -59,10 +59,34 @@
"replace_modules_temp",
"look_up_named_module",
"set_named_module",
"has_nvfuser_instance_norm",
]

logger = get_logger(module_name=__name__)

global _has_nvfuser
_has_nvfuser = None


def has_nvfuser_instance_norm():
"""whether the current environment has InstanceNorm3dNVFuser
https://github.com/NVIDIA/apex/blob/23.05-devel/apex/normalization/instance_norm.py#L15-L16
"""
global _has_nvfuser
if _has_nvfuser is not None:
return _has_nvfuser

_, _has_nvfuser = optional_import("apex.normalization", name="InstanceNorm3dNVFuser")
if not _has_nvfuser:
return False
try:
import importlib

importlib.import_module("instance_norm_nvfuser_cuda")
except ImportError:
_has_nvfuser = False
return _has_nvfuser


def look_up_named_module(name: str, mod, print_all_options=False):
"""
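A minimal usage sketch of the new helper (assumed usage):

from monai.networks.utils import has_nvfuser_instance_norm

# The result is cached in the module-level _has_nvfuser flag, so repeated calls are cheap;
# the check also requires the compiled instance_norm_nvfuser_cuda extension, not just apex itself.
if has_nvfuser_instance_norm():
    print("apex InstanceNorm3dNVFuser is usable on this system")
else:
    print("falling back to torch.nn.InstanceNorm3d")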
12 changes: 0 additions & 12 deletions tests/test_dynunet.py
@@ -19,11 +19,8 @@

from monai.networks import eval_mode
from monai.networks.nets import DynUNet
from monai.utils import optional_import
from tests.utils import assert_allclose, skip_if_no_cuda, skip_if_windows, test_script_save

InstanceNorm3dNVFuser, _ = optional_import("apex.normalization", name="InstanceNorm3dNVFuser")

device = "cuda" if torch.cuda.is_available() else "cpu"

strides: Sequence[Sequence[int] | int]
@@ -128,15 +125,6 @@ def test_script(self):
@skip_if_no_cuda
@skip_if_windows
class TestDynUNetWithInstanceNorm3dNVFuser(unittest.TestCase):
def setUp(self):
try:
layer = InstanceNorm3dNVFuser(num_features=1, affine=False).to("cuda:0")
inp = torch.randn([1, 1, 1, 1, 1]).to("cuda:0")
out = layer(inp)
del inp, out, layer
except Exception:
self.skipTest("NVFuser not available")

@parameterized.expand([TEST_CASE_DYNUNET_3D[0]])
def test_consistency(self, input_param, input_shape, _):
for eps in [1e-4, 1e-5]:
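The manual setUp probe is no longer needed because the factory now detects NVFuser availability and falls back on its own. A minimal sketch of building DynUNet with the nvfuser norm (assumed parameters, mirroring but not copying TEST_CASE_DYNUNET_3D):

import torch
from monai.networks.nets import DynUNet

# Assumed parameters: where apex/NVFuser is absent, the norm falls back (with a warning)
# to nn.InstanceNorm3d, so this also runs on CPU-only environments.
net = DynUNet(
    spatial_dims=3,
    in_channels=1,
    out_channels=2,
    kernel_size=[3, 3, 3],
    strides=[1, 2, 2],
    upsample_kernel_size=[2, 2],
    norm_name="instance_nvfuser",
)
x = torch.rand(1, 1, 32, 32, 32)
print(net(x).shape)  # torch.Size([1, 2, 32, 32, 32])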
42 changes: 1 addition & 41 deletions tests/test_lmdbdataset.py
@@ -12,7 +12,6 @@
from __future__ import annotations

import os
import shutil
import tempfile
import unittest

@@ -22,7 +21,7 @@

from monai.data import LMDBDataset, json_hashing
from monai.transforms import Compose, LoadImaged, SimulateDelayd, Transform
from tests.utils import DistCall, DistTestCase, skip_if_windows
from tests.utils import skip_if_windows

TEST_CASE_1 = [
Compose(
@@ -204,44 +203,5 @@ def test_shape(self, transform, expected_shape, kwargs=None):
dataset_postcached.set_data(data=test_data_new) # filename list updated, files do not exist


@skip_if_windows
class TestMPLMDBDataset(DistTestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()

def tearDown(self):
shutil.rmtree(self.tempdir)

@DistCall(nnodes=1, nproc_per_node=1)
def test_mp_cache(self):
items = [[list(range(i))] for i in range(5)]

ds = LMDBDataset(items, transform=_InplaceXform(), cache_dir=self.tempdir, lmdb_kwargs={"map_size": 10 * 1024})
self.assertEqual(items, [[[]], [[0]], [[0, 1]], [[0, 1, 2]], [[0, 1, 2, 3]]])
ds1 = LMDBDataset(items, transform=_InplaceXform(), cache_dir=self.tempdir, lmdb_kwargs={"map_size": 10 * 1024})
self.assertEqual(list(ds1), list(ds))
self.assertEqual(items, [[[]], [[0]], [[0, 1]], [[0, 1, 2]], [[0, 1, 2, 3]]])

ds = LMDBDataset(
items,
transform=_InplaceXform(),
cache_dir=self.tempdir,
lmdb_kwargs={"map_size": 10 * 1024},
hash_func=json_hashing,
)
self.assertEqual(items, [[[]], [[0]], [[0, 1]], [[0, 1, 2]], [[0, 1, 2, 3]]])
ds1 = LMDBDataset(
items,
transform=_InplaceXform(),
cache_dir=self.tempdir,
lmdb_kwargs={"map_size": 10 * 1024},
hash_func=json_hashing,
)
self.assertEqual(list(ds1), list(ds))
self.assertEqual(items, [[[]], [[0]], [[0, 1]], [[0, 1, 2]], [[0, 1, 2, 3]]])

self.assertTrue(isinstance(ds1.info(), dict))


if __name__ == "__main__":
unittest.main()
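This removes the multiprocessing variant of the LMDBDataset test. For reference, a minimal LMDBDataset sketch (assumed usage; requires the lmdb package):

import tempfile

from monai.data import LMDBDataset
from monai.transforms import Lambda

# Transform outputs are pickled into an LMDB store under cache_dir and reused on later accesses.
with tempfile.TemporaryDirectory() as cache_dir:
    ds = LMDBDataset(
        data=[{"x": i} for i in range(4)],
        transform=Lambda(lambda item: {"x": item["x"] * 10}),
        cache_dir=cache_dir,
        lmdb_kwargs={"map_size": 10 * 1024 * 1024},
    )
    print(ds[2])      # {'x': 20}
    print(ds.info())  # LMDB environment statistics as a dict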