Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
35 commits
Select commit Hold shift + click to select a range
080f57c
staging
wyli Mar 27, 2023
60037e0
opt
wyli Mar 30, 2023
1e144eb
valid
wyli Mar 31, 2023
0c11323
update
wyli Mar 31, 2023
011854f
fixes copying
wyli Mar 31, 2023
9bb2b4d
more tests
wyli Mar 31, 2023
857d286
update
wyli Mar 31, 2023
bbb91fe
update api
wyli Mar 31, 2023
efd796a
update
wyli Mar 31, 2023
9a36eee
fixes test cases
wyli Mar 31, 2023
e52effe
optimize
wyli Apr 1, 2023
2b0c25b
adds test cases
wyli Apr 1, 2023
3bb4a53
update type
wyli Apr 1, 2023
f56888d
more tests
wyli Apr 1, 2023
bd42c16
update
wyli Apr 1, 2023
8cd28e9
nonblocking copy
wyli Apr 1, 2023
10d665f
nonblocking copy
wyli Apr 1, 2023
f5211df
docs
wyli Apr 1, 2023
e85efe9
simplify slice1
wyli Apr 2, 2023
ad8c9f8
Merge remote-tracking branch 'upstream/dev' into sliding-windows
wyli Apr 2, 2023
e879a9c
update
wyli Apr 2, 2023
d787eef
simplify non-blocking flag when buffered=True
wyli Apr 2, 2023
545ad5e
fixes no cuda
wyli Apr 2, 2023
b55b892
update docs
wyli Apr 3, 2023
1ac4f78
prepare weight map dims
wyli Apr 3, 2023
2c8248d
compatible sliding_window_hovernet_inference
wyli Apr 3, 2023
f80654e
Merge branch 'dev' into sliding-windows
wyli Apr 4, 2023
ab89dd9
simplify
wyli Apr 5, 2023
870623c
Merge remote-tracking branch 'upstream/dev' into sliding-windows
wyli Apr 5, 2023
e606d55
remove existing
wyli Apr 5, 2023
d77bb90
update
wyli Apr 5, 2023
be570b4
Merge branch 'dev' into sliding-windows
wyli Apr 5, 2023
b4a8a9a
update
wyli Apr 5, 2023
a0cf134
update
wyli Apr 5, 2023
e390bc2
Merge branch 'temp-tests' into sliding-windows
wyli Apr 5, 2023
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 15 additions & 2 deletions .github/workflows/integration.yml
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,10 @@ jobs:
run: |
which python
python -m pip install --upgrade pip wheel
pip uninstall -y monai
pip uninstall -y monai
pip uninstall -y monai-weekly
pip uninstall -y monai-weekly
python -m pip install --upgrade torch torchvision torchaudio
python -m pip install -r requirements-dev.txt
rm -rf /github/home/.cache/torch/hub/mmars/
Expand All @@ -51,18 +55,24 @@ jobs:
python -c 'import torch; print(torch.rand(5,3, device=torch.device("cuda:0")))'

# test auto3dseg
echo "test tag algo"
BUILD_MONAI=0 ./runtests.sh --build
python -m tests.test_auto3dseg_ensemble
python -m tests.test_auto3dseg_hpo
python -m tests.test_integration_autorunner
python -m tests.test_integration_gpu_customization

# test latest template
echo "test latest algo"
cd ../
rm -rf research-contributions
rm -rf algorithm_templates
git clone --depth 1 --branch main --single-branch https://github.com/Project-MONAI/research-contributions.git
ls research-contributions/
cp -r research-contributions/auto3dseg/algorithm_templates ../MONAI/
cd research-contributions && git log -1 && cd ..
cp -r research-contributions/auto3dseg/algorithm_templates MONAI/
cd research-contributions && git log -1 && cd ../MONAI
pwd
ls -ll
export OMP_NUM_THREADS=4
export MKL_NUM_THREADS=4
export MONAI_TESTING_ALGO_TEMPLATE=algorithm_templates
Expand All @@ -72,6 +82,9 @@ jobs:
python -m tests.test_integration_gpu_customization

# the other tests
echo "the other tests"
pwd
ls -ll
BUILD_MONAI=1 ./runtests.sh --build --net
BUILD_MONAI=1 ./runtests.sh --build --unittests
if pgrep python; then pkill python; fi
Expand Down
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -149,3 +149,4 @@ tests/testing_data/CT_2D_head_moving.mha

# profiling results
*.prof
runs
2 changes: 2 additions & 0 deletions monai/apps/pathology/inferers/inferer.py
Original file line number Diff line number Diff line change
Expand Up @@ -176,6 +176,8 @@ def __call__(
self.progress,
self.roi_weight_map,
self.process_output,
self.buffer_steps,
self.buffer_dim,
*args,
**kwargs,
)
Expand Down
12 changes: 10 additions & 2 deletions monai/data/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -163,7 +163,7 @@ def iter_patch_slices(


def dense_patch_slices(
image_size: Sequence[int], patch_size: Sequence[int], scan_interval: Sequence[int]
image_size: Sequence[int], patch_size: Sequence[int], scan_interval: Sequence[int], return_slice: bool = True
) -> list[tuple[slice, ...]]:
"""
Enumerate all slices defining ND patches of size `patch_size` from an `image_size` input image.
Expand All @@ -172,6 +172,7 @@ def dense_patch_slices(
image_size: dimensions of image to iterate over
patch_size: size of patches to generate slices
scan_interval: dense patch sampling interval
return_slice: whether to return a list of slices (or tuples of indices), defaults to True

Returns:
a list of slice objects defining each patch
Expand Down Expand Up @@ -199,7 +200,9 @@ def dense_patch_slices(
dim_starts.append(start_idx)
starts.append(dim_starts)
out = np.asarray([x.flatten() for x in np.meshgrid(*starts, indexing="ij")]).T
return [tuple(slice(s, s + patch_size[d]) for d, s in enumerate(x)) for x in out]
if return_slice:
return [tuple(slice(s, s + patch_size[d]) for d, s in enumerate(x)) for x in out]
return [tuple((s, s + patch_size[d]) for d, s in enumerate(x)) for x in out] # type: ignore


def iter_patch_position(
Expand Down Expand Up @@ -1056,6 +1059,7 @@ def compute_importance_map(
mode: BlendMode | str = BlendMode.CONSTANT,
sigma_scale: Sequence[float] | float = 0.125,
device: torch.device | int | str = "cpu",
dtype: torch.dtype | str | None = torch.float32,
) -> torch.Tensor:
"""Get importance map for different weight modes.

Expand All @@ -1070,6 +1074,7 @@ def compute_importance_map(
sigma_scale: Sigma_scale to calculate sigma for each dimension
(sigma = sigma_scale * dim_size). Used for gaussian mode only.
device: Device to put importance map on.
dtype: Data type of the output importance map.

Raises:
ValueError: When ``mode`` is not one of ["constant", "gaussian"].
Expand All @@ -1096,6 +1101,9 @@ def compute_importance_map(
raise ValueError(
f"Unsupported mode: {mode}, available options are [{BlendMode.CONSTANT}, {BlendMode.CONSTANT}]."
)
# handle non-positive weights
min_non_zero = max(torch.min(importance_map).item(), 1e-3)
importance_map = torch.clamp_(importance_map.to(torch.float), min=min_non_zero).to(dtype)
return importance_map


Expand Down
12 changes: 11 additions & 1 deletion monai/inferers/inferer.py
Original file line number Diff line number Diff line change
Expand Up @@ -366,6 +366,9 @@ class SlidingWindowInferer(Inferer):
cpu_thresh: when provided, dynamically switch to stitching on cpu (to save gpu memory)
when input image volume is larger than this threshold (in pixels/voxels).
Otherwise use ``"device"``. Thus, the output may end-up on either cpu or gpu.
buffer_steps: the number of sliding window iterations before writing the outputs to ``device``.
default is None, no buffer.
buffer_dim: the dimension along which the buffer are created, default is 0.

Note:
``sw_batch_size`` denotes the max number of windows per network inference iteration,
Expand All @@ -387,6 +390,8 @@ def __init__(
progress: bool = False,
cache_roi_weight_map: bool = False,
cpu_thresh: int | None = None,
buffer_steps: int | None = None,
buffer_dim: int = 0,
) -> None:
super().__init__()
self.roi_size = roi_size
Expand All @@ -400,6 +405,8 @@ def __init__(
self.device = device
self.progress = progress
self.cpu_thresh = cpu_thresh
self.buffer_steps = buffer_steps
self.buffer_dim = buffer_dim

# compute_importance_map takes long time when computing on cpu. We thus
# compute it once if it's static and then save it for future usage
Expand All @@ -415,7 +422,8 @@ def __init__(
warnings.warn("cache_roi_weight_map=True, but cache is not created. (dynamic roi_size?)")
except BaseException as e:
raise RuntimeError(
"Seems to be OOM. Please try smaller roi_size, or use mode='constant' instead of mode='gaussian'. "
f"roi size {self.roi_size}, mode={mode}, sigma_scale={sigma_scale}, device={device}\n"
"Seems to be OOM. Please try smaller patch size or mode='constant' instead of mode='gaussian'."
) from e

def __call__(
Expand Down Expand Up @@ -455,6 +463,8 @@ def __call__(
self.progress,
self.roi_weight_map,
None,
self.buffer_steps,
self.buffer_dim,
*args,
**kwargs,
)
Expand Down
Loading