From eef70a78912826a9f59033bb097e034c102adaab Mon Sep 17 00:00:00 2001 From: Yiheng Wang <68361391+yiheng-wang-nv@users.noreply.github.com> Date: Mon, 6 Jan 2025 18:45:21 +0800 Subject: [PATCH 01/55] 8274 Relax gpu load check (#8282) Related to #8274 , this PR is used to check potential issues. When I used the same environment as the nightly test, the error was not reproduced. Therefore, I hope the new change can show more information about the error. ### Description A few sentences describing the changes proposed in this pull request. ### Types of changes - [x] Non-breaking change (fix or new feature that would not break existing functionality). - [ ] Breaking change (fix or new feature that would cause existing functionality to change). - [ ] New tests added to cover the changes. - [ ] Integration tests passed locally by running `./runtests.sh -f -u --net --coverage`. - [ ] Quick tests passed locally by running `./runtests.sh --quick --unittests --disttests`. - [ ] In-line docstrings updated. - [ ] Documentation updated, tested `make html` command in the `docs/` folder. --------- Signed-off-by: Yiheng Wang Signed-off-by: Can-Zhao --- tests/test_load_image.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_load_image.py b/tests/test_load_image.py index aa8b71b7fa..dc0af5e97e 100644 --- a/tests/test_load_image.py +++ b/tests/test_load_image.py @@ -233,7 +233,7 @@ def test_nibabel_reader_gpu(self, input_param, filenames, expected_shape): input_param_cpu = input_param.copy() input_param_cpu["to_gpu"] = False result_cpu = LoadImage(image_only=True, **input_param_cpu)(filenames) - self.assertTrue(torch.allclose(result_cpu, result.cpu(), atol=1e-6)) + assert_allclose(result_cpu, result.cpu(), atol=1e-6) @parameterized.expand([TEST_CASE_6, TEST_CASE_7, TEST_CASE_8, TEST_CASE_8_1, TEST_CASE_9]) def test_itk_reader(self, input_param, filenames, expected_shape): From f650feb2ffefb9ac7dade5fd6787f43c01010a83 Mon Sep 17 00:00:00 2001 From: Pooya Mohammadi Kazaj Date: Fri, 10 Jan 2025 16:35:44 +0100 Subject: [PATCH 02/55] bug: Fix PatchMerging duplicate merging (#8285) Fixes # . ### Description A few sentences describing the changes proposed in this pull request. ### Types of changes - [x] Non-breaking change (fix or new feature that would not break existing functionality). 
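For context: this `PatchMerging.forward` downsamples a `[B, D, H, W, C]` feature map by concatenating the eight `(d, h, w)` parity slices along the channel axis. In the previous indexing, two of the eight slices duplicated earlier ones while two parity combinations were never sampled. A minimal sketch (illustrative only, not the PR's code) of the intended enumeration:

```python
# Sketch: every voxel must land in exactly one of the eight octant slices.
import itertools
import torch

x = torch.arange(2 * 4 * 4 * 4 * 1, dtype=torch.float32).reshape(2, 4, 4, 4, 1)
octants = [x[:, i::2, j::2, k::2, :] for i, j, k in itertools.product((0, 1), repeat=3)]
merged = torch.cat(octants, dim=-1)  # shape: (2, 2, 2, 2, 8)
assert merged.numel() == x.numel()  # nothing duplicated, nothing dropped
```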
Fixing issue #8284 In this format there are no duplicates: ``` t = [ (0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1), (1, 0, 1), (1, 1, 0), (0, 1, 1), (1, 1, 1), ] print(set(t)) # {(1, 0, 1), (1, 1, 0), (0, 1, 0), (0, 0, 0), (1, 0, 0), (0, 0, 1), (1, 1, 1), (0, 1, 1)} ``` --------- Signed-off-by: pooya-mohammadi Signed-off-by: Can-Zhao --- monai/networks/nets/swin_unetr.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/monai/networks/nets/swin_unetr.py b/monai/networks/nets/swin_unetr.py index 77f0d2ec2f..cfc5dda41f 100644 --- a/monai/networks/nets/swin_unetr.py +++ b/monai/networks/nets/swin_unetr.py @@ -782,9 +782,9 @@ def forward(self, x): x1 = x[:, 1::2, 0::2, 0::2, :] x2 = x[:, 0::2, 1::2, 0::2, :] x3 = x[:, 0::2, 0::2, 1::2, :] - x4 = x[:, 1::2, 0::2, 1::2, :] - x5 = x[:, 0::2, 1::2, 0::2, :] - x6 = x[:, 0::2, 0::2, 1::2, :] + x4 = x[:, 1::2, 1::2, 0::2, :] + x5 = x[:, 1::2, 0::2, 1::2, :] + x6 = x[:, 0::2, 1::2, 1::2, :] x7 = x[:, 1::2, 1::2, 1::2, :] x = torch.cat([x0, x1, x2, x3, x4, x5, x6, x7], -1) x = self.norm(x) From 5da95c8666b7ba1614e9eec0ada723f04f7a4d3b Mon Sep 17 00:00:00 2001 From: Yiheng Wang <68361391+yiheng-wang-nv@users.noreply.github.com> Date: Tue, 14 Jan 2025 15:27:12 +0800 Subject: [PATCH 03/55] Fix test load image issue (#8297) Fixes https://github.com/Project-MONAI/MONAI/issues/8274 . ### Description The new test has already tested with the same 24.08 + A100 env. I did some tests but cannot reproduce the original test case error (there are NaN values or significant small/large data). Since only 24.08 base image has the issue (24.10 does not have), I decided to use a different test case for 24.08 and prepared this PR ### Types of changes - [x] Non-breaking change (fix or new feature that would not break existing functionality). - [ ] Breaking change (fix or new feature that would cause existing functionality to change). - [ ] New tests added to cover the changes. - [ ] Integration tests passed locally by running `./runtests.sh -f -u --net --coverage`. - [ ] Quick tests passed locally by running `./runtests.sh --quick --unittests --disttests`. - [ ] In-line docstrings updated. - [ ] Documentation updated, tested `make html` command in the `docs/` folder. 
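The workaround itself is small; a sketch of the guard added in the test (it mirrors the diff below rather than introducing new behaviour):

```python
# Sketch: on the 24.08 container build, use integer-valued test data to avoid the
# GPU-load discrepancy; elsewhere keep the original float volume.
import numpy as np
import torch

if torch.__version__.endswith("nv24.8"):
    test_image = torch.randint(0, 256, (128, 128, 128), dtype=torch.uint8).numpy()
else:
    test_image = np.random.rand(128, 128, 128)
```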
--------- Signed-off-by: Yiheng Wang Signed-off-by: Can-Zhao --- tests/test_load_image.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/test_load_image.py b/tests/test_load_image.py index dc0af5e97e..498b9972b4 100644 --- a/tests/test_load_image.py +++ b/tests/test_load_image.py @@ -217,7 +217,12 @@ def test_nibabel_reader(self, input_param, filenames, expected_shape): @SkipIfNoModule("kvikio") @parameterized.expand([TEST_CASE_GPU_1, TEST_CASE_GPU_2, TEST_CASE_GPU_3, TEST_CASE_GPU_4]) def test_nibabel_reader_gpu(self, input_param, filenames, expected_shape): - test_image = np.random.rand(128, 128, 128) + if torch.__version__.endswith("nv24.8"): + # related issue: https://github.com/Project-MONAI/MONAI/issues/8274 + # for this version, use randint test case to avoid the issue + test_image = torch.randint(0, 256, (128, 128, 128), dtype=torch.uint8).numpy() + else: + test_image = np.random.rand(128, 128, 128) with tempfile.TemporaryDirectory() as tempdir: for i, name in enumerate(filenames): filenames[i] = os.path.join(tempdir, name) From d14b6bfdc31d3f65d78ee1019e2b8340a925a88b Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Wed, 15 Jan 2025 15:08:47 +0800 Subject: [PATCH 04/55] Using LocalStore in Zarr v3 (#8299) Fixes #8298 ### Types of changes - [x] Non-breaking change (fix or new feature that would not break existing functionality). - [ ] Breaking change (fix or new feature that would cause existing functionality to change). - [ ] New tests added to cover the changes. - [ ] Integration tests passed locally by running `./runtests.sh -f -u --net --coverage`. - [ ] Quick tests passed locally by running `./runtests.sh --quick --unittests --disttests`. - [ ] In-line docstrings updated. - [ ] Documentation updated, tested `make html` command in the `docs/` folder. 
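Background for the change: Zarr 3.0 replaces `zarr.storage.DirectoryStore` with `zarr.storage.LocalStore`, so the test selects the store class by version (the same logic as the diff below, shown here as a standalone sketch):

```python
# Sketch: pick a filesystem-backed zarr store that works on both zarr 2.x and 3.x.
from monai.utils import get_package_version, optional_import, version_geq

zarr, has_zarr = optional_import("zarr")
if has_zarr:
    if version_geq(get_package_version("zarr"), "3.0.0"):
        store = zarr.storage.LocalStore("test.zarr")      # zarr >= 3.0
    else:
        store = zarr.storage.DirectoryStore("test.zarr")  # zarr < 3.0
```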
--------- Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> Co-authored-by: Eric Kerfoot <17726042+ericspod@users.noreply.github.com> Signed-off-by: Can-Zhao --- tests/test_zarr_avg_merger.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/tests/test_zarr_avg_merger.py b/tests/test_zarr_avg_merger.py index de7fad48da..a52dbceb4c 100644 --- a/tests/test_zarr_avg_merger.py +++ b/tests/test_zarr_avg_merger.py @@ -19,11 +19,18 @@ from torch.nn.functional import pad from monai.inferers import ZarrAvgMerger -from monai.utils import optional_import +from monai.utils import get_package_version, optional_import, version_geq from tests.utils import assert_allclose np.seterr(divide="ignore", invalid="ignore") zarr, has_zarr = optional_import("zarr") +if has_zarr: + if version_geq(get_package_version("zarr"), "3.0.0"): + directory_store = zarr.storage.LocalStore("test.zarr") + else: + directory_store = zarr.storage.DirectoryStore("test.zarr") +else: + directory_store = None numcodecs, has_numcodecs = optional_import("numcodecs") TENSOR_4x4 = torch.randint(low=0, high=255, size=(2, 3, 4, 4), dtype=torch.float32) @@ -154,7 +161,7 @@ # explicit directory store TEST_CASE_10_DIRECTORY_STORE = [ - dict(merged_shape=TENSOR_4x4.shape, store=zarr.storage.DirectoryStore("test.zarr")), + dict(merged_shape=TENSOR_4x4.shape, store=directory_store), [ (TENSOR_4x4[..., :2, :2], (0, 0)), (TENSOR_4x4[..., :2, 2:], (0, 2)), From e516098e0064477ee3a47c8ce829b9ba2c5b41bf Mon Sep 17 00:00:00 2001 From: advcu <65158236+advcu987@users.noreply.github.com> Date: Mon, 20 Jan 2025 07:26:06 +0100 Subject: [PATCH 05/55] 8267 fix normalize intensity (#8286) Fixes #8267 . ### Description Fix channel-wise intensity normalization for integer type inputs. ### Types of changes - [ ] Non-breaking change (fix or new feature that would not break existing functionality). - [x] Breaking change (fix or new feature that would cause existing functionality to change). - [x] New tests added to cover the changes. - [x] Integration tests passed locally by running `./runtests.sh -f -u --net --coverage`. - [x] Quick tests passed locally by running `./runtests.sh --quick --unittests --disttests`. - [ ] In-line docstrings updated. - [x] Documentation updated, tested `make html` command in the `docs/` folder. --------- Signed-off-by: advcu987 Signed-off-by: advcu <65158236+advcu987@users.noreply.github.com> Co-authored-by: Eric Kerfoot <17726042+ericspod@users.noreply.github.com> Signed-off-by: Can-Zhao --- monai/transforms/intensity/array.py | 4 ++++ tests/test_normalize_intensity.py | 21 +++++++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py index 20000c52c4..8fe658ad3e 100644 --- a/monai/transforms/intensity/array.py +++ b/monai/transforms/intensity/array.py @@ -821,6 +821,7 @@ class NormalizeIntensity(Transform): mean and std on each channel separately. When `channel_wise` is True, the first dimension of `subtrahend` and `divisor` should be the number of image channels if they are not None. + If the input is not of floating point type, it will be converted to float32 Args: subtrahend: the amount to subtract by (usually the mean). 
@@ -907,6 +908,9 @@ def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: if self.divisor is not None and len(self.divisor) != len(img): raise ValueError(f"img has {len(img)} channels, but divisor has {len(self.divisor)} components.") + if not img.dtype.is_floating_point: + img, *_ = convert_data_type(img, dtype=torch.float32) + for i, d in enumerate(img): img[i] = self._normalize( # type: ignore d, diff --git a/tests/test_normalize_intensity.py b/tests/test_normalize_intensity.py index 72ebf579e1..7efd0d83e5 100644 --- a/tests/test_normalize_intensity.py +++ b/tests/test_normalize_intensity.py @@ -108,6 +108,27 @@ def test_channel_wise(self, im_type): normalized = normalizer(input_data) assert_allclose(normalized, im_type(expected), type_test="tensor") + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_channel_wise_int(self, im_type): + normalizer = NormalizeIntensity(nonzero=True, channel_wise=True) + input_data = im_type(torch.arange(1, 25).reshape(2, 3, 4)) + expected = np.array( + [ + [ + [-1.593255, -1.3035723, -1.0138896, -0.7242068], + [-0.4345241, -0.1448414, 0.1448414, 0.4345241], + [0.7242068, 1.0138896, 1.3035723, 1.593255], + ], + [ + [-1.593255, -1.3035723, -1.0138896, -0.7242068], + [-0.4345241, -0.1448414, 0.1448414, 0.4345241], + [0.7242068, 1.0138896, 1.3035723, 1.593255], + ], + ] + ) + normalized = normalizer(input_data) + assert_allclose(normalized, im_type(expected), type_test="tensor", rtol=1e-7, atol=1e-7) # tolerance + @parameterized.expand([[p] for p in TEST_NDARRAYS]) def test_value_errors(self, im_type): input_data = im_type(np.array([[0.0, 3.0, 0.0, 4.0], [0.0, 4.0, 0.0, 5.0]])) From 26ff1b6f3ea8c077a9df4e46a9a6c0321523655b Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Tue, 21 Jan 2025 08:25:39 +0800 Subject: [PATCH 06/55] Fix bundle download error from ngc source (#8307) Fixes #8306 This previous api has been deprecated, update based on: https://docs.ngc.nvidia.com/api/?urls.primaryName=Private%20Artifacts%20(Models)%20API#/artifact-file-controller/downloadAllArtifactFiles ### Types of changes - [x] Non-breaking change (fix or new feature that would not break existing functionality). - [ ] Breaking change (fix or new feature that would cause existing functionality to change). - [ ] New tests added to cover the changes. - [ ] Integration tests passed locally by running `./runtests.sh -f -u --net --coverage`. - [ ] Quick tests passed locally by running `./runtests.sh --quick --unittests --disttests`. - [ ] In-line docstrings updated. - [ ] Documentation updated, tested `make html` command in the `docs/` folder. 
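Concretely, the deprecated `.../versions/{version}/zip` endpoint is replaced by `.../versions/{version}/files`, which returns the artifact's file list; each listed file is then downloaded individually. A hedged illustration of the response shape the new `_get_all_download_files` helper expects (the file names below are hypothetical; only the `modelFiles`/`path` keys are consumed):

```python
# Hypothetical JSON body returned by the files endpoint, shown as a Python dict.
model_info = {
    "modelFiles": [
        {"path": "configs/metadata.json"},   # hypothetical file name
        {"path": "configs/inference.json"},  # hypothetical file name
        {"path": "models/model.pt"},         # hypothetical file name
    ]
}
file_paths = [f["path"] for f in model_info["modelFiles"]]
# each path is then fetched as f"{url}/{path}" via the existing download_url helper
```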
--------- Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> Signed-off-by: Can-Zhao --- monai/bundle/scripts.py | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/monai/bundle/scripts.py b/monai/bundle/scripts.py index 131c78008b..5089f0c045 100644 --- a/monai/bundle/scripts.py +++ b/monai/bundle/scripts.py @@ -174,7 +174,7 @@ def _get_git_release_url(repo_owner: str, repo_name: str, tag_name: str, filenam def _get_ngc_bundle_url(model_name: str, version: str) -> str: - return f"{NGC_BASE_URL}/{model_name.lower()}/versions/{version}/zip" + return f"{NGC_BASE_URL}/{model_name.lower()}/versions/{version}/files" def _get_ngc_private_base_url(repo: str) -> str: @@ -218,6 +218,21 @@ def _remove_ngc_prefix(name: str, prefix: str = "monai_") -> str: return name +def _get_all_download_files(request_url: str, headers: dict | None = None) -> list[str]: + if not has_requests: + raise ValueError("requests package is required, please install it.") + headers = {} if headers is None else headers + response = requests_get(request_url, headers=headers) + response.raise_for_status() + model_info = json.loads(response.text) + + if not isinstance(model_info, dict) or "modelFiles" not in model_info: + raise ValueError("The data is not a dictionary or it does not have the key 'modelFiles'.") + + model_files = model_info["modelFiles"] + return [f["path"] for f in model_files] + + def _download_from_ngc( download_path: Path, filename: str, @@ -229,12 +244,12 @@ def _download_from_ngc( # ensure prefix is contained filename = _add_ngc_prefix(filename, prefix=prefix) url = _get_ngc_bundle_url(model_name=filename, version=version) - filepath = download_path / f"{filename}_v{version}.zip" if remove_prefix: filename = _remove_ngc_prefix(filename, prefix=remove_prefix) - extract_path = download_path / f"{filename}" - download_url(url=url, filepath=filepath, hash_val=None, progress=progress) - extractall(filepath=filepath, output_dir=extract_path, has_base=True) + filepath = download_path / filename + filepath.mkdir(parents=True, exist_ok=True) + for file in _get_all_download_files(url): + download_url(url=f"{url}/{file}", filepath=f"{filepath}/{file}", hash_val=None, progress=progress) def _download_from_ngc_private( From 8f4bdcff14feb9c2bb3141e6eb10e13418ce2b54 Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Fri, 24 Jan 2025 23:00:48 +0800 Subject: [PATCH 07/55] Fix deprecated usage in zarr (#8313) Fixes #8298 ### Types of changes - [x] Non-breaking change (fix or new feature that would not break existing functionality). - [ ] Breaking change (fix or new feature that would cause existing functionality to change). - [ ] New tests added to cover the changes. - [ ] Integration tests passed locally by running `./runtests.sh -f -u --net --coverage`. - [ ] Quick tests passed locally by running `./runtests.sh --quick --unittests --disttests`. - [ ] In-line docstrings updated. - [ ] Documentation updated, tested `make html` command in the `docs/` folder. 
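The main deprecation handled here: Zarr 3.0 removes `zarr.storage.TempStore`, which `ZarrAvgMerger` used for its scratch value/count arrays. The replacement builds an equivalent temporary store by hand; a short sketch of the idea (assuming zarr >= 3.0 is installed):

```python
# Sketch: a temporary, filesystem-backed zarr store for zarr >= 3.0.
from tempfile import TemporaryDirectory

import zarr

tmpdir = TemporaryDirectory()  # directory is removed when this object is finalized
value_store = zarr.storage.LocalStore(tmpdir.name)
# on zarr 2.x the old one-liner zarr.storage.TempStore() still works
```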
--------- Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> Co-authored-by: Eric Kerfoot <17726042+ericspod@users.noreply.github.com> Signed-off-by: Can-Zhao --- monai/inferers/merger.py | 23 +++++++++++++++++++---- tests/test_zarr_avg_merger.py | 7 ++++--- 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/monai/inferers/merger.py b/monai/inferers/merger.py index d01d334142..1344207e18 100644 --- a/monai/inferers/merger.py +++ b/monai/inferers/merger.py @@ -15,12 +15,13 @@ from abc import ABC, abstractmethod from collections.abc import Sequence from contextlib import nullcontext +from tempfile import TemporaryDirectory from typing import TYPE_CHECKING, Any import numpy as np import torch -from monai.utils import ensure_tuple_size, optional_import, require_pkg +from monai.utils import ensure_tuple_size, get_package_version, optional_import, require_pkg, version_geq if TYPE_CHECKING: import zarr @@ -233,7 +234,7 @@ def __init__( store: zarr.storage.Store | str = "merged.zarr", value_store: zarr.storage.Store | str | None = None, count_store: zarr.storage.Store | str | None = None, - compressor: str = "default", + compressor: str | None = None, value_compressor: str | None = None, count_compressor: str | None = None, chunks: Sequence[int] | bool = True, @@ -246,8 +247,22 @@ def __init__( self.value_dtype = value_dtype self.count_dtype = count_dtype self.store = store - self.value_store = zarr.storage.TempStore() if value_store is None else value_store - self.count_store = zarr.storage.TempStore() if count_store is None else count_store + self.tmpdir: TemporaryDirectory | None + if version_geq(get_package_version("zarr"), "3.0.0"): + if value_store is None: + self.tmpdir = TemporaryDirectory() + self.value_store = zarr.storage.LocalStore(self.tmpdir.name) + else: + self.value_store = value_store + if count_store is None: + self.tmpdir = TemporaryDirectory() + self.count_store = zarr.storage.LocalStore(self.tmpdir.name) + else: + self.count_store = count_store + else: + self.tmpdir = None + self.value_store = zarr.storage.TempStore() if value_store is None else value_store + self.count_store = zarr.storage.TempStore() if count_store is None else count_store self.chunks = chunks self.compressor = compressor self.value_compressor = value_compressor diff --git a/tests/test_zarr_avg_merger.py b/tests/test_zarr_avg_merger.py index a52dbceb4c..3c89e4fb03 100644 --- a/tests/test_zarr_avg_merger.py +++ b/tests/test_zarr_avg_merger.py @@ -287,15 +287,16 @@ class ZarrAvgMergerTests(unittest.TestCase): ] ) def test_zarr_avg_merger_patches(self, arguments, patch_locations, expected): + codec_reg = numcodecs.registry.codec_registry if "compressor" in arguments: if arguments["compressor"] != "default": - arguments["compressor"] = zarr.codec_registry[arguments["compressor"].lower()]() + arguments["compressor"] = codec_reg[arguments["compressor"].lower()]() if "value_compressor" in arguments: if arguments["value_compressor"] != "default": - arguments["value_compressor"] = zarr.codec_registry[arguments["value_compressor"].lower()]() + arguments["value_compressor"] = codec_reg[arguments["value_compressor"].lower()]() if "count_compressor" in arguments: if arguments["count_compressor"] != "default": - arguments["count_compressor"] = zarr.codec_registry[arguments["count_compressor"].lower()]() + arguments["count_compressor"] = codec_reg[arguments["count_compressor"].lower()]() merger = ZarrAvgMerger(**arguments) for pl in patch_locations: merger.aggregate(pl[0], pl[1]) From 
106a3c811736889fdeebaa3fbd9701645e67b41f Mon Sep 17 00:00:00 2001 From: Yiheng Wang <68361391+yiheng-wang-nv@users.noreply.github.com> Date: Tue, 28 Jan 2025 07:54:49 +0800 Subject: [PATCH 08/55] update pydicom reader to enable gpu load (#8283) Related to #8241 . ### Description A few sentences describing the changes proposed in this pull request. ### Types of changes - [x] Non-breaking change (fix or new feature that would not break existing functionality). - [ ] Breaking change (fix or new feature that would cause existing functionality to change). - [ ] New tests added to cover the changes. - [ ] Integration tests passed locally by running `./runtests.sh -f -u --net --coverage`. - [ ] Quick tests passed locally by running `./runtests.sh --quick --unittests --disttests`. - [ ] In-line docstrings updated. - [ ] Documentation updated, tested `make html` command in the `docs/` folder. --------- Signed-off-by: Yiheng Wang Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Signed-off-by: Can-Zhao --- monai/data/image_reader.py | 219 ++++++++++++++++++++++++++++--------- tests/test_load_image.py | 58 +++++++++- 2 files changed, 222 insertions(+), 55 deletions(-) diff --git a/monai/data/image_reader.py b/monai/data/image_reader.py index 5bc38f69ea..003ec2cf0b 100644 --- a/monai/data/image_reader.py +++ b/monai/data/image_reader.py @@ -418,6 +418,10 @@ class PydicomReader(ImageReader): If provided, only the matched files will be included. For example, to include the file name "image_0001.dcm", the regular expression could be `".*image_(\\d+).dcm"`. Default to `""`. Set it to `None` to use `pydicom.misc.is_dicom` to match valid files. + to_gpu: If True, load the image into GPU memory using CuPy and Kvikio. This can accelerate data loading. + Default is False. CuPy and Kvikio are required for this option. + In practical use, it's recommended to add a warm up call before the actual loading. + A related tutorial will be prepared in the future, and the document will be updated accordingly. kwargs: additional args for `pydicom.dcmread` API. more details about available args: https://pydicom.github.io/pydicom/stable/reference/generated/pydicom.filereader.dcmread.html If the `get_data` function will be called @@ -434,6 +438,7 @@ def __init__( prune_metadata: bool = True, label_dict: dict | None = None, fname_regex: str = "", + to_gpu: bool = False, **kwargs, ): super().__init__() @@ -444,6 +449,33 @@ def __init__( self.prune_metadata = prune_metadata self.label_dict = label_dict self.fname_regex = fname_regex + if to_gpu and (not has_cp or not has_kvikio): + warnings.warn( + "PydicomReader: CuPy and/or Kvikio not installed for GPU loading, falling back to CPU loading." + ) + to_gpu = False + + if to_gpu: + self.warmup_kvikio() + + self.to_gpu = to_gpu + + def warmup_kvikio(self): + """ + Warm up the Kvikio library to initialize the internal buffers, cuFile, GDS, etc. + This can accelerate the data loading process when `to_gpu` is set to True. 
+ """ + if has_cp and has_kvikio: + a = cp.arange(100) + with tempfile.NamedTemporaryFile() as tmp_file: + tmp_file_name = tmp_file.name + f = kvikio.CuFile(tmp_file_name, "w") + f.write(a) + f.close() + + b = cp.empty_like(a) + f = kvikio.CuFile(tmp_file_name, "r") + f.read(b) def verify_suffix(self, filename: Sequence[PathLike] | PathLike) -> bool: """ @@ -475,12 +507,15 @@ def read(self, data: Sequence[PathLike] | PathLike, **kwargs): img_ = [] filenames: Sequence[PathLike] = ensure_tuple(data) + self.filenames = list(filenames) kwargs_ = self.kwargs.copy() + if self.to_gpu: + kwargs["defer_size"] = "100 KB" kwargs_.update(kwargs) self.has_series = False - for name in filenames: + for i, name in enumerate(filenames): name = f"{name}" if Path(name).is_dir(): # read DICOM series @@ -489,20 +524,28 @@ def read(self, data: Sequence[PathLike] | PathLike, **kwargs): else: series_slcs = [slc for slc in glob.glob(os.path.join(name, "*")) if pydicom.misc.is_dicom(slc)] slices = [] + loaded_slc_names = [] for slc in series_slcs: try: slices.append(pydicom.dcmread(fp=slc, **kwargs_)) + loaded_slc_names.append(slc) except pydicom.errors.InvalidDicomError as e: warnings.warn(f"Failed to read {slc} with exception: \n{e}.", stacklevel=2) - img_.append(slices if len(slices) > 1 else slices[0]) if len(slices) > 1: self.has_series = True + img_.append(slices) + self.filenames[i] = loaded_slc_names # type: ignore + else: + img_.append(slices[0]) # type: ignore + self.filenames[i] = loaded_slc_names[0] # type: ignore else: ds = pydicom.dcmread(fp=name, **kwargs_) - img_.append(ds) - return img_ if len(filenames) > 1 else img_[0] + img_.append(ds) # type: ignore + if len(filenames) == 1: + return img_[0] + return img_ - def _combine_dicom_series(self, data: Iterable): + def _combine_dicom_series(self, data: Iterable, filenames: Sequence[PathLike]): """ Combine dicom series (a list of pydicom dataset objects). Their data arrays will be stacked together at a new dimension as the last dimension. 
@@ -522,28 +565,27 @@ def _combine_dicom_series(self, data: Iterable): """ slices: list = [] # for a dicom series - for slc_ds in data: + for slc_ds, filename in zip(data, filenames): if hasattr(slc_ds, "InstanceNumber"): - slices.append(slc_ds) + slices.append((slc_ds, filename)) else: - warnings.warn(f"slice: {slc_ds.filename} does not have InstanceNumber tag, skip it.") - slices = sorted(slices, key=lambda s: s.InstanceNumber) - + warnings.warn(f"slice: {filename} does not have InstanceNumber tag, skip it.") + slices = sorted(slices, key=lambda s: s[0].InstanceNumber) if len(slices) == 0: raise ValueError("the input does not have valid slices.") - first_slice = slices[0] + first_slice, first_filename = slices[0] average_distance = 0.0 - first_array = self._get_array_data(first_slice) + first_array = self._get_array_data(first_slice, first_filename) shape = first_array.shape - spacing = getattr(first_slice, "PixelSpacing", [1.0, 1.0, 1.0]) + spacing = getattr(first_slice, "PixelSpacing", [1.0] * len(shape)) prev_pos = getattr(first_slice, "ImagePositionPatient", (0.0, 0.0, 0.0))[2] stack_array = [first_array] for idx in range(1, len(slices)): - slc_array = self._get_array_data(slices[idx]) + slc_array = self._get_array_data(slices[idx][0], slices[idx][1]) slc_shape = slc_array.shape - slc_spacing = getattr(slices[idx], "PixelSpacing", (1.0, 1.0, 1.0)) - slc_pos = getattr(slices[idx], "ImagePositionPatient", (0.0, 0.0, float(idx)))[2] + slc_spacing = getattr(slices[idx][0], "PixelSpacing", [1.0] * len(shape)) + slc_pos = getattr(slices[idx][0], "ImagePositionPatient", (0.0, 0.0, float(idx)))[2] if not np.allclose(slc_spacing, spacing): warnings.warn(f"the list contains slices that have different spacings {spacing} and {slc_spacing}.") if shape != slc_shape: @@ -555,11 +597,14 @@ def _combine_dicom_series(self, data: Iterable): if len(slices) > 1: average_distance /= len(slices) - 1 spacing.append(average_distance) - stack_array = np.stack(stack_array, axis=-1) + if self.to_gpu: + stack_array = cp.stack(stack_array, axis=-1) + else: + stack_array = np.stack(stack_array, axis=-1) stack_metadata = self._get_meta_dict(first_slice) stack_metadata["spacing"] = np.asarray(spacing) - if hasattr(slices[-1], "ImagePositionPatient"): - stack_metadata["lastImagePositionPatient"] = np.asarray(slices[-1].ImagePositionPatient) + if hasattr(slices[-1][0], "ImagePositionPatient"): + stack_metadata["lastImagePositionPatient"] = np.asarray(slices[-1][0].ImagePositionPatient) stack_metadata[MetaKeys.SPATIAL_SHAPE] = shape + (len(slices),) else: stack_array = stack_array[0] @@ -597,29 +642,35 @@ def get_data(self, data) -> tuple[np.ndarray, dict]: if self.has_series is True: # a list, all objects within a list belong to one dicom series if not isinstance(data[0], list): - dicom_data.append(self._combine_dicom_series(data)) + # input is a dir, self.filenames is a list of list of filenames + dicom_data.append(self._combine_dicom_series(data, self.filenames[0])) # type: ignore # a list of list, each inner list represents a dicom series else: - for series in data: - dicom_data.append(self._combine_dicom_series(series)) + for i, series in enumerate(data): + dicom_data.append(self._combine_dicom_series(series, self.filenames[i])) # type: ignore else: # a single pydicom dataset object if not isinstance(data, list): data = [data] - for d in data: + for i, d in enumerate(data): if hasattr(d, "SegmentSequence"): - data_array, metadata = self._get_seg_data(d) + data_array, metadata = self._get_seg_data(d, 
self.filenames[i]) else: - data_array = self._get_array_data(d) + data_array = self._get_array_data(d, self.filenames[i]) metadata = self._get_meta_dict(d) metadata[MetaKeys.SPATIAL_SHAPE] = data_array.shape dicom_data.append((data_array, metadata)) + # TODO: the actual type is list[np.ndarray | cp.ndarray] + # should figure out how to define correct types without having cupy not found error + # https://github.com/Project-MONAI/MONAI/pull/8188#discussion_r1886645918 img_array: list[np.ndarray] = [] compatible_meta: dict = {} for data_array, metadata in ensure_tuple(dicom_data): - img_array.append(np.ascontiguousarray(np.swapaxes(data_array, 0, 1) if self.swap_ij else data_array)) + if self.swap_ij: + data_array = cp.swapaxes(data_array, 0, 1) if self.to_gpu else np.swapaxes(data_array, 0, 1) + img_array.append(cp.ascontiguousarray(data_array) if self.to_gpu else np.ascontiguousarray(data_array)) affine = self._get_affine(metadata, self.affine_lps_to_ras) metadata[MetaKeys.SPACE] = SpaceKeys.RAS if self.affine_lps_to_ras else SpaceKeys.LPS if self.swap_ij: @@ -641,7 +692,7 @@ def get_data(self, data) -> tuple[np.ndarray, dict]: _copy_compatible_dict(metadata, compatible_meta) - return _stack_images(img_array, compatible_meta), compatible_meta + return _stack_images(img_array, compatible_meta, to_cupy=self.to_gpu), compatible_meta def _get_meta_dict(self, img) -> dict: """ @@ -713,7 +764,7 @@ def _get_affine(self, metadata: dict, lps_to_ras: bool = True): affine = orientation_ras_lps(affine) return affine - def _get_frame_data(self, img) -> Iterator: + def _get_frame_data(self, img, filename, array_data) -> Iterator: """ yield frames and description from the segmentation image. This function is adapted from Highdicom: @@ -751,48 +802,54 @@ def _get_frame_data(self, img) -> Iterator: """ if not hasattr(img, "PerFrameFunctionalGroupsSequence"): - raise NotImplementedError( - f"To read dicom seg: {img.filename}, 'PerFrameFunctionalGroupsSequence' is required." - ) + raise NotImplementedError(f"To read dicom seg: {filename}, 'PerFrameFunctionalGroupsSequence' is required.") frame_seg_nums = [] for f in img.PerFrameFunctionalGroupsSequence: if not hasattr(f, "SegmentIdentificationSequence"): raise NotImplementedError( - f"To read dicom seg: {img.filename}, 'SegmentIdentificationSequence' is required for each frame." + f"To read dicom seg: {filename}, 'SegmentIdentificationSequence' is required for each frame." ) frame_seg_nums.append(int(f.SegmentIdentificationSequence[0].ReferencedSegmentNumber)) - frame_seg_nums_arr = np.array(frame_seg_nums) + frame_seg_nums_arr = cp.array(frame_seg_nums) if self.to_gpu else np.array(frame_seg_nums) seg_descriptions = {int(f.SegmentNumber): f for f in img.SegmentSequence} - for i in np.unique(frame_seg_nums_arr): - indices = np.where(frame_seg_nums_arr == i)[0] - yield (img.pixel_array[indices, ...], seg_descriptions[i]) + for i in np.unique(frame_seg_nums_arr) if not self.to_gpu else cp.unique(frame_seg_nums_arr): + indices = np.where(frame_seg_nums_arr == i)[0] if not self.to_gpu else cp.where(frame_seg_nums_arr == i)[0] + yield (array_data[indices, ...], seg_descriptions[i]) - def _get_seg_data(self, img): + def _get_seg_data(self, img, filename): """ Get the array data and metadata of the segmentation image. Aegs: img: a Pydicom dataset object that has attribute "SegmentSequence". + filename: the file path of the image. 
""" metadata = self._get_meta_dict(img) n_classes = len(img.SegmentSequence) - spatial_shape = list(img.pixel_array.shape) + array_data = self._get_array_data(img, filename) + spatial_shape = list(array_data.shape) spatial_shape[0] = spatial_shape[0] // n_classes if self.label_dict is not None: metadata["labels"] = self.label_dict - all_segs = np.zeros([*spatial_shape, len(self.label_dict)]) + if self.to_gpu: + all_segs = cp.zeros([*spatial_shape, len(self.label_dict)], dtype=array_data.dtype) + else: + all_segs = np.zeros([*spatial_shape, len(self.label_dict)], dtype=array_data.dtype) else: metadata["labels"] = {} - all_segs = np.zeros([*spatial_shape, n_classes]) + if self.to_gpu: + all_segs = cp.zeros([*spatial_shape, n_classes], dtype=array_data.dtype) + else: + all_segs = np.zeros([*spatial_shape, n_classes], dtype=array_data.dtype) - for i, (frames, description) in enumerate(self._get_frame_data(img)): + for i, (frames, description) in enumerate(self._get_frame_data(img, filename, array_data)): segment_label = getattr(description, "SegmentLabel", f"label_{i}") class_name = getattr(description, "SegmentDescription", segment_label) if class_name not in metadata["labels"].keys(): @@ -840,19 +897,79 @@ def _get_seg_data(self, img): return all_segs, metadata - def _get_array_data(self, img): + def _get_array_data_from_gpu(self, img, filename): + """ + Get the raw array data of the image. This function is used when `to_gpu` is set to True. + + Args: + img: a Pydicom dataset object. + filename: the file path of the image. + + """ + rows = getattr(img, "Rows", None) + columns = getattr(img, "Columns", None) + bits_allocated = getattr(img, "BitsAllocated", None) + samples_per_pixel = getattr(img, "SamplesPerPixel", 1) + number_of_frames = getattr(img, "NumberOfFrames", 1) + pixel_representation = getattr(img, "PixelRepresentation", 1) + + if rows is None or columns is None or bits_allocated is None: + warnings.warn( + f"dicom data: {filename} does not have Rows, Columns or BitsAllocated, falling back to CPU loading." + ) + + if not hasattr(img, "pixel_array"): + raise ValueError(f"dicom data: {filename} does not have pixel_array.") + data = img.pixel_array + + return data + + if bits_allocated == 8: + dtype = cp.int8 if pixel_representation == 1 else cp.uint8 + elif bits_allocated == 16: + dtype = cp.int16 if pixel_representation == 1 else cp.uint16 + elif bits_allocated == 32: + dtype = cp.int32 if pixel_representation == 1 else cp.uint32 + else: + raise ValueError("Unsupported BitsAllocated value") + + bytes_per_pixel = bits_allocated // 8 + total_pixels = rows * columns * samples_per_pixel * number_of_frames + expected_pixel_data_length = total_pixels * bytes_per_pixel + + pixel_data_tag = pydicom.tag.Tag(0x7FE0, 0x0010) + if pixel_data_tag not in img: + raise ValueError(f"dicom data: {filename} does not have pixel data.") + + offset = img.get_item(pixel_data_tag, keep_deferred=True).value_tell + + with kvikio.CuFile(filename, "r") as f: + buffer = cp.empty(expected_pixel_data_length, dtype=cp.int8) + f.read(buffer, expected_pixel_data_length, offset) + + new_shape = (number_of_frames, rows, columns) if number_of_frames > 1 else (rows, columns) + data = buffer.view(dtype).reshape(new_shape) + + return data + + def _get_array_data(self, img, filename): """ Get the array data of the image. If `RescaleSlope` and `RescaleIntercept` are available, the raw array data - will be rescaled. The output data has the dtype np.float32 if the rescaling is applied. + will be rescaled. 
The output data has the dtype float32 if the rescaling is applied. Args: img: a Pydicom dataset object. + filename: the file path of the image. """ # process Dicom series - if not hasattr(img, "pixel_array"): - raise ValueError(f"dicom data: {img.filename} does not have pixel_array.") - data = img.pixel_array + + if self.to_gpu: + data = self._get_array_data_from_gpu(img, filename) + else: + if not hasattr(img, "pixel_array"): + raise ValueError(f"dicom data: {filename} does not have pixel_array.") + data = img.pixel_array slope, offset = 1.0, 0.0 rescale_flag = False @@ -862,8 +979,14 @@ def _get_array_data(self, img): if hasattr(img, "RescaleIntercept"): offset = img.RescaleIntercept rescale_flag = True + if rescale_flag: - data = data.astype(np.float32) * slope + offset + if self.to_gpu: + slope = cp.asarray(slope, dtype=cp.float32) + offset = cp.asarray(offset, dtype=cp.float32) + data = data.astype(cp.float32) * slope + offset + else: + data = data.astype(np.float32) * slope + offset return data @@ -884,8 +1007,6 @@ class NibabelReader(ImageReader): Default is False. CuPy and Kvikio are required for this option. Note: For compressed NIfTI files, some operations may still be performed on CPU memory, and the acceleration may not be significant. In some cases, it may be slower than loading on CPU. - In practical use, it's recommended to add a warm up call before the actual loading. - A related tutorial will be prepared in the future, and the document will be updated accordingly. kwargs: additional args for `nibabel.load` API. more details about available args: https://github.com/nipy/nibabel/blob/master/nibabel/loadsave.py diff --git a/tests/test_load_image.py b/tests/test_load_image.py index 498b9972b4..07acf7c179 100644 --- a/tests/test_load_image.py +++ b/tests/test_load_image.py @@ -168,6 +168,16 @@ def get_data(self, _obj): # test reader consistency between PydicomReader and ITKReader on dicom data TEST_CASE_22 = ["tests/testing_data/CT_DICOM"] +# test pydicom gpu reader +TEST_CASE_GPU_5 = [{"reader": "PydicomReader", "to_gpu": True}, "tests/testing_data/CT_DICOM", (16, 16, 4), (16, 16, 4)] + +TEST_CASE_GPU_6 = [ + {"reader": "PydicomReader", "ensure_channel_first": True, "force": True, "to_gpu": True}, + "tests/testing_data/CT_DICOM", + (16, 16, 4), + (1, 16, 16, 4), +] + TESTS_META = [] for track_meta in (False, True): TESTS_META.append([{}, (128, 128, 128), track_meta]) @@ -242,16 +252,17 @@ def test_nibabel_reader_gpu(self, input_param, filenames, expected_shape): @parameterized.expand([TEST_CASE_6, TEST_CASE_7, TEST_CASE_8, TEST_CASE_8_1, TEST_CASE_9]) def test_itk_reader(self, input_param, filenames, expected_shape): - test_image = np.random.rand(128, 128, 128) + test_image = torch.randint(0, 256, (128, 128, 128), dtype=torch.uint8).numpy() + print("Test image value range:", test_image.min(), test_image.max()) with tempfile.TemporaryDirectory() as tempdir: for i, name in enumerate(filenames): filenames[i] = os.path.join(tempdir, name) - itk_np_view = itk.image_view_from_array(test_image) - itk.imwrite(itk_np_view, filenames[i]) + nib.save(nib.Nifti1Image(test_image, np.eye(4)), filenames[i]) result = LoadImage(image_only=True, **input_param)(filenames) - self.assertEqual(result.meta["filename_or_obj"], os.path.join(tempdir, "test_image.nii.gz")) - diag = torch.as_tensor(np.diag([-1, -1, 1, 1])) - np.testing.assert_allclose(result.affine, diag) + ext = "".join(Path(name).suffixes) + self.assertEqual(result.meta["filename_or_obj"], os.path.join(tempdir, "test_image" + ext)) + 
self.assertEqual(result.meta["space"], "RAS") + assert_allclose(result.affine, torch.eye(4)) self.assertTupleEqual(result.shape, expected_shape) @parameterized.expand([TEST_CASE_10, TEST_CASE_11, TEST_CASE_12, TEST_CASE_19, TEST_CASE_20, TEST_CASE_21]) @@ -271,6 +282,26 @@ def test_itk_dicom_series_reader(self, input_param, filenames, expected_shape, e ) self.assertTupleEqual(result.shape, expected_np_shape) + @SkipIfNoModule("pydicom") + @SkipIfNoModule("cupy") + @SkipIfNoModule("kvikio") + @parameterized.expand([TEST_CASE_GPU_5, TEST_CASE_GPU_6]) + def test_pydicom_gpu_reader(self, input_param, filenames, expected_shape, expected_np_shape): + result = LoadImage(image_only=True, **input_param)(filenames) + self.assertEqual(result.meta["filename_or_obj"], f"{Path(filenames)}") + assert_allclose( + result.affine, + torch.tensor( + [ + [-0.488281, 0.0, 0.0, 125.0], + [0.0, -0.488281, 0.0, 128.100006], + [0.0, 0.0, 68.33333333, -99.480003], + [0.0, 0.0, 0.0, 1.0], + ] + ), + ) + self.assertTupleEqual(result.shape, expected_np_shape) + def test_no_files(self): with self.assertRaisesRegex(RuntimeError, "list index out of range"): # fname_regex excludes everything LoadImage(image_only=True, reader="PydicomReader", fname_regex=r"^(?!.*).*")("tests/testing_data/CT_DICOM") @@ -317,6 +348,21 @@ def test_dicom_reader_consistency(self, filenames): np.testing.assert_allclose(pydicom_result, itk_result) np.testing.assert_allclose(pydicom_result.affine, itk_result.affine) + @SkipIfNoModule("pydicom") + @SkipIfNoModule("cupy") + @SkipIfNoModule("kvikio") + @parameterized.expand([TEST_CASE_22]) + def test_pydicom_reader_gpu_cpu_consistency(self, filenames): + gpu_param = {"reader": "PydicomReader", "to_gpu": True} + cpu_param = {"reader": "PydicomReader", "to_gpu": False} + for affine_flag in [True, False]: + gpu_param["affine_lps_to_ras"] = affine_flag + cpu_param["affine_lps_to_ras"] = affine_flag + gpu_result = LoadImage(image_only=True, **gpu_param)(filenames) + cpu_result = LoadImage(image_only=True, **cpu_param)(filenames) + np.testing.assert_allclose(gpu_result.cpu(), cpu_result) + np.testing.assert_allclose(gpu_result.affine.cpu(), cpu_result.affine) + def test_dicom_reader_consistency_single(self): itk_param = {"reader": "ITKReader"} pydicom_param = {"reader": "PydicomReader"} From 621fc5fad37f4c45eabdf59cd20db695d5b7c5fc Mon Sep 17 00:00:00 2001 From: Eric Kerfoot <17726042+ericspod@users.noreply.github.com> Date: Mon, 3 Feb 2025 05:03:17 +0000 Subject: [PATCH 09/55] Zarr compression tests only with versions before 3.0 (#8319) Fixes #8298. ### Description This includes the tests for the `compressor` argument when testing with Zarr before version 3.0 when this argument was deprecated. A fix to upgrade the version of `pycln` used is also included. The version of PyTorch is also fixed to below 2.6 to avoid issues with misuse of `torch.load` which must be addressed later. ### Types of changes - [x] Non-breaking change (fix or new feature that would not break existing functionality). - [ ] Breaking change (fix or new feature that would cause existing functionality to change). - [ ] New tests added to cover the changes. - [ ] Integration tests passed locally by running `./runtests.sh -f -u --net --coverage`. - [ ] Quick tests passed locally by running `./runtests.sh --quick --unittests --disttests`. - [ ] In-line docstrings updated. - [ ] Documentation updated, tested `make html` command in the `docs/` folder. 
--------- Signed-off-by: Eric Kerfoot Signed-off-by: Can-Zhao --- .pre-commit-config.yaml | 2 +- monai/data/meta_tensor.py | 5 ++++ monai/utils/jupyter_utils.py | 2 +- monai/visualize/img2tensorboard.py | 4 +-- requirements-dev.txt | 2 +- requirements.txt | 2 +- tests/test_zarr_avg_merger.py | 45 +++++++++++++++--------------- 7 files changed, 34 insertions(+), 28 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2a57fbf31a..9621a1fe95 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -66,7 +66,7 @@ repos: )$ - repo: https://github.com/hadialqattan/pycln - rev: v2.4.0 + rev: v2.5.0 hooks: - id: pycln args: [--config=pyproject.toml] diff --git a/monai/data/meta_tensor.py b/monai/data/meta_tensor.py index c4c491e1b9..6425bc0a4f 100644 --- a/monai/data/meta_tensor.py +++ b/monai/data/meta_tensor.py @@ -607,3 +607,8 @@ def print_verbose(self) -> None: print(self) if self.meta is not None: print(self.meta.__repr__()) + + +# needed in later versions of Pytorch to indicate the class is safe for serialisation +if hasattr(torch.serialization, "add_safe_globals"): + torch.serialization.add_safe_globals([MetaTensor]) diff --git a/monai/utils/jupyter_utils.py b/monai/utils/jupyter_utils.py index b1b43a6767..c93e93dcb9 100644 --- a/monai/utils/jupyter_utils.py +++ b/monai/utils/jupyter_utils.py @@ -234,7 +234,7 @@ def plot_engine_status( def _get_loss_from_output( - output: list[torch.Tensor | dict[str, torch.Tensor]] | dict[str, torch.Tensor] | torch.Tensor + output: list[torch.Tensor | dict[str, torch.Tensor]] | dict[str, torch.Tensor] | torch.Tensor, ) -> torch.Tensor: """Returns a single value from the network output, which is a dict or tensor.""" diff --git a/monai/visualize/img2tensorboard.py b/monai/visualize/img2tensorboard.py index 677640bd04..fd328f2c7a 100644 --- a/monai/visualize/img2tensorboard.py +++ b/monai/visualize/img2tensorboard.py @@ -65,11 +65,11 @@ def _image3_animated_gif( img_str = b"" for b_data in PIL.GifImagePlugin.getheader(ims[0])[0]: img_str += b_data - img_str += b"\x21\xFF\x0B\x4E\x45\x54\x53\x43\x41\x50" b"\x45\x32\x2E\x30\x03\x01\x00\x00\x00" + img_str += b"\x21\xff\x0b\x4e\x45\x54\x53\x43\x41\x50" b"\x45\x32\x2e\x30\x03\x01\x00\x00\x00" for i in ims: for b_data in PIL.GifImagePlugin.getdata(i): img_str += b_data - img_str += b"\x3B" + img_str += b"\x3b" summary = SummaryX if has_tensorboardx and isinstance(writer, SummaryWriterX) else Summary summary_image_str = summary.Image(height=10, width=10, colorspace=1, encoded_image_string=img_str) diff --git a/requirements-dev.txt b/requirements-dev.txt index bffe304df4..c9730ee651 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -18,7 +18,7 @@ pep8-naming pycodestyle pyflakes black>=22.12 -isort>=5.1 +isort>=5.1, <6.0 ruff pytype>=2020.6.1; platform_system != "Windows" types-setuptools diff --git a/requirements.txt b/requirements.txt index e184322c13..85e7312f5d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,2 @@ -torch>=1.9 +torch>=1.9,<2.6 numpy>=1.24,<2.0 diff --git a/tests/test_zarr_avg_merger.py b/tests/test_zarr_avg_merger.py index 3c89e4fb03..64e8fbde71 100644 --- a/tests/test_zarr_avg_merger.py +++ b/tests/test_zarr_avg_merger.py @@ -260,32 +260,33 @@ TENSOR_4x4, ] +ALL_TESTS = [ + TEST_CASE_0_DEFAULT_DTYPE, + TEST_CASE_1_DEFAULT_DTYPE, + TEST_CASE_2_DEFAULT_DTYPE, + TEST_CASE_3_DEFAULT_DTYPE, + TEST_CASE_4_DEFAULT_DTYPE, + TEST_CASE_5_VALUE_DTYPE, + TEST_CASE_6_COUNT_DTYPE, + TEST_CASE_7_COUNT_VALUE_DTYPE, + TEST_CASE_8_DTYPE, + 
TEST_CASE_9_LARGER_SHAPE, + TEST_CASE_10_DIRECTORY_STORE, + TEST_CASE_11_MEMORY_STORE, + TEST_CASE_12_CHUNKS, + TEST_CASE_16_WITH_LOCK, + TEST_CASE_17_WITHOUT_LOCK, +] + +# add compression tests only when using Zarr version before 3.0 +if not version_geq(get_package_version("zarr"), "3.0.0"): + ALL_TESTS += [TEST_CASE_13_COMPRESSOR_LZ4, TEST_CASE_14_COMPRESSOR_PICKLE, TEST_CASE_15_COMPRESSOR_LZMA] + @unittest.skipUnless(has_zarr and has_numcodecs, "Requires zarr (and numcodecs) packages.)") class ZarrAvgMergerTests(unittest.TestCase): - @parameterized.expand( - [ - TEST_CASE_0_DEFAULT_DTYPE, - TEST_CASE_1_DEFAULT_DTYPE, - TEST_CASE_2_DEFAULT_DTYPE, - TEST_CASE_3_DEFAULT_DTYPE, - TEST_CASE_4_DEFAULT_DTYPE, - TEST_CASE_5_VALUE_DTYPE, - TEST_CASE_6_COUNT_DTYPE, - TEST_CASE_7_COUNT_VALUE_DTYPE, - TEST_CASE_8_DTYPE, - TEST_CASE_9_LARGER_SHAPE, - TEST_CASE_10_DIRECTORY_STORE, - TEST_CASE_11_MEMORY_STORE, - TEST_CASE_12_CHUNKS, - TEST_CASE_13_COMPRESSOR_LZ4, - TEST_CASE_14_COMPRESSOR_PICKLE, - TEST_CASE_15_COMPRESSOR_LZMA, - TEST_CASE_16_WITH_LOCK, - TEST_CASE_17_WITHOUT_LOCK, - ] - ) + @parameterized.expand(ALL_TESTS) def test_zarr_avg_merger_patches(self, arguments, patch_locations, expected): codec_reg = numcodecs.registry.codec_registry if "compressor" in arguments: From 3b83a56f28cb744c4402c510617401c9297714b4 Mon Sep 17 00:00:00 2001 From: Can-Zhao Date: Wed, 5 Mar 2025 11:24:18 +0000 Subject: [PATCH 10/55] add rectified flow noise scheduler to monai Signed-off-by: Can-Zhao Signed-off-by: Can-Zhao --- monai/inferers/inferer.py | 19 +- monai/networks/schedulers/__init__.py | 1 + monai/networks/schedulers/rectified_flow.py | 283 ++++++++++++++++++++ monai/utils/jupyter_utils.py | 2 +- tests/test_diffusion_inferer.py | 18 +- 5 files changed, 317 insertions(+), 6 deletions(-) create mode 100644 monai/networks/schedulers/rectified_flow.py diff --git a/monai/inferers/inferer.py b/monai/inferers/inferer.py index 769b6cc0e7..61fbacd1a7 100644 --- a/monai/inferers/inferer.py +++ b/monai/inferers/inferer.py @@ -39,7 +39,7 @@ SPADEAutoencoderKL, SPADEDiffusionModelUNet, ) -from monai.networks.schedulers import Scheduler +from monai.networks.schedulers import RFlowScheduler, Scheduler from monai.transforms import CenterSpatialCrop, SpatialPad from monai.utils import BlendMode, Ordering, PatchKeys, PytorchPadMode, ensure_tuple, optional_import from monai.visualize import CAM, GradCAM, GradCAMpp @@ -859,12 +859,19 @@ def sample( if not scheduler: scheduler = self.scheduler image = input_noise + + all_next_timesteps = torch.cat((scheduler.timesteps[1:], torch.tensor([0], dtype=scheduler.timesteps.dtype))) if verbose and has_tqdm: - progress_bar = tqdm(scheduler.timesteps) + progress_bar = tqdm( + zip(scheduler.timesteps, all_next_timesteps), + total=min(len(scheduler.timesteps), len(all_next_timesteps)), + ) else: progress_bar = iter(scheduler.timesteps) + progress_bar = iter(zip(scheduler.timesteps, all_next_timesteps)) intermediates = [] - for t in progress_bar: + + for t, next_t in progress_bar: # 1. predict noise model_output diffusion_model = ( partial(diffusion_model, seg=seg) @@ -882,9 +889,13 @@ def sample( ) # 2. 
compute previous image: x_t -> x_t-1 - image, _ = scheduler.step(model_output, t, image) + if not isinstance(scheduler, RFlowScheduler): + image, _ = scheduler.step(model_output, t, image) + else: + image, _ = scheduler.step(model_output, t, image, next_t) if save_intermediates and t % intermediate_steps == 0: intermediates.append(image) + if save_intermediates: return image, intermediates else: diff --git a/monai/networks/schedulers/__init__.py b/monai/networks/schedulers/__init__.py index 29e9020d65..b7b34f9a77 100644 --- a/monai/networks/schedulers/__init__.py +++ b/monai/networks/schedulers/__init__.py @@ -14,4 +14,5 @@ from .ddim import DDIMScheduler from .ddpm import DDPMScheduler from .pndm import PNDMScheduler +from .rectified_flow import RFlowScheduler from .scheduler import NoiseSchedules, Scheduler diff --git a/monai/networks/schedulers/rectified_flow.py b/monai/networks/schedulers/rectified_flow.py new file mode 100644 index 0000000000..6a848f0762 --- /dev/null +++ b/monai/networks/schedulers/rectified_flow.py @@ -0,0 +1,283 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ========================================================================= +# Adapted from https://github.com/hpcaitech/Open-Sora/blob/main/opensora/schedulers/rf/rectified_flow.py +# which has the following license: +# https://github.com/hpcaitech/Open-Sora/blob/main/LICENSE +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ========================================================================= + +from __future__ import annotations + +from typing import Any + +import numpy as np +import torch +from torch.distributions import LogisticNormal + +from .scheduler import Scheduler + + +def timestep_transform( + t, input_img_size_numel, base_img_size_numel=32 * 32 * 32, scale=1.0, num_train_timesteps=1000, spatial_dim=3 +): + """ + Applies a transformation to the timestep based on image resolution scaling. + + Args: + t (torch.Tensor): The original timestep(s). + input_img_size_numel (torch.Tensor): The input image's size (H * W * D). + base_img_size_numel (int): reference H*W*D size, usually smaller than input_img_size_numel. + scale (float): Scaling factor for the transformation. + num_train_timesteps (int): Total number of training timesteps. + spatial_dim (int): Number of spatial dimensions in the image. + + Returns: + torch.Tensor: Transformed timestep(s). 
+ """ + t = t / num_train_timesteps + ratio_space = (input_img_size_numel / base_img_size_numel).pow(1.0 / spatial_dim) + + ratio = ratio_space * scale + new_t = ratio * t / (1 + (ratio - 1) * t) + + new_t = new_t * num_train_timesteps + return new_t + + +class RFlowScheduler(Scheduler): + """ + A rectified flow scheduler for guiding the diffusion process in a generative model. + + Supports uniform and logit-normal sampling methods, timestep transformation for + different resolutions, and noise addition during diffusion. + + Attributes: + num_train_timesteps (int): Total number of training timesteps. + use_discrete_timesteps (bool): Whether to use discrete timesteps. + sample_method (str): Training time step sampling method ('uniform' or 'logit-normal'). + loc (float): Location parameter for logit-normal distribution, used only if sample_method='logit-normal'. + scale (float): Scale parameter for logit-normal distribution, used only if sample_method='logit-normal'. + use_timestep_transform (bool): Whether to apply timestep transformation. + If true, there will be more inference timesteps at early(noisy) stages for larger image volumes. + transform_scale (float): Scaling factor for timestep transformation, used only if use_timestep_transform=True. + steps_offset (int): Offset added to computed timesteps, used only if use_timestep_transform=True. + base_img_size_numel (int): Reference image volume size for scaling, used only if use_timestep_transform=True. + + Example: + + .. code-block:: python + + # define a scheduler + noise_scheduler = RFlowScheduler( + num_train_timesteps = 1000, + use_discrete_timesteps = True, + sample_method = 'logit-normal', + use_timestep_transform = True, + base_img_size_numel = 32 * 32 * 32 + ) + + # during training + inputs = torch.ones(2,4,64,64,64) + noise = torch.randn_like(inputs) + timesteps = noise_scheduler.sample_timesteps(inputs) + noisy_inputs = noise_scheduler.add_noise(original_samples=inputs, noise=noise, timesteps=timesteps) + predicted_velocity = diffusion_unet( + x=noisy_inputs, + timesteps=timesteps + ) + loss = loss_l1(predicted_velocity, (inputs - noise)) + + # during inference + noisy_inputs = torch.randn(2,4,64,64,64) + input_img_size_numel = torch.prod(torch.tensor(noisy_inputs.shape[-3:]) + noise_scheduler.set_timesteps( + num_inference_steps=30, input_img_size_numel=input_img_size_numel) + ) + all_next_timesteps = torch.cat( + (noise_scheduler.timesteps[1:], torch.tensor([0], dtype=noise_scheduler.timesteps.dtype)) + ) + for t, next_t in tqdm( + zip(noise_scheduler.timesteps, all_next_timesteps), + total=min(len(noise_scheduler.timesteps), len(all_next_timesteps)), + ): + predicted_velocity = diffusion_unet( + x=noisy_inputs, + timesteps=timesteps + ) + noisy_inputs, _ = noise_scheduler.step(predicted_velocity, t, noisy_inputs, next_t) + final_output = noisy_inputs + """ + + def __init__( + self, + num_train_timesteps: int = 1000, + use_discrete_timesteps: bool = True, + sample_method: str = "uniform", + loc: float = 0.0, + scale: float = 1.0, + use_timestep_transform: bool = False, + transform_scale: float = 1.0, + steps_offset: int = 0, + base_img_size_numel: int = 32 * 32 * 32, + ): + self.num_train_timesteps = num_train_timesteps + self.use_discrete_timesteps = use_discrete_timesteps + self.base_img_size_numel = base_img_size_numel + + # sample method + if sample_method not in ["uniform", "logit-normal"]: + raise ValueError( + f"sample_method = {sample_method}, which has to be chosen from ['uniform', 'logit-normal']." 
+ ) + self.sample_method = sample_method + if sample_method == "logit-normal": + self.distribution = LogisticNormal(torch.tensor([loc]), torch.tensor([scale])) + self.sample_t = lambda x: self.distribution.sample((x.shape[0],))[:, 0].to(x.device) + + # timestep transform + self.use_timestep_transform = use_timestep_transform + self.transform_scale = transform_scale + self.steps_offset = steps_offset + + def add_noise( + self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor + ) -> torch.FloatTensor: + """ + Adds noise to the original samples based on the given timesteps. + + Args: + original_samples (torch.FloatTensor): The original sample tensor. + noise (torch.FloatTensor): Noise tensor to be added. + timesteps (torch.IntTensor): Timesteps corresponding to each sample. + + Returns: + torch.FloatTensor: The noisy sample tensor. + """ + timepoints = timesteps.float() / self.num_train_timesteps + timepoints = 1 - timepoints # [1,1/1000] + + # timepoint (bsz) noise: (bsz, 4, frame, w ,h) + # expand timepoint to noise shape + timepoints = timepoints.unsqueeze(1).unsqueeze(1).unsqueeze(1).unsqueeze(1) + timepoints = timepoints.repeat(1, noise.shape[1], noise.shape[2], noise.shape[3], noise.shape[4]) + + return timepoints * original_samples + (1 - timepoints) * noise + + def set_timesteps( + self, + num_inference_steps: int, + device: str | torch.device | None = None, + input_img_size_numel: int | None = None, + ) -> None: + """ + Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference. + + Args: + num_inference_steps: number of diffusion steps used when generating samples with a pre-trained model. + device: target device to put the data. + input_img_size_numel: int, H*W*D of the image, used with self.use_timestep_transform is True. + """ + if num_inference_steps > self.num_train_timesteps: + raise ValueError( + f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.num_train_timesteps`:" + f" {self.num_train_timesteps} as the unet model trained with this scheduler can only handle" + f" maximal {self.num_train_timesteps} timesteps." + ) + + self.num_inference_steps = num_inference_steps + # prepare timesteps + timesteps = [ + (1.0 - i / self.num_inference_steps) * self.num_train_timesteps for i in range(self.num_inference_steps) + ] + if self.use_discrete_timesteps: + timesteps = [int(round(t)) for t in timesteps] + if self.use_timestep_transform: + timesteps = [ + timestep_transform( + t, + input_img_size_numel=input_img_size_numel, + base_img_size_numel=self.base_img_size_numel, + num_train_timesteps=self.num_train_timesteps, + ) + for t in timesteps + ] + timesteps = np.array(timesteps).astype(np.float16) + if self.use_discrete_timesteps: + timesteps = timesteps.astype(np.int64) + self.timesteps = torch.from_numpy(timesteps).to(device) + self.timesteps += self.steps_offset + + def sample_timesteps(self, x_start): + """ + Randomly samples training timesteps using the chosen sampling method. + + Args: + x_start (torch.Tensor): The input tensor for sampling. + + Returns: + torch.Tensor: Sampled timesteps. 
+ """ + if self.sample_method == "uniform": + t = torch.rand((x_start.shape[0],), device=x_start.device) * self.num_train_timesteps + elif self.sample_method == "logit-normal": + t = self.sample_t(x_start) * self.num_train_timesteps + + if self.use_discrete_timesteps: + t = t.long() + + if self.use_timestep_transform: + input_img_size_numel = torch.prod(torch.tensor(x_start.shape[-3:])) + t = timestep_transform( + t, + input_img_size_numel=input_img_size_numel, + base_img_size_numel=self.base_img_size_numel, + num_train_timesteps=self.num_train_timesteps, + ) + + return t + + def step( + self, model_output: torch.Tensor, timestep: int, sample: torch.Tensor, next_timestep=None + ) -> tuple[torch.Tensor, Any]: + """ + Predict the sample at the previous timestep. Core function to propagate the diffusion + process from the learned model outputs. + + Args: + model_output: direct output from learned diffusion model. + timestep: current discrete timestep in the diffusion chain. + sample: current instance of sample being created by diffusion process. + next_timestep: next discrete timestep in the diffusion chain. + Returns: + pred_prev_sample: Predicted previous sample + None + """ + v_pred = model_output + if next_timestep is None: + dt = 1.0 / self.num_inference_steps + else: + dt = timestep - next_timestep + dt = dt / self.num_train_timesteps + z = sample + v_pred * dt + + return z, None diff --git a/monai/utils/jupyter_utils.py b/monai/utils/jupyter_utils.py index c93e93dcb9..b1b43a6767 100644 --- a/monai/utils/jupyter_utils.py +++ b/monai/utils/jupyter_utils.py @@ -234,7 +234,7 @@ def plot_engine_status( def _get_loss_from_output( - output: list[torch.Tensor | dict[str, torch.Tensor]] | dict[str, torch.Tensor] | torch.Tensor, + output: list[torch.Tensor | dict[str, torch.Tensor]] | dict[str, torch.Tensor] | torch.Tensor ) -> torch.Tensor: """Returns a single value from the network output, which is a dict or tensor.""" diff --git a/tests/test_diffusion_inferer.py b/tests/test_diffusion_inferer.py index 7f37025d3c..6b74452288 100644 --- a/tests/test_diffusion_inferer.py +++ b/tests/test_diffusion_inferer.py @@ -19,7 +19,7 @@ from monai.inferers import DiffusionInferer from monai.networks.nets import DiffusionModelUNet -from monai.networks.schedulers import DDIMScheduler, DDPMScheduler +from monai.networks.schedulers import DDIMScheduler, DDPMScheduler, RFlowScheduler from monai.utils import optional_import _, has_scipy = optional_import("scipy") @@ -120,6 +120,22 @@ def test_ddim_sampler(self, model_params, input_shape): ) self.assertEqual(len(intermediates), 10) + @parameterized.expand(TEST_CASES) + @skipUnless(has_einops, "Requires einops") + def test_rflow_sampler(self, model_params, input_shape): + model = DiffusionModelUNet(**model_params) + device = "cuda:0" if torch.cuda.is_available() else "cpu" + model.to(device) + model.eval() + noise = torch.randn(input_shape).to(device) + scheduler = RFlowScheduler(num_train_timesteps=1000) + inferer = DiffusionInferer(scheduler=scheduler) + scheduler.set_timesteps(num_inference_steps=10) + sample, intermediates = inferer.sample( + input_noise=noise, diffusion_model=model, scheduler=scheduler, save_intermediates=True, intermediate_steps=1 + ) + self.assertEqual(len(intermediates), 10) + @parameterized.expand(TEST_CASES) @skipUnless(has_einops, "Requires einops") def test_sampler_conditioned(self, model_params, input_shape): From dff1a4a76b2605fca24c04395d9e2c89094eb655 Mon Sep 17 00:00:00 2001 From: Eric Kerfoot 
<17726042+ericspod@users.noreply.github.com> Date: Tue, 11 Feb 2025 16:18:37 +0000 Subject: [PATCH 11/55] Changing utils.py to test_utils.py (#8335) Related to #8185. ### Description This changes the name of `tests/utils.py` to `tests/test_utils.py` to conform to the changes introduced with the daily CICD tests. This is done in #8231 but the change is being pre-merged now to get tests working again while issues are sorted out there. This also includes changes this PR made to that file, and changes anywhere else to correctly import the module. ### Types of changes - [x] Non-breaking change (fix or new feature that would not break existing functionality). - [ ] Breaking change (fix or new feature that would cause existing functionality to change). - [ ] New tests added to cover the changes. - [ ] Integration tests passed locally by running `./runtests.sh -f -u --net --coverage`. - [ ] Quick tests passed locally by running `./runtests.sh --quick --unittests --disttests`. - [ ] In-line docstrings updated. - [ ] Documentation updated, tested `make html` command in the `docs/` folder. --------- Signed-off-by: Eric Kerfoot Signed-off-by: Can-Zhao --- tests/croppers.py | 2 +- tests/hvd_evenly_divisible_all_gather.py | 2 +- tests/lazy_transforms_utils.py | 2 +- tests/ngc_bundle_download.py | 2 +- tests/padders.py | 2 +- tests/test_activations.py | 2 +- tests/test_activationsd.py | 2 +- tests/test_add_coordinate_channels.py | 2 +- tests/test_add_coordinate_channelsd.py | 2 +- tests/test_add_extreme_points_channel.py | 2 +- tests/test_add_extreme_points_channeld.py | 2 +- tests/test_adjust_contrast.py | 2 +- tests/test_adjust_contrastd.py | 2 +- tests/test_adn.py | 2 +- tests/test_affine.py | 2 +- tests/test_affine_grid.py | 2 +- tests/test_affine_transform.py | 2 +- tests/test_affined.py | 2 +- tests/test_ahnet.py | 2 +- tests/test_anchor_box.py | 2 +- tests/test_apply.py | 2 +- tests/test_as_channel_last.py | 2 +- tests/test_as_channel_lastd.py | 2 +- tests/test_as_discrete.py | 2 +- tests/test_as_discreted.py | 2 +- tests/test_atss_box_matcher.py | 2 +- tests/test_attentionunet.py | 2 +- tests/test_auto3dseg.py | 2 +- tests/test_auto3dseg_bundlegen.py | 2 +- tests/test_auto3dseg_ensemble.py | 2 +- tests/test_auto3dseg_hpo.py | 2 +- tests/test_autoencoder.py | 2 +- tests/test_autoencoderkl.py | 2 +- tests/test_autoencoderkl_maisi.py | 2 +- tests/test_avg_merger.py | 2 +- tests/test_basic_unet.py | 2 +- tests/test_basic_unetplusplus.py | 2 +- tests/test_bilateral_approx_cpu.py | 2 +- tests/test_bilateral_approx_cuda.py | 2 +- tests/test_bilateral_precise.py | 2 +- tests/test_blend_images.py | 2 +- tests/test_bounding_rect.py | 2 +- tests/test_bounding_rectd.py | 2 +- tests/test_box_coder.py | 2 +- tests/test_box_transform.py | 2 +- tests/test_box_utils.py | 2 +- tests/test_bundle_ckpt_export.py | 2 +- tests/test_bundle_download.py | 2 +- tests/test_bundle_get_data.py | 2 +- tests/test_bundle_init_bundle.py | 2 +- tests/test_bundle_onnx_export.py | 2 +- tests/test_bundle_push_to_hf_hub.py | 2 +- tests/test_bundle_trt_export.py | 2 +- tests/test_bundle_utils.py | 2 +- tests/test_bundle_verify_metadata.py | 2 +- tests/test_bundle_verify_net.py | 2 +- tests/test_call_dist.py | 2 +- tests/test_cast_to_type.py | 2 +- tests/test_cast_to_typed.py | 2 +- tests/test_classes_to_indices.py | 2 +- tests/test_classes_to_indicesd.py | 2 +- tests/test_clip_intensity_percentiles.py | 2 +- tests/test_clip_intensity_percentilesd.py | 2 +- tests/test_complex_utils.py | 2 +- tests/test_compute_confusion_matrix.py | 2 +- 
tests/test_compute_f_beta.py | 2 +- tests/test_compute_ho_ver_maps.py | 2 +- tests/test_compute_ho_ver_maps_d.py | 2 +- tests/test_compute_panoptic_quality.py | 2 +- tests/test_concat_itemsd.py | 2 +- tests/test_config_parser.py | 2 +- tests/test_controlnet.py | 2 +- tests/test_controlnet_maisi.py | 2 +- tests/test_convert_box_points.py | 2 +- tests/test_convert_data_type.py | 2 +- tests/test_convert_to_multi_channel.py | 2 +- tests/test_convert_to_onnx.py | 2 +- tests/test_convert_to_trt.py | 2 +- tests/test_convolutions.py | 2 +- tests/test_copy_itemsd.py | 2 +- tests/test_correct_crop_centers.py | 2 +- tests/test_create_grid_and_affine.py | 2 +- tests/test_crf_cpu.py | 2 +- tests/test_crf_cuda.py | 2 +- tests/test_crop_foreground.py | 2 +- tests/test_crop_foregroundd.py | 2 +- tests/test_cross_validation.py | 2 +- tests/test_crossattention.py | 2 +- tests/test_csv_iterable_dataset.py | 2 +- tests/test_cucim_dict_transform.py | 2 +- tests/test_cucim_transform.py | 2 +- tests/test_cumulative.py | 2 +- tests/test_cumulative_average_dist.py | 2 +- tests/test_cv2_dist.py | 2 +- tests/test_daf3d.py | 2 +- tests/test_dataloader.py | 2 +- tests/test_decathlondataset.py | 2 +- tests/test_decollate.py | 2 +- tests/test_denseblock.py | 2 +- tests/test_densenet.py | 2 +- tests/test_detect_envelope.py | 2 +- tests/test_detector_boxselector.py | 2 +- tests/test_detector_utils.py | 2 +- tests/test_dice_focal_loss.py | 2 +- tests/test_dice_loss.py | 2 +- tests/test_diffusion_model_unet.py | 2 +- tests/test_dints_mixop.py | 2 +- tests/test_dints_network.py | 2 +- tests/test_discriminator.py | 2 +- tests/test_distance_transform_edt.py | 2 +- tests/test_download_and_extract.py | 2 +- tests/test_ds_loss.py | 2 +- tests/test_dynunet.py | 2 +- tests/test_dynunet_block.py | 2 +- tests/test_efficientnet.py | 2 +- tests/test_ensemble_evaluator.py | 2 +- tests/test_ensure_tuple.py | 2 +- tests/test_ensure_type.py | 2 +- tests/test_ensure_typed.py | 2 +- tests/test_enum_bound_interp.py | 2 +- tests/test_evenly_divisible_all_gather_dist.py | 2 +- tests/test_fastmri_reader.py | 2 +- tests/test_fft_utils.py | 2 +- tests/test_fg_bg_to_indices.py | 2 +- tests/test_fg_bg_to_indicesd.py | 2 +- tests/test_fill_holes.py | 2 +- tests/test_fill_holesd.py | 2 +- tests/test_fl_exchange_object.py | 2 +- tests/test_fl_monai_algo.py | 2 +- tests/test_fl_monai_algo_dist.py | 2 +- tests/test_fl_monai_algo_stats.py | 2 +- tests/test_flexible_unet.py | 2 +- tests/test_flip.py | 8 +++++++- tests/test_flipd.py | 8 +++++++- tests/test_focal_loss.py | 2 +- tests/test_foreground_mask.py | 2 +- tests/test_foreground_maskd.py | 2 +- tests/test_fourier.py | 2 +- tests/test_fpn_block.py | 2 +- tests/test_from_engine_hovernet.py | 2 +- tests/test_gaussian_filter.py | 2 +- tests/test_gaussian_sharpen.py | 2 +- tests/test_gaussian_sharpend.py | 2 +- tests/test_gaussian_smooth.py | 2 +- tests/test_gaussian_smoothd.py | 2 +- tests/test_gdsdataset.py | 2 +- tests/test_generalized_dice_focal_loss.py | 2 +- tests/test_generalized_dice_loss.py | 2 +- tests/test_generalized_wasserstein_dice_loss.py | 2 +- tests/test_generate_distance_map.py | 2 +- tests/test_generate_distance_mapd.py | 2 +- tests/test_generate_instance_border.py | 2 +- tests/test_generate_instance_borderd.py | 2 +- tests/test_generate_instance_centroid.py | 2 +- tests/test_generate_instance_centroidd.py | 2 +- tests/test_generate_instance_contour.py | 2 +- tests/test_generate_instance_contourd.py | 2 +- tests/test_generate_instance_type.py | 2 +- tests/test_generate_instance_typed.py 
| 2 +- tests/test_generate_label_classes_crop_centers.py | 2 +- tests/test_generate_param_groups.py | 2 +- tests/test_generate_pos_neg_label_crop_centers.py | 2 +- tests/test_generate_spatial_bounding_box.py | 2 +- tests/test_generate_watershed_markers.py | 2 +- tests/test_generate_watershed_markersd.py | 2 +- tests/test_generate_watershed_mask.py | 2 +- tests/test_generate_watershed_maskd.py | 2 +- tests/test_generator.py | 2 +- tests/test_get_equivalent_dtype.py | 2 +- tests/test_get_extreme_points.py | 2 +- tests/test_get_unique_labels.py | 2 +- tests/test_gibbs_noise.py | 2 +- tests/test_gibbs_noised.py | 2 +- tests/test_global_mutual_information_loss.py | 2 +- tests/test_globalnet.py | 2 +- tests/test_gmm.py | 2 +- tests/test_grid_dataset.py | 2 +- tests/test_grid_distortion.py | 2 +- tests/test_grid_distortiond.py | 2 +- tests/test_grid_patch.py | 2 +- tests/test_grid_patchd.py | 2 +- tests/test_grid_pull.py | 2 +- tests/test_grid_split.py | 2 +- tests/test_grid_splitd.py | 2 +- tests/test_handler_checkpoint_loader.py | 2 +- tests/test_handler_classification_saver_dist.py | 2 +- tests/test_handler_confusion_matrix.py | 2 +- tests/test_handler_confusion_matrix_dist.py | 2 +- tests/test_handler_decollate_batch.py | 2 +- tests/test_handler_hausdorff_distance.py | 2 +- tests/test_handler_ignite_metric.py | 2 +- tests/test_handler_logfile.py | 2 +- tests/test_handler_mean_dice.py | 2 +- tests/test_handler_mean_iou.py | 2 +- tests/test_handler_metric_logger.py | 2 +- tests/test_handler_metrics_reloaded.py | 2 +- tests/test_handler_metrics_saver_dist.py | 2 +- tests/test_handler_mlflow.py | 2 +- tests/test_handler_nvtx.py | 2 +- tests/test_handler_panoptic_quality.py | 2 +- tests/test_handler_parameter_scheduler.py | 2 +- tests/test_handler_post_processing.py | 2 +- tests/test_handler_regression_metrics_dist.py | 2 +- tests/test_handler_rocauc_dist.py | 2 +- tests/test_handler_surface_distance.py | 2 +- tests/test_handler_tb_image.py | 2 +- tests/test_hardnegsampler.py | 2 +- tests/test_highresnet.py | 2 +- tests/test_hilbert_transform.py | 2 +- tests/test_histogram_normalize.py | 2 +- tests/test_histogram_normalized.py | 2 +- tests/test_hovernet.py | 2 +- tests/test_hovernet_instance_map_post_processing.py | 2 +- tests/test_hovernet_instance_map_post_processingd.py | 2 +- tests/test_hovernet_nuclear_type_post_processing.py | 2 +- tests/test_hovernet_nuclear_type_post_processingd.py | 2 +- tests/test_identity.py | 2 +- tests/test_identityd.py | 2 +- tests/test_image_rw.py | 2 +- tests/test_init_reader.py | 2 +- tests/test_integration_autorunner.py | 2 +- tests/test_integration_bundle_run.py | 2 +- tests/test_integration_classification_2d.py | 2 +- tests/test_integration_determinism.py | 2 +- tests/test_integration_fast_train.py | 2 +- tests/test_integration_gpu_customization.py | 2 +- tests/test_integration_lazy_samples.py | 2 +- tests/test_integration_nnunetv2_runner.py | 2 +- tests/test_integration_segmentation_3d.py | 2 +- tests/test_integration_sliding_window.py | 2 +- tests/test_integration_stn.py | 2 +- tests/test_integration_unet_2d.py | 2 +- tests/test_integration_workers.py | 2 +- tests/test_integration_workflows.py | 2 +- tests/test_integration_workflows_adversarial.py | 2 +- tests/test_integration_workflows_gan.py | 2 +- tests/test_intensity_stats.py | 2 +- tests/test_inverse.py | 2 +- tests/test_inverse_array.py | 2 +- tests/test_inverse_collation.py | 2 +- tests/test_invert.py | 2 +- tests/test_invertd.py | 2 +- tests/test_itk_torch_bridge.py | 8 +++++++- 
tests/test_k_space_spike_noise.py | 2 +- tests/test_k_space_spike_noised.py | 2 +- tests/test_keep_largest_connected_component.py | 2 +- tests/test_keep_largest_connected_componentd.py | 2 +- tests/test_label_filter.py | 2 +- tests/test_label_filterd.py | 2 +- tests/test_label_to_contour.py | 2 +- tests/test_label_to_contourd.py | 2 +- tests/test_label_to_mask.py | 2 +- tests/test_label_to_maskd.py | 2 +- tests/test_lambda.py | 2 +- tests/test_lambdad.py | 2 +- tests/test_lltm.py | 2 +- tests/test_lmdbdataset.py | 2 +- tests/test_lmdbdataset_dist.py | 2 +- tests/test_load_image.py | 2 +- tests/test_load_imaged.py | 2 +- tests/test_localnet.py | 2 +- tests/test_lr_finder.py | 2 +- tests/test_make_nifti.py | 2 +- tests/test_map_and_generate_sampling_centers.py | 2 +- tests/test_map_binary_to_indices.py | 2 +- tests/test_map_classes_to_indices.py | 2 +- tests/test_map_label_value.py | 2 +- tests/test_map_label_valued.py | 2 +- tests/test_mask_intensity.py | 2 +- tests/test_masked_autoencoder_vit.py | 2 +- tests/test_masked_loss.py | 2 +- tests/test_masked_patch_wsi_dataset.py | 2 +- tests/test_matshow3d.py | 2 +- tests/test_mean_ensemble.py | 2 +- tests/test_mean_ensembled.py | 2 +- tests/test_median_smooth.py | 2 +- tests/test_median_smoothd.py | 2 +- tests/test_mednistdataset.py | 2 +- tests/test_meta_affine.py | 2 +- tests/test_meta_tensor.py | 2 +- tests/test_metatensor_integration.py | 2 +- tests/test_milmodel.py | 2 +- tests/test_mmar_download.py | 2 +- tests/test_morphological_ops.py | 2 +- tests/test_mri_utils.py | 2 +- tests/test_multi_scale.py | 2 +- tests/test_net_adapter.py | 2 +- tests/test_network_consistency.py | 2 +- tests/test_nifti_rw.py | 2 +- tests/test_normalize_intensity.py | 2 +- tests/test_normalize_intensityd.py | 2 +- tests/test_numpy_reader.py | 2 +- tests/test_nvtx_decorator.py | 2 +- tests/test_ori_ras_lps.py | 2 +- tests/test_orientation.py | 2 +- tests/test_orientationd.py | 2 +- tests/test_pad_mode.py | 2 +- tests/test_patch_gan_dicriminator.py | 2 +- tests/test_patch_inferer.py | 2 +- tests/test_patch_wsi_dataset.py | 2 +- tests/test_patchembedding.py | 2 +- tests/test_perceptual_loss.py | 2 +- tests/test_persistentdataset_dist.py | 2 +- tests/test_phl_cpu.py | 2 +- tests/test_phl_cuda.py | 2 +- tests/test_plot_2d_or_3d_image.py | 2 +- tests/test_point_based_window_inferer.py | 2 +- tests/test_prepare_batch_default.py | 2 +- tests/test_prepare_batch_default_dist.py | 2 +- tests/test_prepare_batch_extra_input.py | 2 +- tests/test_prepare_batch_hovernet.py | 2 +- tests/test_probnms.py | 2 +- tests/test_probnmsd.py | 2 +- tests/test_profiling.py | 2 +- tests/test_query_memory.py | 2 +- tests/test_quicknat.py | 2 +- tests/test_rand_adjust_contrast.py | 2 +- tests/test_rand_adjust_contrastd.py | 2 +- tests/test_rand_affine.py | 2 +- tests/test_rand_affine_grid.py | 2 +- tests/test_rand_affined.py | 2 +- tests/test_rand_axis_flip.py | 2 +- tests/test_rand_axis_flipd.py | 2 +- tests/test_rand_bias_field.py | 2 +- tests/test_rand_coarse_dropout.py | 2 +- tests/test_rand_crop_by_label_classes.py | 2 +- tests/test_rand_crop_by_label_classesd.py | 2 +- tests/test_rand_crop_by_pos_neg_label.py | 2 +- tests/test_rand_crop_by_pos_neg_labeld.py | 2 +- tests/test_rand_cucim_dict_transform.py | 2 +- tests/test_rand_cucim_transform.py | 2 +- tests/test_rand_deform_grid.py | 2 +- tests/test_rand_elastic_2d.py | 2 +- tests/test_rand_elastic_3d.py | 2 +- tests/test_rand_elasticd_2d.py | 2 +- tests/test_rand_elasticd_3d.py | 2 +- tests/test_rand_flip.py | 2 +- 
tests/test_rand_flipd.py | 2 +- tests/test_rand_gaussian_noise.py | 2 +- tests/test_rand_gaussian_noised.py | 2 +- tests/test_rand_gaussian_sharpen.py | 2 +- tests/test_rand_gaussian_sharpend.py | 2 +- tests/test_rand_gaussian_smooth.py | 2 +- tests/test_rand_gaussian_smoothd.py | 2 +- tests/test_rand_gibbs_noise.py | 2 +- tests/test_rand_gibbs_noised.py | 2 +- tests/test_rand_grid_distortion.py | 2 +- tests/test_rand_grid_distortiond.py | 2 +- tests/test_rand_grid_patch.py | 2 +- tests/test_rand_grid_patchd.py | 2 +- tests/test_rand_histogram_shift.py | 2 +- tests/test_rand_histogram_shiftd.py | 2 +- tests/test_rand_k_space_spike_noise.py | 2 +- tests/test_rand_k_space_spike_noised.py | 2 +- tests/test_rand_lambda.py | 2 +- tests/test_rand_lambdad.py | 2 +- tests/test_rand_rician_noise.py | 2 +- tests/test_rand_rician_noised.py | 2 +- tests/test_rand_rotate.py | 2 +- tests/test_rand_rotate90.py | 2 +- tests/test_rand_rotate90d.py | 2 +- tests/test_rand_rotated.py | 2 +- tests/test_rand_scale_crop.py | 2 +- tests/test_rand_scale_cropd.py | 2 +- tests/test_rand_scale_intensity.py | 2 +- tests/test_rand_scale_intensity_fixed_mean.py | 2 +- tests/test_rand_scale_intensity_fixed_meand.py | 2 +- tests/test_rand_scale_intensityd.py | 2 +- tests/test_rand_shift_intensity.py | 2 +- tests/test_rand_shift_intensityd.py | 2 +- tests/test_rand_simulate_low_resolution.py | 2 +- tests/test_rand_simulate_low_resolutiond.py | 2 +- tests/test_rand_spatial_crop.py | 2 +- tests/test_rand_spatial_crop_samples.py | 2 +- tests/test_rand_spatial_crop_samplesd.py | 2 +- tests/test_rand_spatial_cropd.py | 2 +- tests/test_rand_std_shift_intensity.py | 2 +- tests/test_rand_std_shift_intensityd.py | 2 +- tests/test_rand_torchiod.py | 2 +- tests/test_rand_weighted_crop.py | 2 +- tests/test_rand_weighted_cropd.py | 2 +- tests/test_rand_zoom.py | 2 +- tests/test_rand_zoomd.py | 2 +- tests/test_randidentity.py | 2 +- tests/test_randtorchvisiond.py | 2 +- tests/test_rankfilter_dist.py | 2 +- tests/test_recon_net_utils.py | 2 +- tests/test_reference_based_normalize_intensity.py | 2 +- tests/test_reference_based_spatial_cropd.py | 2 +- tests/test_reg_loss_integration.py | 2 +- tests/test_regularization.py | 2 +- tests/test_regunet.py | 2 +- tests/test_remove_repeated_channel.py | 2 +- tests/test_remove_repeated_channeld.py | 2 +- tests/test_remove_small_objects.py | 2 +- tests/test_repeat_channel.py | 2 +- tests/test_repeat_channeld.py | 2 +- tests/test_replace_module.py | 2 +- tests/test_resample.py | 2 +- tests/test_resample_backends.py | 2 +- tests/test_resample_to_match.py | 2 +- tests/test_resample_to_matchd.py | 2 +- tests/test_resampler.py | 2 +- tests/test_resize.py | 2 +- tests/test_resize_with_pad_or_crop.py | 2 +- tests/test_resize_with_pad_or_cropd.py | 2 +- tests/test_resized.py | 2 +- tests/test_resnet.py | 2 +- tests/test_retinanet.py | 2 +- tests/test_retinanet_detector.py | 2 +- tests/test_rotate.py | 8 +++++++- tests/test_rotate90.py | 2 +- tests/test_rotate90d.py | 2 +- tests/test_rotated.py | 2 +- tests/test_safe_dtype_range.py | 2 +- tests/test_sample_slices.py | 2 +- tests/test_sampler_dist.py | 2 +- tests/test_savitzky_golay_filter.py | 2 +- tests/test_savitzky_golay_smooth.py | 2 +- tests/test_savitzky_golay_smoothd.py | 2 +- tests/test_scale_intensity.py | 2 +- tests/test_scale_intensity_fixed_mean.py | 2 +- tests/test_scale_intensity_range.py | 2 +- tests/test_scale_intensity_range_percentiles.py | 2 +- tests/test_scale_intensity_range_percentilesd.py | 2 +- tests/test_scale_intensity_ranged.py | 2 
+- tests/test_scale_intensityd.py | 2 +- tests/test_scheduler_ddim.py | 2 +- tests/test_scheduler_ddpm.py | 2 +- tests/test_scheduler_pndm.py | 2 +- tests/test_se_block.py | 2 +- tests/test_se_blocks.py | 2 +- tests/test_segresnet.py | 2 +- tests/test_segresnet_ds.py | 2 +- tests/test_selfattention.py | 2 +- tests/test_senet.py | 2 +- tests/test_set_determinism.py | 2 +- tests/test_set_visible_devices.py | 2 +- tests/test_shift_intensity.py | 2 +- tests/test_shift_intensityd.py | 2 +- tests/test_shuffle_buffer.py | 2 +- tests/test_signal_fillempty.py | 2 +- tests/test_signal_fillemptyd.py | 2 +- tests/test_signal_rand_add_squarepulse.py | 2 +- tests/test_signal_rand_add_squarepulse_partial.py | 2 +- tests/test_simulatedelay.py | 2 +- tests/test_simulatedelayd.py | 2 +- tests/test_sliding_patch_wsi_dataset.py | 2 +- tests/test_sliding_window_inference.py | 2 +- tests/test_sliding_window_splitter.py | 2 +- tests/test_smartcachedataset.py | 2 +- tests/test_smooth_field.py | 2 +- tests/test_sobel_gradient.py | 2 +- tests/test_sobel_gradientd.py | 2 +- tests/test_spacing.py | 2 +- tests/test_spacingd.py | 2 +- tests/test_spatial_combine_transforms.py | 2 +- tests/test_spatial_resample.py | 2 +- tests/test_spatial_resampled.py | 2 +- tests/test_spectral_loss.py | 2 +- tests/test_splitdim.py | 2 +- tests/test_splitdimd.py | 2 +- tests/test_squeezedim.py | 2 +- tests/test_squeezedimd.py | 2 +- tests/test_std_shift_intensity.py | 2 +- tests/test_std_shift_intensityd.py | 2 +- tests/test_subpixel_upsample.py | 2 +- tests/test_surface_dice.py | 2 +- tests/test_swin_unetr.py | 2 +- tests/test_tciadataset.py | 2 +- tests/test_testtimeaugmentation.py | 2 +- tests/test_text_encoding.py | 2 +- tests/test_thread_buffer.py | 2 +- tests/test_threadcontainer.py | 2 +- tests/test_threshold_intensity.py | 2 +- tests/test_threshold_intensityd.py | 2 +- tests/test_timedcall_dist.py | 2 +- tests/test_to_contiguous.py | 2 +- tests/test_to_cupy.py | 2 +- tests/test_to_cupyd.py | 2 +- tests/test_to_device.py | 2 +- tests/test_to_deviced.py | 2 +- tests/test_to_from_meta_tensord.py | 2 +- tests/test_to_numpy.py | 2 +- tests/test_to_numpyd.py | 2 +- tests/test_to_pil.py | 2 +- tests/test_to_pild.py | 2 +- tests/test_to_tensor.py | 2 +- tests/test_to_tensord.py | 2 +- tests/test_torchiod.py | 2 +- tests/test_torchvision.py | 2 +- tests/test_torchvision_fc_model.py | 2 +- tests/test_torchvisiond.py | 2 +- tests/test_trainable_bilateral.py | 2 +- tests/test_trainable_joint_bilateral.py | 2 +- tests/test_transchex.py | 2 +- tests/test_transformer.py | 2 +- tests/test_transpose.py | 2 +- tests/test_transposed.py | 2 +- tests/test_trt_compile.py | 2 +- tests/test_tversky_loss.py | 2 +- tests/test_ultrasound_confidence_map_transform.py | 2 +- tests/test_unet.py | 2 +- tests/test_unetr.py | 2 +- tests/test_unetr_block.py | 2 +- tests/{utils.py => test_utils.py} | 5 ++++- tests/test_utils_pytorch_numpy_unification.py | 2 +- tests/test_varautoencoder.py | 2 +- tests/test_varnet.py | 2 +- tests/test_video_datasets.py | 2 +- tests/test_vis_gradcam.py | 2 +- tests/test_vista3d.py | 2 +- tests/test_vista3d_utils.py | 2 +- tests/test_vit.py | 2 +- tests/test_vitautoenc.py | 2 +- tests/test_vnet.py | 2 +- tests/test_vote_ensemble.py | 2 +- tests/test_vote_ensembled.py | 2 +- tests/test_voxelmorph.py | 2 +- tests/test_vqvae.py | 2 +- tests/test_warp.py | 2 +- tests/test_watershed.py | 2 +- tests/test_watershedd.py | 2 +- tests/test_weighted_random_sampler_dist.py | 2 +- tests/test_wsi_sliding_window_splitter.py | 2 +- 
tests/test_wsireader.py | 2 +- tests/test_zarr_avg_merger.py | 2 +- tests/test_zoom.py | 2 +- tests/test_zoomd.py | 2 +- 529 files changed, 556 insertions(+), 529 deletions(-) rename tests/{utils.py => test_utils.py} (99%) diff --git a/tests/croppers.py b/tests/croppers.py index cfececfa9f..5b7f5148d9 100644 --- a/tests/croppers.py +++ b/tests/croppers.py @@ -20,7 +20,7 @@ from monai.transforms import Randomizable from monai.transforms.lazy.functional import apply_pending from monai.transforms.transform import MapTransform -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose class CropTest(unittest.TestCase): diff --git a/tests/hvd_evenly_divisible_all_gather.py b/tests/hvd_evenly_divisible_all_gather.py index 732ad13b83..24d1575f8f 100644 --- a/tests/hvd_evenly_divisible_all_gather.py +++ b/tests/hvd_evenly_divisible_all_gather.py @@ -15,7 +15,7 @@ from monai.utils import evenly_divisible_all_gather from monai.utils.module import optional_import -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose hvd, has_hvd = optional_import("horovod", name="torch") diff --git a/tests/lazy_transforms_utils.py b/tests/lazy_transforms_utils.py index 1681e26037..41a365fc4e 100644 --- a/tests/lazy_transforms_utils.py +++ b/tests/lazy_transforms_utils.py @@ -16,7 +16,7 @@ from monai.data import MetaTensor, set_track_meta from monai.transforms import InvertibleTransform, MapTransform, Randomizable from monai.transforms.lazy.functional import apply_pending -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose apply_transforms_kwargs = ("pending", "mode", "padding_mode", "dtype", "align_corners") diff --git a/tests/ngc_bundle_download.py b/tests/ngc_bundle_download.py index 107114861c..ee34451d75 100644 --- a/tests/ngc_bundle_download.py +++ b/tests/ngc_bundle_download.py @@ -24,7 +24,7 @@ from monai.bundle import download, load from monai.config import print_debug_info from monai.networks.utils import copy_model_state -from tests.utils import assert_allclose, skip_if_downloading_fails, skip_if_quick, skip_if_windows +from tests.test_utils import assert_allclose, skip_if_downloading_fails, skip_if_quick, skip_if_windows TEST_CASE_NGC_1 = [ "spleen_ct_segmentation", diff --git a/tests/padders.py b/tests/padders.py index a7dce263bb..94f3fa76bc 100644 --- a/tests/padders.py +++ b/tests/padders.py @@ -21,7 +21,7 @@ from monai.transforms.lazy.functional import apply_pending from monai.transforms.transform import MapTransform from monai.utils.enums import NumpyPadMode, PytorchPadMode -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose MODES = [] # Test modes diff --git a/tests/test_activations.py b/tests/test_activations.py index ad18e2bbec..3f0f17f063 100644 --- a/tests/test_activations.py +++ b/tests/test_activations.py @@ -18,7 +18,7 @@ from monai.networks.layers.factories import Act from monai.transforms import Activations -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TEST_CASES = [] for p in TEST_NDARRAYS: diff --git a/tests/test_activationsd.py b/tests/test_activationsd.py index 74968c0bb4..42bd653f8a 100644 --- a/tests/test_activationsd.py +++ b/tests/test_activationsd.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import Activationsd -from tests.utils import TEST_NDARRAYS, assert_allclose +from 
tests.test_utils import TEST_NDARRAYS, assert_allclose TEST_CASES = [] for p in TEST_NDARRAYS: diff --git a/tests/test_add_coordinate_channels.py b/tests/test_add_coordinate_channels.py index 199fe071e3..b3c9130057 100644 --- a/tests/test_add_coordinate_channels.py +++ b/tests/test_add_coordinate_channels.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.transforms import AddCoordinateChannels -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS, TEST_CASES_ERROR_1, TEST_CASES_ERROR_2 = [], [], [] for p in TEST_NDARRAYS: diff --git a/tests/test_add_coordinate_channelsd.py b/tests/test_add_coordinate_channelsd.py index c00240c2d5..ad5e64680c 100644 --- a/tests/test_add_coordinate_channelsd.py +++ b/tests/test_add_coordinate_channelsd.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.transforms import AddCoordinateChannelsd -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS, TEST_CASES_ERROR_1, TEST_CASES_ERROR_2 = [], [], [] for p in TEST_NDARRAYS: diff --git a/tests/test_add_extreme_points_channel.py b/tests/test_add_extreme_points_channel.py index c453322d6b..d395e07143 100644 --- a/tests/test_add_extreme_points_channel.py +++ b/tests/test_add_extreme_points_channel.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import AddExtremePointsChannel -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose IMG_CHANNEL = 3 TESTS = [] diff --git a/tests/test_add_extreme_points_channeld.py b/tests/test_add_extreme_points_channeld.py index 026f71200a..775766400d 100644 --- a/tests/test_add_extreme_points_channeld.py +++ b/tests/test_add_extreme_points_channeld.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import AddExtremePointsChanneld -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose IMG_CHANNEL = 3 diff --git a/tests/test_adjust_contrast.py b/tests/test_adjust_contrast.py index 2236056558..b99edc75c4 100644 --- a/tests/test_adjust_contrast.py +++ b/tests/test_adjust_contrast.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import AdjustContrast -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose TESTS = [] for invert_image in (True, False): diff --git a/tests/test_adjust_contrastd.py b/tests/test_adjust_contrastd.py index 38eb001226..1eb88260ef 100644 --- a/tests/test_adjust_contrastd.py +++ b/tests/test_adjust_contrastd.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import AdjustContrastd -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose TESTS = [] for invert_image in (True, False): diff --git a/tests/test_adn.py b/tests/test_adn.py index 327bf7b20c..6ff8042c69 100644 --- a/tests/test_adn.py +++ b/tests/test_adn.py @@ -16,7 +16,7 @@ from parameterized import parameterized from monai.networks.blocks import ADN -from tests.utils import TorchImageTestCase2D, TorchImageTestCase3D +from tests.test_utils import TorchImageTestCase2D, TorchImageTestCase3D TEST_CASES_2D = [ [{"act": None}], diff --git a/tests/test_affine.py 
b/tests/test_affine.py index a08a22ae6f..d81f7d0836 100644 --- a/tests/test_affine.py +++ b/tests/test_affine.py @@ -23,7 +23,7 @@ from monai.transforms.lazy.functional import apply_pending from monai.utils import optional_import from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose, test_local_inversion +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose, test_local_inversion TESTS = [] for p in TEST_NDARRAYS_ALL: diff --git a/tests/test_affine_grid.py b/tests/test_affine_grid.py index 2d89725bb7..64155606b2 100644 --- a/tests/test_affine_grid.py +++ b/tests/test_affine_grid.py @@ -19,7 +19,7 @@ from monai.data import MetaTensor, set_track_meta from monai.transforms import AffineGrid -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose, is_tf32_env +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose, is_tf32_env TESTS = [] for p in TEST_NDARRAYS_ALL: diff --git a/tests/test_affine_transform.py b/tests/test_affine_transform.py index 11464070e0..7410de9803 100644 --- a/tests/test_affine_transform.py +++ b/tests/test_affine_transform.py @@ -19,7 +19,7 @@ from monai.networks import normalize_transform, to_norm_affine from monai.networks.layers import AffineTransform -from tests.utils import is_tf32_env +from tests.test_utils import is_tf32_env _rtol = 1e-4 if not is_tf32_env() else 5e-3 diff --git a/tests/test_affined.py b/tests/test_affined.py index 94903ff8c7..e9a928e7e7 100644 --- a/tests/test_affined.py +++ b/tests/test_affined.py @@ -20,7 +20,7 @@ from monai.transforms import Affined from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose, test_local_inversion +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose, test_local_inversion TESTS = [] for p in TEST_NDARRAYS_ALL: diff --git a/tests/test_ahnet.py b/tests/test_ahnet.py index 99a177f395..4dd90e8d84 100644 --- a/tests/test_ahnet.py +++ b/tests/test_ahnet.py @@ -19,7 +19,7 @@ from monai.networks import eval_mode from monai.networks.blocks import FCN, MCFCN from monai.networks.nets import AHNet -from tests.utils import skip_if_quick, test_pretrained_networks, test_script_save +from tests.test_utils import skip_if_quick, test_pretrained_networks, test_script_save device = "cuda" if torch.cuda.is_available() else "cpu" diff --git a/tests/test_anchor_box.py b/tests/test_anchor_box.py index 301ce78361..531f708aae 100644 --- a/tests/test_anchor_box.py +++ b/tests/test_anchor_box.py @@ -18,7 +18,7 @@ from monai.apps.detection.utils.anchor_utils import AnchorGenerator, AnchorGeneratorWithAnchorShape from monai.utils import optional_import -from tests.utils import SkipIfBeforePyTorchVersion, assert_allclose, test_script_save +from tests.test_utils import SkipIfBeforePyTorchVersion, assert_allclose, test_script_save _, has_torchvision = optional_import("torchvision") diff --git a/tests/test_apply.py b/tests/test_apply.py index ca37e945ba..62300bb494 100644 --- a/tests/test_apply.py +++ b/tests/test_apply.py @@ -19,7 +19,7 @@ from monai.transforms.lazy.functional import apply_pending from monai.transforms.utils import create_rotate from monai.utils import LazyAttr, convert_to_tensor -from tests.utils import get_arange_img +from tests.test_utils import get_arange_img def single_2d_transform_cases(): diff --git a/tests/test_as_channel_last.py b/tests/test_as_channel_last.py index 51e1a5c0fd..991c7f0fac 100644 --- a/tests/test_as_channel_last.py +++ 
b/tests/test_as_channel_last.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import AsChannelLast -from tests.utils import TEST_NDARRAYS +from tests.test_utils import TEST_NDARRAYS TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_as_channel_lastd.py b/tests/test_as_channel_lastd.py index aa51ab6056..42076a2a97 100644 --- a/tests/test_as_channel_lastd.py +++ b/tests/test_as_channel_lastd.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import AsChannelLastd -from tests.utils import TEST_NDARRAYS +from tests.test_utils import TEST_NDARRAYS TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_as_discrete.py b/tests/test_as_discrete.py index bf59752920..e7c4c4a782 100644 --- a/tests/test_as_discrete.py +++ b/tests/test_as_discrete.py @@ -16,7 +16,7 @@ from parameterized import parameterized from monai.transforms import AsDiscrete -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TEST_CASES = [] for p in TEST_NDARRAYS: diff --git a/tests/test_as_discreted.py b/tests/test_as_discreted.py index ed1b3c5b3e..8fc3c1fabf 100644 --- a/tests/test_as_discreted.py +++ b/tests/test_as_discreted.py @@ -16,7 +16,7 @@ from parameterized import parameterized from monai.transforms import AsDiscreted -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TEST_CASES = [] for p in TEST_NDARRAYS: diff --git a/tests/test_atss_box_matcher.py b/tests/test_atss_box_matcher.py index 6133d4839d..fa8462232e 100644 --- a/tests/test_atss_box_matcher.py +++ b/tests/test_atss_box_matcher.py @@ -18,7 +18,7 @@ from monai.apps.detection.utils.ATSS_matcher import ATSSMatcher from monai.data.box_utils import box_iou -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose TEST_CASES = [ [ diff --git a/tests/test_attentionunet.py b/tests/test_attentionunet.py index 6a577f763f..bb14ef0222 100644 --- a/tests/test_attentionunet.py +++ b/tests/test_attentionunet.py @@ -17,7 +17,7 @@ import torch.nn as nn import monai.networks.nets.attentionunet as att -from tests.utils import skip_if_no_cuda, skip_if_quick +from tests.test_utils import skip_if_no_cuda, skip_if_quick def get_net_parameters(net: nn.Module) -> int: diff --git a/tests/test_auto3dseg.py b/tests/test_auto3dseg.py index 5273f0663a..beeaece760 100644 --- a/tests/test_auto3dseg.py +++ b/tests/test_auto3dseg.py @@ -54,7 +54,7 @@ ToDeviced, ) from monai.utils.enums import DataStatsKeys -from tests.utils import skip_if_no_cuda +from tests.test_utils import skip_if_no_cuda device = "cpu" n_workers = 2 diff --git a/tests/test_auto3dseg_bundlegen.py b/tests/test_auto3dseg_bundlegen.py index e7bf6820bc..667909fa81 100644 --- a/tests/test_auto3dseg_bundlegen.py +++ b/tests/test_auto3dseg_bundlegen.py @@ -26,7 +26,7 @@ from monai.bundle.config_parser import ConfigParser from monai.data import create_test_image_3d from monai.utils import set_determinism -from tests.utils import ( +from tests.test_utils import ( SkipIfBeforePyTorchVersion, get_testing_algo_template_path, skip_if_downloading_fails, diff --git a/tests/test_auto3dseg_ensemble.py b/tests/test_auto3dseg_ensemble.py index 7ac553cc0c..bd742fba43 100644 --- a/tests/test_auto3dseg_ensemble.py +++ b/tests/test_auto3dseg_ensemble.py @@ -32,7 +32,7 @@ from monai.transforms import SaveImage from monai.utils import check_parent_dir, optional_import, set_determinism from monai.utils.enums import 
AlgoKeys -from tests.utils import ( +from tests.test_utils import ( SkipIfBeforePyTorchVersion, get_testing_algo_template_path, skip_if_downloading_fails, diff --git a/tests/test_auto3dseg_hpo.py b/tests/test_auto3dseg_hpo.py index 53d09defa0..cedff8e99a 100644 --- a/tests/test_auto3dseg_hpo.py +++ b/tests/test_auto3dseg_hpo.py @@ -25,7 +25,7 @@ from monai.data import create_test_image_3d from monai.utils import optional_import from monai.utils.enums import AlgoKeys -from tests.utils import ( +from tests.test_utils import ( SkipIfBeforePyTorchVersion, get_testing_algo_template_path, skip_if_downloading_fails, diff --git a/tests/test_autoencoder.py b/tests/test_autoencoder.py index 6408f6a6d0..3f2f131900 100644 --- a/tests/test_autoencoder.py +++ b/tests/test_autoencoder.py @@ -19,7 +19,7 @@ from monai.networks import eval_mode from monai.networks.layers import Act from monai.networks.nets import AutoEncoder -from tests.utils import test_script_save +from tests.test_utils import test_script_save device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") diff --git a/tests/test_autoencoderkl.py b/tests/test_autoencoderkl.py index d15cb79084..0a3db60830 100644 --- a/tests/test_autoencoderkl.py +++ b/tests/test_autoencoderkl.py @@ -23,7 +23,7 @@ from monai.networks import eval_mode from monai.networks.nets import AutoencoderKL from monai.utils import optional_import -from tests.utils import SkipIfBeforePyTorchVersion, skip_if_downloading_fails, testing_data_config +from tests.test_utils import SkipIfBeforePyTorchVersion, skip_if_downloading_fails, testing_data_config tqdm, has_tqdm = optional_import("tqdm", name="tqdm") _, has_einops = optional_import("einops") diff --git a/tests/test_autoencoderkl_maisi.py b/tests/test_autoencoderkl_maisi.py index 0e9f427fb6..99f1dbdc76 100644 --- a/tests/test_autoencoderkl_maisi.py +++ b/tests/test_autoencoderkl_maisi.py @@ -19,7 +19,7 @@ from monai.apps.generation.maisi.networks.autoencoderkl_maisi import AutoencoderKlMaisi from monai.networks import eval_mode from monai.utils import optional_import -from tests.utils import SkipIfBeforePyTorchVersion +from tests.test_utils import SkipIfBeforePyTorchVersion tqdm, has_tqdm = optional_import("tqdm", name="tqdm") _, has_einops = optional_import("einops") diff --git a/tests/test_avg_merger.py b/tests/test_avg_merger.py index 7995d63271..9e6988e854 100644 --- a/tests/test_avg_merger.py +++ b/tests/test_avg_merger.py @@ -18,7 +18,7 @@ from torch.nn.functional import pad from monai.inferers import AvgMerger -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose TENSOR_4x4 = torch.randint(low=0, high=255, size=(2, 3, 4, 4), dtype=torch.float32) TENSOR_4x4_WITH_NAN = TENSOR_4x4.clone() diff --git a/tests/test_basic_unet.py b/tests/test_basic_unet.py index 770750851f..976846d53d 100644 --- a/tests/test_basic_unet.py +++ b/tests/test_basic_unet.py @@ -18,7 +18,7 @@ from monai.networks import eval_mode from monai.networks.nets import BasicUNet -from tests.utils import test_script_save +from tests.test_utils import test_script_save CASES_1D = [] for mode in ["pixelshuffle", "nontrainable", "deconv", None]: diff --git a/tests/test_basic_unetplusplus.py b/tests/test_basic_unetplusplus.py index 6438b5e0d4..11ac95bf48 100644 --- a/tests/test_basic_unetplusplus.py +++ b/tests/test_basic_unetplusplus.py @@ -18,7 +18,7 @@ from monai.networks import eval_mode from monai.networks.nets import BasicUNetPlusPlus -from tests.utils import test_script_save +from tests.test_utils import 
test_script_save CASES_1D = [] for mode in ["pixelshuffle", "nontrainable", "deconv", None]: diff --git a/tests/test_bilateral_approx_cpu.py b/tests/test_bilateral_approx_cpu.py index e8a55e1f76..24f35990dc 100644 --- a/tests/test_bilateral_approx_cpu.py +++ b/tests/test_bilateral_approx_cpu.py @@ -19,7 +19,7 @@ from torch.autograd import gradcheck from monai.networks.layers.filtering import BilateralFilter -from tests.utils import skip_if_no_cpp_extension +from tests.test_utils import skip_if_no_cpp_extension TEST_CASES = [ [ diff --git a/tests/test_bilateral_approx_cuda.py b/tests/test_bilateral_approx_cuda.py index 4ad15d9646..fddf7f002e 100644 --- a/tests/test_bilateral_approx_cuda.py +++ b/tests/test_bilateral_approx_cuda.py @@ -19,7 +19,7 @@ from torch.autograd import gradcheck from monai.networks.layers.filtering import BilateralFilter -from tests.utils import skip_if_no_cpp_extension, skip_if_no_cuda +from tests.test_utils import skip_if_no_cpp_extension, skip_if_no_cuda TEST_CASES = [ [ diff --git a/tests/test_bilateral_precise.py b/tests/test_bilateral_precise.py index e13ede5bfd..a917398657 100644 --- a/tests/test_bilateral_precise.py +++ b/tests/test_bilateral_precise.py @@ -19,7 +19,7 @@ from torch.autograd import gradcheck from monai.networks.layers.filtering import BilateralFilter -from tests.utils import skip_if_no_cpp_extension, skip_if_no_cuda, skip_if_quick +from tests.test_utils import skip_if_no_cpp_extension, skip_if_no_cuda, skip_if_quick TEST_CASES = [ [ diff --git a/tests/test_blend_images.py b/tests/test_blend_images.py index 700ae1fe58..589ae2d7c8 100644 --- a/tests/test_blend_images.py +++ b/tests/test_blend_images.py @@ -22,7 +22,7 @@ from monai.transforms.utils_pytorch_numpy_unification import moveaxis from monai.utils.module import optional_import from monai.visualize.utils import blend_images -from tests.utils import TEST_NDARRAYS +from tests.test_utils import TEST_NDARRAYS plt, has_matplotlib = optional_import("matplotlib.pyplot") diff --git a/tests/test_bounding_rect.py b/tests/test_bounding_rect.py index b879fa6093..30fe66a8ce 100644 --- a/tests/test_bounding_rect.py +++ b/tests/test_bounding_rect.py @@ -18,7 +18,7 @@ import monai from monai.transforms import BoundingRect -from tests.utils import TEST_NDARRAYS +from tests.test_utils import TEST_NDARRAYS TEST_CASE_1 = [(2, 3), [[0, 0], [1, 2]]] diff --git a/tests/test_bounding_rectd.py b/tests/test_bounding_rectd.py index 96435036b1..4e46805e76 100644 --- a/tests/test_bounding_rectd.py +++ b/tests/test_bounding_rectd.py @@ -18,7 +18,7 @@ import monai from monai.transforms import BoundingRectD -from tests.utils import TEST_NDARRAYS +from tests.test_utils import TEST_NDARRAYS TEST_CASE_1 = [(2, 3), [[0, 0], [1, 2]]] diff --git a/tests/test_box_coder.py b/tests/test_box_coder.py index 75ff650d6c..e253b30531 100644 --- a/tests/test_box_coder.py +++ b/tests/test_box_coder.py @@ -17,7 +17,7 @@ from monai.apps.detection.utils.box_coder import BoxCoder from monai.transforms import CastToType -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose class TestBoxTransform(unittest.TestCase): diff --git a/tests/test_box_transform.py b/tests/test_box_transform.py index e99f95fa32..4084fab88b 100644 --- a/tests/test_box_transform.py +++ b/tests/test_box_transform.py @@ -36,7 +36,7 @@ ) from monai.data.meta_tensor import MetaTensor from monai.transforms import CastToTyped, Invertd -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, 
assert_allclose TESTS_3D = [] boxes = [[0, 0, 0, 0, 0, 0], [0, 1, 0, 2, 3, 3], [0, 1, 1, 2, 3, 4]] diff --git a/tests/test_box_utils.py b/tests/test_box_utils.py index 3c05efe0d0..d277fe1af0 100644 --- a/tests/test_box_utils.py +++ b/tests/test_box_utils.py @@ -35,7 +35,7 @@ non_max_suppression, ) from monai.utils.type_conversion import convert_data_type -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_bundle_ckpt_export.py b/tests/test_bundle_ckpt_export.py index cfcadcfc4c..7c5f359e53 100644 --- a/tests/test_bundle_ckpt_export.py +++ b/tests/test_bundle_ckpt_export.py @@ -21,7 +21,7 @@ from monai.bundle import ConfigParser from monai.data import load_net_with_metadata from monai.networks import save_state -from tests.utils import command_line_tests, skip_if_windows +from tests.test_utils import command_line_tests, skip_if_windows TEST_CASE_1 = ["", ""] diff --git a/tests/test_bundle_download.py b/tests/test_bundle_download.py index 399c61b117..e6f8bb24b2 100644 --- a/tests/test_bundle_download.py +++ b/tests/test_bundle_download.py @@ -27,7 +27,7 @@ from monai.bundle import ConfigParser, create_workflow, load from monai.bundle.scripts import _examine_monai_version, _list_latest_versions, download from monai.utils import optional_import -from tests.utils import ( +from tests.test_utils import ( SkipIfBeforePyTorchVersion, assert_allclose, command_line_tests, diff --git a/tests/test_bundle_get_data.py b/tests/test_bundle_get_data.py index f84713fbe3..6fb73263ff 100644 --- a/tests/test_bundle_get_data.py +++ b/tests/test_bundle_get_data.py @@ -17,7 +17,7 @@ from monai.bundle import get_all_bundles_list, get_bundle_info, get_bundle_versions from monai.utils import optional_import -from tests.utils import SkipIfNoModule, skip_if_downloading_fails, skip_if_quick, skip_if_windows +from tests.test_utils import SkipIfNoModule, skip_if_downloading_fails, skip_if_quick, skip_if_windows requests, _ = optional_import("requests") diff --git a/tests/test_bundle_init_bundle.py b/tests/test_bundle_init_bundle.py index eb831093d5..90d02cdafa 100644 --- a/tests/test_bundle_init_bundle.py +++ b/tests/test_bundle_init_bundle.py @@ -18,7 +18,7 @@ import torch from monai.networks.nets import UNet -from tests.utils import command_line_tests, skip_if_windows +from tests.test_utils import command_line_tests, skip_if_windows @skip_if_windows diff --git a/tests/test_bundle_onnx_export.py b/tests/test_bundle_onnx_export.py index ee22d7caef..6453f47fd5 100644 --- a/tests/test_bundle_onnx_export.py +++ b/tests/test_bundle_onnx_export.py @@ -19,7 +19,7 @@ from monai.bundle import ConfigParser from monai.networks import save_state -from tests.utils import SkipIfBeforePyTorchVersion, SkipIfNoModule, command_line_tests, skip_if_windows +from tests.test_utils import SkipIfBeforePyTorchVersion, SkipIfNoModule, command_line_tests, skip_if_windows TEST_CASE_1 = ["True"] TEST_CASE_2 = ["False"] diff --git a/tests/test_bundle_push_to_hf_hub.py b/tests/test_bundle_push_to_hf_hub.py index 39368c6f40..d164b460a0 100644 --- a/tests/test_bundle_push_to_hf_hub.py +++ b/tests/test_bundle_push_to_hf_hub.py @@ -20,7 +20,7 @@ from monai.bundle import push_to_hf_hub from monai.utils import optional_import -from tests.utils import skip_if_quick +from tests.test_utils import skip_if_quick huggingface_hub, has_huggingface_hub = optional_import("huggingface_hub") diff --git a/tests/test_bundle_trt_export.py 
b/tests/test_bundle_trt_export.py index 27e1ee97a8..142883845d 100644 --- a/tests/test_bundle_trt_export.py +++ b/tests/test_bundle_trt_export.py @@ -22,7 +22,7 @@ from monai.data import load_net_with_metadata from monai.networks import save_state from monai.utils import optional_import -from tests.utils import ( +from tests.test_utils import ( SkipIfBeforeComputeCapabilityVersion, command_line_tests, skip_if_no_cuda, diff --git a/tests/test_bundle_utils.py b/tests/test_bundle_utils.py index 47c534f3b6..fd8d35bc1a 100644 --- a/tests/test_bundle_utils.py +++ b/tests/test_bundle_utils.py @@ -22,7 +22,7 @@ from monai.bundle.utils import load_bundle_config from monai.networks.nets import UNet from monai.utils import pprint_edges -from tests.utils import command_line_tests, skip_if_windows +from tests.test_utils import command_line_tests, skip_if_windows metadata = """ { diff --git a/tests/test_bundle_verify_metadata.py b/tests/test_bundle_verify_metadata.py index f6c2192621..ad10121bdd 100644 --- a/tests/test_bundle_verify_metadata.py +++ b/tests/test_bundle_verify_metadata.py @@ -19,7 +19,7 @@ from parameterized import parameterized from monai.bundle import ConfigParser, verify_metadata -from tests.utils import command_line_tests, download_url_or_skip_test, skip_if_windows, testing_data_config +from tests.test_utils import command_line_tests, download_url_or_skip_test, skip_if_windows, testing_data_config SCHEMA_FILE = os.path.join(os.path.dirname(__file__), "testing_data", "schema.json") diff --git a/tests/test_bundle_verify_net.py b/tests/test_bundle_verify_net.py index f55fdd597b..c7d508b019 100644 --- a/tests/test_bundle_verify_net.py +++ b/tests/test_bundle_verify_net.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.bundle import ConfigParser, verify_net_in_out -from tests.utils import command_line_tests, skip_if_no_cuda, skip_if_windows +from tests.test_utils import command_line_tests, skip_if_no_cuda, skip_if_windows TEST_CASE_1 = [ os.path.join(os.path.dirname(__file__), "testing_data", "metadata.json"), diff --git a/tests/test_call_dist.py b/tests/test_call_dist.py index 503cb5e792..63ab5982c7 100644 --- a/tests/test_call_dist.py +++ b/tests/test_call_dist.py @@ -13,7 +13,7 @@ import unittest -from tests.utils import DistCall, DistTestCase +from tests.test_utils import DistCall, DistTestCase class DistributedCallTest(DistTestCase): diff --git a/tests/test_cast_to_type.py b/tests/test_cast_to_type.py index 035260804e..9e3c2d331c 100644 --- a/tests/test_cast_to_type.py +++ b/tests/test_cast_to_type.py @@ -20,7 +20,7 @@ from monai.transforms import CastToType from monai.utils import optional_import from monai.utils.type_conversion import get_equivalent_dtype -from tests.utils import HAS_CUPY, TEST_NDARRAYS +from tests.test_utils import HAS_CUPY, TEST_NDARRAYS cp, _ = optional_import("cupy") diff --git a/tests/test_cast_to_typed.py b/tests/test_cast_to_typed.py index 81e17117a9..5be6dd2b9f 100644 --- a/tests/test_cast_to_typed.py +++ b/tests/test_cast_to_typed.py @@ -19,7 +19,7 @@ from monai.transforms import CastToTyped from monai.utils import optional_import -from tests.utils import HAS_CUPY +from tests.test_utils import HAS_CUPY cp, _ = optional_import("cupy") diff --git a/tests/test_classes_to_indices.py b/tests/test_classes_to_indices.py index a7377dac16..df7e367c73 100644 --- a/tests/test_classes_to_indices.py +++ b/tests/test_classes_to_indices.py @@ -16,7 +16,7 @@ from parameterized import parameterized from monai.transforms import ClassesToIndices -from 
tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS_CASES = [] for p in TEST_NDARRAYS: diff --git a/tests/test_classes_to_indicesd.py b/tests/test_classes_to_indicesd.py index dead1ae753..829f31b594 100644 --- a/tests/test_classes_to_indicesd.py +++ b/tests/test_classes_to_indicesd.py @@ -16,7 +16,7 @@ from parameterized import parameterized from monai.transforms import ClassesToIndicesd -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS_CASES = [] for p in TEST_NDARRAYS: diff --git a/tests/test_clip_intensity_percentiles.py b/tests/test_clip_intensity_percentiles.py index 77f811db87..2f71d2e894 100644 --- a/tests/test_clip_intensity_percentiles.py +++ b/tests/test_clip_intensity_percentiles.py @@ -19,7 +19,7 @@ from monai.transforms.utils import soft_clip from monai.transforms.utils_pytorch_numpy_unification import clip, percentile from monai.utils.type_conversion import convert_to_tensor -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, NumpyImageTestCase3D, assert_allclose +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, NumpyImageTestCase3D, assert_allclose def test_hard_clip_func(im, lower, upper): diff --git a/tests/test_clip_intensity_percentilesd.py b/tests/test_clip_intensity_percentilesd.py index 3e06b18418..d151a6065b 100644 --- a/tests/test_clip_intensity_percentilesd.py +++ b/tests/test_clip_intensity_percentilesd.py @@ -18,7 +18,7 @@ from monai.transforms import ClipIntensityPercentilesd from monai.transforms.utils_pytorch_numpy_unification import clip, percentile from monai.utils.type_conversion import convert_to_tensor -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, NumpyImageTestCase3D, assert_allclose +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, NumpyImageTestCase3D, assert_allclose from .test_clip_intensity_percentiles import test_hard_clip_func, test_soft_clip_func diff --git a/tests/test_complex_utils.py b/tests/test_complex_utils.py index fdcee4babe..26caa82438 100644 --- a/tests/test_complex_utils.py +++ b/tests/test_complex_utils.py @@ -18,7 +18,7 @@ from monai.apps.reconstruction.complex_utils import complex_abs, complex_conj, complex_mul, convert_to_tensor_complex from monai.utils.type_conversion import convert_data_type -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose # test case for convert_to_tensor_complex im_complex = [[1.0 + 1.0j, 1.0 + 1.0j], [1.0 + 1.0j, 1.0 + 1.0j]] diff --git a/tests/test_compute_confusion_matrix.py b/tests/test_compute_confusion_matrix.py index 248f16a7fe..5b06bb88cd 100644 --- a/tests/test_compute_confusion_matrix.py +++ b/tests/test_compute_confusion_matrix.py @@ -24,7 +24,7 @@ do_metric_reduction, get_confusion_matrix, ) -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose _device = "cuda:0" if torch.cuda.is_available() else "cpu" # input data diff --git a/tests/test_compute_f_beta.py b/tests/test_compute_f_beta.py index be2a7fc176..071c8963f2 100644 --- a/tests/test_compute_f_beta.py +++ b/tests/test_compute_f_beta.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.metrics import FBetaScore -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose _device = "cuda:0" if torch.cuda.is_available() else "cpu" diff --git a/tests/test_compute_ho_ver_maps.py 
b/tests/test_compute_ho_ver_maps.py index 6e46cf2b1e..b1e949b0be 100644 --- a/tests/test_compute_ho_ver_maps.py +++ b/tests/test_compute_ho_ver_maps.py @@ -19,7 +19,7 @@ from monai.transforms.intensity.array import ComputeHoVerMaps from monai.utils import min_version, optional_import -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose _, has_skimage = optional_import("skimage", "0.19.0", min_version) diff --git a/tests/test_compute_ho_ver_maps_d.py b/tests/test_compute_ho_ver_maps_d.py index 0734e2e731..7d57492250 100644 --- a/tests/test_compute_ho_ver_maps_d.py +++ b/tests/test_compute_ho_ver_maps_d.py @@ -19,7 +19,7 @@ from monai.transforms.intensity.dictionary import ComputeHoVerMapsd from monai.utils import min_version, optional_import -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose _, has_skimage = optional_import("skimage", "0.19.0", min_version) diff --git a/tests/test_compute_panoptic_quality.py b/tests/test_compute_panoptic_quality.py index a916ea32b2..304c72e574 100644 --- a/tests/test_compute_panoptic_quality.py +++ b/tests/test_compute_panoptic_quality.py @@ -19,7 +19,7 @@ from parameterized import parameterized from monai.metrics import PanopticQualityMetric, compute_panoptic_quality -from tests.utils import SkipIfNoModule +from tests.test_utils import SkipIfNoModule _device = "cuda:0" if torch.cuda.is_available() else "cpu" diff --git a/tests/test_concat_itemsd.py b/tests/test_concat_itemsd.py index 564ddf5c1f..b1d461cac8 100644 --- a/tests/test_concat_itemsd.py +++ b/tests/test_concat_itemsd.py @@ -18,7 +18,7 @@ from monai.data import MetaTensor from monai.transforms import ConcatItemsd -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose class TestConcatItemsd(unittest.TestCase): diff --git a/tests/test_config_parser.py b/tests/test_config_parser.py index 2b00c9f9d1..da2cea2625 100644 --- a/tests/test_config_parser.py +++ b/tests/test_config_parser.py @@ -26,7 +26,7 @@ from monai.data import DataLoader, Dataset from monai.transforms import Compose, LoadImaged, RandTorchVisiond from monai.utils import min_version, optional_import -from tests.utils import TimedCall +from tests.test_utils import TimedCall _, has_tv = optional_import("torchvision", "0.8.0", min_version) _, has_yaml = optional_import("yaml") diff --git a/tests/test_controlnet.py b/tests/test_controlnet.py index 4746c7ce22..9503518762 100644 --- a/tests/test_controlnet.py +++ b/tests/test_controlnet.py @@ -23,7 +23,7 @@ from monai.networks import eval_mode from monai.networks.nets.controlnet import ControlNet from monai.utils import optional_import -from tests.utils import skip_if_downloading_fails, testing_data_config +from tests.test_utils import skip_if_downloading_fails, testing_data_config _, has_einops = optional_import("einops") UNCOND_CASES_2D = [ diff --git a/tests/test_controlnet_maisi.py b/tests/test_controlnet_maisi.py index bfdf25ec6e..0166c33662 100644 --- a/tests/test_controlnet_maisi.py +++ b/tests/test_controlnet_maisi.py @@ -20,7 +20,7 @@ from monai.apps.generation.maisi.networks.controlnet_maisi import ControlNetMaisi from monai.networks import eval_mode from monai.utils import optional_import -from tests.utils import SkipIfBeforePyTorchVersion +from tests.test_utils import SkipIfBeforePyTorchVersion _, has_einops = optional_import("einops") diff --git a/tests/test_convert_box_points.py b/tests/test_convert_box_points.py index 
5e3d7ee645..1a21050b2c 100644 --- a/tests/test_convert_box_points.py +++ b/tests/test_convert_box_points.py @@ -18,7 +18,7 @@ from monai.data.box_utils import convert_box_to_standard_mode from monai.transforms.spatial.array import ConvertBoxToPoints, ConvertPointsToBoxes -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose TEST_CASE_POINTS_2D = [ [ diff --git a/tests/test_convert_data_type.py b/tests/test_convert_data_type.py index a27a05cf28..a64e9553b1 100644 --- a/tests/test_convert_data_type.py +++ b/tests/test_convert_data_type.py @@ -19,7 +19,7 @@ from monai.data import MetaTensor from monai.utils.type_conversion import convert_data_type, convert_to_dst_type, get_equivalent_dtype -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose TESTS: list[tuple] = [] for in_type in TEST_NDARRAYS_ALL + (int, float): diff --git a/tests/test_convert_to_multi_channel.py b/tests/test_convert_to_multi_channel.py index 98bbea1ebf..ff08fe1145 100644 --- a/tests/test_convert_to_multi_channel.py +++ b/tests/test_convert_to_multi_channel.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import ConvertToMultiChannelBasedOnBratsClasses -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_convert_to_onnx.py b/tests/test_convert_to_onnx.py index 798c510800..23e5951b85 100644 --- a/tests/test_convert_to_onnx.py +++ b/tests/test_convert_to_onnx.py @@ -21,7 +21,7 @@ from monai.networks import convert_to_onnx from monai.networks.nets import SegResNet, UNet from monai.utils.module import pytorch_after -from tests.utils import SkipIfBeforePyTorchVersion, SkipIfNoModule, optional_import, skip_if_quick +from tests.test_utils import SkipIfBeforePyTorchVersion, SkipIfNoModule, optional_import, skip_if_quick if torch.cuda.is_available(): TORCH_DEVICE_OPTIONS = ["cpu", "cuda"] diff --git a/tests/test_convert_to_trt.py b/tests/test_convert_to_trt.py index a7b1edec3c..18f2e6d13c 100644 --- a/tests/test_convert_to_trt.py +++ b/tests/test_convert_to_trt.py @@ -20,7 +20,7 @@ from monai.networks import convert_to_trt from monai.networks.nets import UNet from monai.utils import optional_import -from tests.utils import SkipIfBeforeComputeCapabilityVersion, skip_if_no_cuda, skip_if_quick, skip_if_windows +from tests.test_utils import SkipIfBeforeComputeCapabilityVersion, skip_if_no_cuda, skip_if_quick, skip_if_windows _, has_torchtrt = optional_import( "torch_tensorrt", diff --git a/tests/test_convolutions.py b/tests/test_convolutions.py index 77bc12770f..90695d9dd5 100644 --- a/tests/test_convolutions.py +++ b/tests/test_convolutions.py @@ -14,7 +14,7 @@ import unittest from monai.networks.blocks import Convolution, ResidualUnit -from tests.utils import TorchImageTestCase2D, TorchImageTestCase3D +from tests.test_utils import TorchImageTestCase2D, TorchImageTestCase3D class TestConvolution2D(TorchImageTestCase2D): diff --git a/tests/test_copy_itemsd.py b/tests/test_copy_itemsd.py index a78e08897b..31069a8b90 100644 --- a/tests/test_copy_itemsd.py +++ b/tests/test_copy_itemsd.py @@ -20,7 +20,7 @@ from monai.networks import eval_mode from monai.transforms import CopyItemsd from monai.utils import ensure_tuple -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose TEST_CASE_1 = ["img", 1, "img_1"] diff --git a/tests/test_correct_crop_centers.py 
b/tests/test_correct_crop_centers.py index 82b0b93b53..df0e32f5c4 100644 --- a/tests/test_correct_crop_centers.py +++ b/tests/test_correct_crop_centers.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms.utils import correct_crop_centers -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose TESTS = [[[1, 5, 0], [2, 2, 2], [10, 10, 10]], [[4, 4, 4], [2, 2, 1], [10, 10, 10]]] diff --git a/tests/test_create_grid_and_affine.py b/tests/test_create_grid_and_affine.py index 4910a10470..a0aca3bbc3 100644 --- a/tests/test_create_grid_and_affine.py +++ b/tests/test_create_grid_and_affine.py @@ -24,7 +24,7 @@ create_shear, create_translate, ) -from tests.utils import assert_allclose, is_tf32_env +from tests.test_utils import assert_allclose, is_tf32_env class TestCreateGrid(unittest.TestCase): diff --git a/tests/test_crf_cpu.py b/tests/test_crf_cpu.py index a7ae0ff2df..2dedd12eaf 100644 --- a/tests/test_crf_cpu.py +++ b/tests/test_crf_cpu.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.networks.blocks import CRF -from tests.utils import skip_if_no_cpp_extension +from tests.test_utils import skip_if_no_cpp_extension TEST_CASES = [ [ diff --git a/tests/test_crf_cuda.py b/tests/test_crf_cuda.py index d5329aab15..e1114f65fd 100644 --- a/tests/test_crf_cuda.py +++ b/tests/test_crf_cuda.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.networks.blocks import CRF -from tests.utils import skip_if_no_cpp_extension, skip_if_no_cuda +from tests.test_utils import skip_if_no_cpp_extension, skip_if_no_cuda TEST_CASES = [ [ diff --git a/tests/test_crop_foreground.py b/tests/test_crop_foreground.py index f63cb3e8b0..d8f3c54d58 100644 --- a/tests/test_crop_foreground.py +++ b/tests/test_crop_foreground.py @@ -20,7 +20,7 @@ from monai.data.meta_tensor import MetaTensor from monai.transforms import CropForeground from monai.transforms.lazy.functional import apply_pending -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose TEST_COORDS, TESTS, TEST_LAZY_ERROR = [], [], [] diff --git a/tests/test_crop_foregroundd.py b/tests/test_crop_foregroundd.py index 92954aa81e..63601ecc29 100644 --- a/tests/test_crop_foregroundd.py +++ b/tests/test_crop_foregroundd.py @@ -19,7 +19,7 @@ from monai.data.meta_tensor import MetaTensor from monai.transforms import CropForegroundd from monai.transforms.lazy.functional import apply_pending -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose TEST_POSITION, TESTS = [], [] for p in TEST_NDARRAYS_ALL: diff --git a/tests/test_cross_validation.py b/tests/test_cross_validation.py index 6d0f2319fb..a80af5b2a3 100644 --- a/tests/test_cross_validation.py +++ b/tests/test_cross_validation.py @@ -17,7 +17,7 @@ from monai.apps import CrossValidation, DecathlonDataset from monai.data import MetaTensor from monai.transforms import Compose, EnsureChannelFirstd, LoadImaged, ScaleIntensityd -from tests.utils import skip_if_downloading_fails, skip_if_quick +from tests.test_utils import skip_if_downloading_fails, skip_if_quick class TestCrossValidation(unittest.TestCase): diff --git a/tests/test_crossattention.py b/tests/test_crossattention.py index e034e42290..8ea7c33fea 100644 --- a/tests/test_crossattention.py +++ b/tests/test_crossattention.py @@ -22,7 +22,7 @@ from monai.networks.blocks.crossattention import CrossAttentionBlock from 
monai.networks.layers.factories import RelPosEmbedding from monai.utils import optional_import -from tests.utils import SkipIfBeforePyTorchVersion, assert_allclose +from tests.test_utils import SkipIfBeforePyTorchVersion, assert_allclose einops, has_einops = optional_import("einops") diff --git a/tests/test_csv_iterable_dataset.py b/tests/test_csv_iterable_dataset.py index e06da0c41b..3dc54e3151 100644 --- a/tests/test_csv_iterable_dataset.py +++ b/tests/test_csv_iterable_dataset.py @@ -21,7 +21,7 @@ from monai.data import CSVIterableDataset, DataLoader from monai.transforms import ToNumpyd -from tests.utils import skip_if_windows +from tests.test_utils import skip_if_windows @skip_if_windows diff --git a/tests/test_cucim_dict_transform.py b/tests/test_cucim_dict_transform.py index 3c5703a34c..1b7f3dafec 100644 --- a/tests/test_cucim_dict_transform.py +++ b/tests/test_cucim_dict_transform.py @@ -18,7 +18,7 @@ from monai.transforms import CuCIMd from monai.utils import optional_import, set_determinism -from tests.utils import HAS_CUPY, skip_if_no_cuda +from tests.test_utils import HAS_CUPY, skip_if_no_cuda _, has_cut = optional_import("cucim.core.operations.expose.transform") cp, _ = optional_import("cupy") diff --git a/tests/test_cucim_transform.py b/tests/test_cucim_transform.py index 162e16b52a..264451444d 100644 --- a/tests/test_cucim_transform.py +++ b/tests/test_cucim_transform.py @@ -18,7 +18,7 @@ from monai.transforms import CuCIM from monai.utils import optional_import, set_determinism -from tests.utils import HAS_CUPY, skip_if_no_cuda +from tests.test_utils import HAS_CUPY, skip_if_no_cuda _, has_cut = optional_import("cucim.core.operations.expose.transform") cp, _ = optional_import("cupy") diff --git a/tests/test_cumulative.py b/tests/test_cumulative.py index d3b6ba094c..ffa5cf312f 100644 --- a/tests/test_cumulative.py +++ b/tests/test_cumulative.py @@ -16,7 +16,7 @@ import torch from monai.metrics import Cumulative -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose class TestCumulative(unittest.TestCase): diff --git a/tests/test_cumulative_average_dist.py b/tests/test_cumulative_average_dist.py index 30c01c21ee..ddbfa1b9b1 100644 --- a/tests/test_cumulative_average_dist.py +++ b/tests/test_cumulative_average_dist.py @@ -18,7 +18,7 @@ import torch.distributed as dist from monai.metrics import CumulativeAverage -from tests.utils import DistCall, DistTestCase, SkipIfBeforePyTorchVersion +from tests.test_utils import DistCall, DistTestCase, SkipIfBeforePyTorchVersion @SkipIfBeforePyTorchVersion((1, 8)) diff --git a/tests/test_cv2_dist.py b/tests/test_cv2_dist.py index 562c205763..25d20b15e6 100644 --- a/tests/test_cv2_dist.py +++ b/tests/test_cv2_dist.py @@ -20,7 +20,7 @@ # FIXME: test for the workaround of https://github.com/Project-MONAI/MONAI/issues/5291 from monai.config.deviceconfig import print_config -from tests.utils import skip_if_no_cuda +from tests.test_utils import skip_if_no_cuda def main_worker(rank, ngpus_per_node, port): diff --git a/tests/test_daf3d.py b/tests/test_daf3d.py index d20cb3cfd1..cbd150f439 100644 --- a/tests/test_daf3d.py +++ b/tests/test_daf3d.py @@ -19,7 +19,7 @@ from monai.networks import eval_mode from monai.networks.nets import DAF3D from monai.utils import optional_import -from tests.utils import test_script_save +from tests.test_utils import test_script_save _, has_tv = optional_import("torchvision") diff --git a/tests/test_dataloader.py b/tests/test_dataloader.py index 73e27799f7..929f362341 100644 --- 
a/tests/test_dataloader.py +++ b/tests/test_dataloader.py @@ -21,7 +21,7 @@ from monai.data import CacheDataset, DataLoader, Dataset, ZipDataset from monai.transforms import Compose, DataStatsd, Randomizable, SimulateDelayd from monai.utils import convert_to_numpy, set_determinism -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose TEST_CASE_1 = [[{"image": np.asarray([1, 2, 3])}, {"image": np.asarray([4, 5])}]] diff --git a/tests/test_decathlondataset.py b/tests/test_decathlondataset.py index 70a2a6c06c..f4f6262697 100644 --- a/tests/test_decathlondataset.py +++ b/tests/test_decathlondataset.py @@ -19,7 +19,7 @@ from monai.apps import DecathlonDataset from monai.data import MetaTensor from monai.transforms import Compose, EnsureChannelFirstd, LoadImaged, ScaleIntensityd -from tests.utils import skip_if_downloading_fails, skip_if_quick +from tests.test_utils import skip_if_downloading_fails, skip_if_quick class TestDecathlonDataset(unittest.TestCase): diff --git a/tests/test_decollate.py b/tests/test_decollate.py index 92f7c89e28..2eaec0937c 100644 --- a/tests/test_decollate.py +++ b/tests/test_decollate.py @@ -40,7 +40,7 @@ from monai.transforms.spatial.dictionary import RandAffined, RandRotate90d from monai.utils import optional_import, set_determinism from monai.utils.enums import PostFix, TraceKeys -from tests.utils import make_nifti_image +from tests.test_utils import make_nifti_image _, has_nib = optional_import("nibabel") diff --git a/tests/test_denseblock.py b/tests/test_denseblock.py index b741582422..2f80954983 100644 --- a/tests/test_denseblock.py +++ b/tests/test_denseblock.py @@ -16,7 +16,7 @@ import torch.nn as nn from monai.networks.blocks import ConvDenseBlock, DenseBlock -from tests.utils import TorchImageTestCase2D, TorchImageTestCase3D +from tests.test_utils import TorchImageTestCase2D, TorchImageTestCase3D class TestDenseBlock2D(TorchImageTestCase2D): diff --git a/tests/test_densenet.py b/tests/test_densenet.py index ee4be9003b..e28528195d 100644 --- a/tests/test_densenet.py +++ b/tests/test_densenet.py @@ -21,7 +21,7 @@ from monai.networks import eval_mode from monai.networks.nets import DenseNet121, Densenet169, DenseNet264, densenet201 from monai.utils import optional_import -from tests.utils import skip_if_downloading_fails, skip_if_quick, test_script_save +from tests.test_utils import skip_if_downloading_fails, skip_if_quick, test_script_save if TYPE_CHECKING: import torchvision diff --git a/tests/test_detect_envelope.py b/tests/test_detect_envelope.py index f9c2b5ac53..ff8367aa6e 100644 --- a/tests/test_detect_envelope.py +++ b/tests/test_detect_envelope.py @@ -19,7 +19,7 @@ from monai.transforms import DetectEnvelope from monai.utils import OptionalImportError -from tests.utils import TEST_NDARRAYS, SkipIfModule, SkipIfNoModule, assert_allclose +from tests.test_utils import TEST_NDARRAYS, SkipIfModule, SkipIfNoModule, assert_allclose n_samples = 500 hann_windowed_sine = np.sin(2 * np.pi * 10 * np.linspace(0, 1, n_samples)) * np.hanning(n_samples) diff --git a/tests/test_detector_boxselector.py b/tests/test_detector_boxselector.py index 326ecd5773..a252ef15e9 100644 --- a/tests/test_detector_boxselector.py +++ b/tests/test_detector_boxselector.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.apps.detection.utils.box_selector import BoxSelector -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose device = "cuda" if torch.cuda.is_available() else "cpu" num_anchors = 7 diff 
--git a/tests/test_detector_utils.py b/tests/test_detector_utils.py index 352e1c2faf..d84719cf3f 100644 --- a/tests/test_detector_utils.py +++ b/tests/test_detector_utils.py @@ -19,7 +19,7 @@ from monai.apps.detection.utils.detector_utils import preprocess_images from monai.utils import ensure_tuple -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose TEST_CASE_1 = [ # 3D, batch 3, 2 input channel { diff --git a/tests/test_dice_focal_loss.py b/tests/test_dice_focal_loss.py index f769aac69f..e04b0c1d56 100644 --- a/tests/test_dice_focal_loss.py +++ b/tests/test_dice_focal_loss.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.losses import DiceFocalLoss, DiceLoss, FocalLoss -from tests.utils import test_script_save +from tests.test_utils import test_script_save class TestDiceFocalLoss(unittest.TestCase): diff --git a/tests/test_dice_loss.py b/tests/test_dice_loss.py index cea6ccf113..294312d214 100644 --- a/tests/test_dice_loss.py +++ b/tests/test_dice_loss.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.losses import DiceLoss -from tests.utils import test_script_save +from tests.test_utils import test_script_save TEST_CASES = [ [ # shape: (1, 1, 2, 2), (1, 1, 2, 2) diff --git a/tests/test_diffusion_model_unet.py b/tests/test_diffusion_model_unet.py index 7f764d85de..a7c823709d 100644 --- a/tests/test_diffusion_model_unet.py +++ b/tests/test_diffusion_model_unet.py @@ -23,7 +23,7 @@ from monai.networks import eval_mode from monai.networks.nets import DiffusionModelUNet from monai.utils import optional_import -from tests.utils import skip_if_downloading_fails, testing_data_config +from tests.test_utils import skip_if_downloading_fails, testing_data_config _, has_einops = optional_import("einops") diff --git a/tests/test_dints_mixop.py b/tests/test_dints_mixop.py index 683a8d1005..ea22b06f8b 100644 --- a/tests/test_dints_mixop.py +++ b/tests/test_dints_mixop.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.networks.nets.dints import Cell, MixedOp -from tests.utils import test_script_save +from tests.test_utils import test_script_save TEST_CASES_3D = [ [ diff --git a/tests/test_dints_network.py b/tests/test_dints_network.py index 5ee4db7a4e..178b87a3dc 100644 --- a/tests/test_dints_network.py +++ b/tests/test_dints_network.py @@ -19,7 +19,7 @@ from monai.networks.nets import DiNTS, TopologyInstance, TopologySearch from monai.networks.nets.dints import Cell -from tests.utils import SkipIfBeforePyTorchVersion, skip_if_quick, test_script_save +from tests.test_utils import SkipIfBeforePyTorchVersion, skip_if_quick, test_script_save TEST_CASES_3D = [ [ diff --git a/tests/test_discriminator.py b/tests/test_discriminator.py index f615605e56..b13c825284 100644 --- a/tests/test_discriminator.py +++ b/tests/test_discriminator.py @@ -18,7 +18,7 @@ from monai.networks import eval_mode from monai.networks.nets import Discriminator -from tests.utils import test_script_save +from tests.test_utils import test_script_save TEST_CASE_0 = [ {"in_shape": (1, 64, 64), "channels": (2, 4, 8), "strides": (2, 2, 2), "num_res_units": 0}, diff --git a/tests/test_distance_transform_edt.py b/tests/test_distance_transform_edt.py index cf5c253c0c..3e17eaabd2 100644 --- a/tests/test_distance_transform_edt.py +++ b/tests/test_distance_transform_edt.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.transforms import DistanceTransformEDT, DistanceTransformEDTd -from tests.utils import HAS_CUPY, 
assert_allclose, optional_import, skip_if_no_cuda +from tests.test_utils import HAS_CUPY, assert_allclose, optional_import, skip_if_no_cuda momorphology, has_cucim = optional_import("cucim.core.operations.morphology") ndimage, has_ndimage = optional_import("scipy.ndimage") diff --git a/tests/test_download_and_extract.py b/tests/test_download_and_extract.py index 439a11bbc1..0b5d632123 100644 --- a/tests/test_download_and_extract.py +++ b/tests/test_download_and_extract.py @@ -20,7 +20,7 @@ from parameterized import parameterized from monai.apps import download_and_extract, download_url, extractall -from tests.utils import SkipIfNoModule, skip_if_downloading_fails, skip_if_quick, testing_data_config +from tests.test_utils import SkipIfNoModule, skip_if_downloading_fails, skip_if_quick, testing_data_config @SkipIfNoModule("requests") diff --git a/tests/test_ds_loss.py b/tests/test_ds_loss.py index daa4ed1e5e..5f4daaae81 100644 --- a/tests/test_ds_loss.py +++ b/tests/test_ds_loss.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.losses import DeepSupervisionLoss, DiceCELoss, DiceFocalLoss, DiceLoss -from tests.utils import SkipIfBeforePyTorchVersion, test_script_save +from tests.test_utils import SkipIfBeforePyTorchVersion, test_script_save TEST_CASES_DICECE = [ [ diff --git a/tests/test_dynunet.py b/tests/test_dynunet.py index 7c4882fcbb..f7096b8970 100644 --- a/tests/test_dynunet.py +++ b/tests/test_dynunet.py @@ -22,7 +22,7 @@ from monai.networks import eval_mode from monai.networks.nets import DynUNet from monai.utils import optional_import -from tests.utils import assert_allclose, skip_if_no_cuda, skip_if_windows, test_script_save +from tests.test_utils import assert_allclose, skip_if_no_cuda, skip_if_windows, test_script_save InstanceNorm3dNVFuser, _ = optional_import("apex.normalization", name="InstanceNorm3dNVFuser") diff --git a/tests/test_dynunet_block.py b/tests/test_dynunet_block.py index 4d9e06670b..af15e268e1 100644 --- a/tests/test_dynunet_block.py +++ b/tests/test_dynunet_block.py @@ -18,7 +18,7 @@ from monai.networks import eval_mode from monai.networks.blocks.dynunet_block import UnetBasicBlock, UnetResBlock, UnetUpBlock, get_padding -from tests.utils import test_script_save +from tests.test_utils import test_script_save TEST_CASE_RES_BASIC_BLOCK = [] for spatial_dims in range(2, 4): diff --git a/tests/test_efficientnet.py b/tests/test_efficientnet.py index c16526eaa3..92c7c667c3 100644 --- a/tests/test_efficientnet.py +++ b/tests/test_efficientnet.py @@ -28,7 +28,7 @@ get_efficientnet_image_size, ) from monai.utils import optional_import -from tests.utils import skip_if_downloading_fails, skip_if_quick, test_pretrained_networks, test_script_save +from tests.test_utils import skip_if_downloading_fails, skip_if_quick, test_pretrained_networks, test_script_save if TYPE_CHECKING: import torchvision diff --git a/tests/test_ensemble_evaluator.py b/tests/test_ensemble_evaluator.py index ad81d35d52..f5dc4bde52 100644 --- a/tests/test_ensemble_evaluator.py +++ b/tests/test_ensemble_evaluator.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.engines import EnsembleEvaluator -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose TEST_CASE_1 = [["pred_0", "pred_1", "pred_2", "pred_3", "pred_4"]] diff --git a/tests/test_ensure_tuple.py b/tests/test_ensure_tuple.py index ec8c92785a..e889f9bfc4 100644 --- a/tests/test_ensure_tuple.py +++ b/tests/test_ensure_tuple.py @@ -18,7 +18,7 @@ from parameterized import 
parameterized from monai.utils.misc import ensure_tuple -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose TESTS = [ ["test", ("test",)], diff --git a/tests/test_ensure_type.py b/tests/test_ensure_type.py index 00b01898b3..61c258b2b8 100644 --- a/tests/test_ensure_type.py +++ b/tests/test_ensure_type.py @@ -18,7 +18,7 @@ from monai.data import MetaTensor from monai.transforms import EnsureType -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose class TestEnsureType(unittest.TestCase): diff --git a/tests/test_ensure_typed.py b/tests/test_ensure_typed.py index fe543347de..03ee33be7b 100644 --- a/tests/test_ensure_typed.py +++ b/tests/test_ensure_typed.py @@ -18,7 +18,7 @@ from monai.data import MetaTensor from monai.transforms import EnsureTyped -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose class TestEnsureTyped(unittest.TestCase): diff --git a/tests/test_enum_bound_interp.py b/tests/test_enum_bound_interp.py index cd3119f91c..8101e85a92 100644 --- a/tests/test_enum_bound_interp.py +++ b/tests/test_enum_bound_interp.py @@ -14,7 +14,7 @@ import unittest from monai.utils import optional_import -from tests.utils import skip_if_no_cpp_extension +from tests.test_utils import skip_if_no_cpp_extension b, _ = optional_import("monai._C", name="BoundType") p, _ = optional_import("monai._C", name="InterpolationType") diff --git a/tests/test_evenly_divisible_all_gather_dist.py b/tests/test_evenly_divisible_all_gather_dist.py index f1d45ba48f..816563cce9 100644 --- a/tests/test_evenly_divisible_all_gather_dist.py +++ b/tests/test_evenly_divisible_all_gather_dist.py @@ -17,7 +17,7 @@ import torch.distributed as dist from monai.utils import evenly_divisible_all_gather -from tests.utils import DistCall, DistTestCase, assert_allclose +from tests.test_utils import DistCall, DistTestCase, assert_allclose class DistributedEvenlyDivisibleAllGather(DistTestCase): diff --git a/tests/test_fastmri_reader.py b/tests/test_fastmri_reader.py index 06c3954eae..f086146169 100644 --- a/tests/test_fastmri_reader.py +++ b/tests/test_fastmri_reader.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.apps.reconstruction.fastmri_reader import FastMRIReader -from tests.utils import SkipIfNoModule, assert_allclose +from tests.test_utils import SkipIfNoModule, assert_allclose TEST_CASE1 = [ { diff --git a/tests/test_fft_utils.py b/tests/test_fft_utils.py index 7c7035770a..44364afb63 100644 --- a/tests/test_fft_utils.py +++ b/tests/test_fft_utils.py @@ -16,7 +16,7 @@ from parameterized import parameterized from monai.data.fft_utils import fftn_centered, ifftn_centered -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose # im = [[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]] diff --git a/tests/test_fg_bg_to_indices.py b/tests/test_fg_bg_to_indices.py index a28c491333..05dfd45c7a 100644 --- a/tests/test_fg_bg_to_indices.py +++ b/tests/test_fg_bg_to_indices.py @@ -16,7 +16,7 @@ from parameterized import parameterized from monai.transforms import FgBgToIndices -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS_CASES = [] for p in TEST_NDARRAYS: diff --git a/tests/test_fg_bg_to_indicesd.py b/tests/test_fg_bg_to_indicesd.py index c6dd2059f4..5034c987a3 100644 --- a/tests/test_fg_bg_to_indicesd.py +++ b/tests/test_fg_bg_to_indicesd.py @@ -16,7 +16,7 @@ from 
parameterized import parameterized from monai.transforms import FgBgToIndicesd -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TEST_CASES = [] for p in TEST_NDARRAYS: diff --git a/tests/test_fill_holes.py b/tests/test_fill_holes.py index 241f7f8254..7b36e63f60 100644 --- a/tests/test_fill_holes.py +++ b/tests/test_fill_holes.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import FillHoles -from tests.utils import TEST_NDARRAYS, assert_allclose, clone +from tests.test_utils import TEST_NDARRAYS, assert_allclose, clone grid_1_raw = [[1, 1, 1], [1, 0, 1], [1, 1, 1]] diff --git a/tests/test_fill_holesd.py b/tests/test_fill_holesd.py index 28c17b00ac..08e7c3e78f 100644 --- a/tests/test_fill_holesd.py +++ b/tests/test_fill_holesd.py @@ -18,7 +18,7 @@ from monai.transforms import FillHolesd from monai.utils.enums import CommonKeys -from tests.utils import TEST_NDARRAYS, assert_allclose, clone +from tests.test_utils import TEST_NDARRAYS, assert_allclose, clone grid_1_raw = [[1, 1, 1], [1, 0, 1], [1, 1, 1]] diff --git a/tests/test_fl_exchange_object.py b/tests/test_fl_exchange_object.py index dab4eae037..1698efa9ce 100644 --- a/tests/test_fl_exchange_object.py +++ b/tests/test_fl_exchange_object.py @@ -19,7 +19,7 @@ from monai.fl.utils.constants import WeightType from monai.fl.utils.exchange_object import ExchangeObject from monai.utils.module import optional_import -from tests.utils import SkipIfNoModule +from tests.test_utils import SkipIfNoModule models, has_torchvision = optional_import("torchvision.models") diff --git a/tests/test_fl_monai_algo.py b/tests/test_fl_monai_algo.py index c8cb3451fc..d9bfe78d9c 100644 --- a/tests/test_fl_monai_algo.py +++ b/tests/test_fl_monai_algo.py @@ -26,7 +26,7 @@ from monai.fl.utils.constants import ExtraItems from monai.fl.utils.exchange_object import ExchangeObject from monai.utils import path_to_uri -from tests.utils import SkipIfNoModule +from tests.test_utils import SkipIfNoModule _root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__))) _data_dir = os.path.join(_root_dir, "testing_data") diff --git a/tests/test_fl_monai_algo_dist.py b/tests/test_fl_monai_algo_dist.py index d8dbfa5339..84a2b11e4c 100644 --- a/tests/test_fl_monai_algo_dist.py +++ b/tests/test_fl_monai_algo_dist.py @@ -22,7 +22,7 @@ from monai.fl.utils.constants import ExtraItems from monai.fl.utils.exchange_object import ExchangeObject from monai.networks import get_state_dict -from tests.utils import DistCall, DistTestCase, SkipIfBeforePyTorchVersion, SkipIfNoModule, skip_if_no_cuda +from tests.test_utils import DistCall, DistTestCase, SkipIfBeforePyTorchVersion, SkipIfNoModule, skip_if_no_cuda _root_dir = os.path.abspath(pathjoin(os.path.dirname(__file__))) _data_dir = pathjoin(_root_dir, "testing_data") diff --git a/tests/test_fl_monai_algo_stats.py b/tests/test_fl_monai_algo_stats.py index 6e58f8af88..92fb3e7b1f 100644 --- a/tests/test_fl_monai_algo_stats.py +++ b/tests/test_fl_monai_algo_stats.py @@ -20,7 +20,7 @@ from monai.fl.client import MonaiAlgoStats from monai.fl.utils.constants import ExtraItems, FlStatistics from monai.fl.utils.exchange_object import ExchangeObject -from tests.utils import SkipIfNoModule +from tests.test_utils import SkipIfNoModule _root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__))) _data_dir = os.path.join(_root_dir, "testing_data") diff --git a/tests/test_flexible_unet.py b/tests/test_flexible_unet.py index 
42baa28b71..f834d5d45f 100644 --- a/tests/test_flexible_unet.py +++ b/tests/test_flexible_unet.py @@ -27,7 +27,7 @@ ResNetFeatures, ) from monai.utils import optional_import -from tests.utils import SkipIfNoModule, skip_if_downloading_fails, skip_if_quick +from tests.test_utils import SkipIfNoModule, skip_if_downloading_fails, skip_if_quick torchvision, has_torchvision = optional_import("torchvision") PIL, has_pil = optional_import("PIL") diff --git a/tests/test_flip.py b/tests/test_flip.py index 789ec86920..b5b8d5494f 100644 --- a/tests/test_flip.py +++ b/tests/test_flip.py @@ -21,7 +21,13 @@ from monai.data.meta_tensor import MetaTensor from monai.transforms import Flip from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import TEST_DEVICES, TEST_NDARRAYS_ALL, NumpyImageTestCase2D, assert_allclose, test_local_inversion +from tests.test_utils import ( + TEST_DEVICES, + TEST_NDARRAYS_ALL, + NumpyImageTestCase2D, + assert_allclose, + test_local_inversion, +) INVALID_CASES = [("wrong_axis", ["s", 1], TypeError), ("not_numbers", "s", TypeError)] diff --git a/tests/test_flipd.py b/tests/test_flipd.py index 1df6d34056..95a453b865 100644 --- a/tests/test_flipd.py +++ b/tests/test_flipd.py @@ -22,7 +22,13 @@ from monai.data.meta_tensor import MetaTensor from monai.transforms import Flipd from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import TEST_DEVICES, TEST_NDARRAYS_ALL, NumpyImageTestCase2D, assert_allclose, test_local_inversion +from tests.test_utils import ( + TEST_DEVICES, + TEST_NDARRAYS_ALL, + NumpyImageTestCase2D, + assert_allclose, + test_local_inversion, +) INVALID_CASES = [("wrong_axis", ["s", 1], TypeError), ("not_numbers", "s", TypeError)] diff --git a/tests/test_focal_loss.py b/tests/test_focal_loss.py index 0bb8a078ae..9d9ed43101 100644 --- a/tests/test_focal_loss.py +++ b/tests/test_focal_loss.py @@ -21,7 +21,7 @@ from monai.losses import FocalLoss from monai.networks import one_hot -from tests.utils import test_script_save +from tests.test_utils import test_script_save TEST_CASES = [] for device in ["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]: diff --git a/tests/test_foreground_mask.py b/tests/test_foreground_mask.py index 1aa54f4d3a..b6c7d3a56c 100644 --- a/tests/test_foreground_mask.py +++ b/tests/test_foreground_mask.py @@ -18,7 +18,7 @@ from monai.transforms.intensity.array import ForegroundMask from monai.utils import min_version, optional_import, set_determinism -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose skimage, has_skimage = optional_import("skimage", "0.19.0", min_version) set_determinism(1234) diff --git a/tests/test_foreground_maskd.py b/tests/test_foreground_maskd.py index dc7b6cfb24..48ef68e7c0 100644 --- a/tests/test_foreground_maskd.py +++ b/tests/test_foreground_maskd.py @@ -18,7 +18,7 @@ from monai.transforms.intensity.dictionary import ForegroundMaskd from monai.utils import min_version, optional_import, set_determinism -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose skimage, has_skimage = optional_import("skimage", "0.19.0", min_version) set_determinism(1234) diff --git a/tests/test_fourier.py b/tests/test_fourier.py index 177fc280f7..73fea2cdb1 100644 --- a/tests/test_fourier.py +++ b/tests/test_fourier.py @@ -20,7 +20,7 @@ from monai.data.synthetic import create_test_image_2d, create_test_image_3d from monai.transforms import Fourier from 
monai.utils.misc import set_determinism -from tests.utils import SkipIfBeforePyTorchVersion, SkipIfNoModule +from tests.test_utils import SkipIfBeforePyTorchVersion, SkipIfNoModule TEST_CASES = [((128, 64),), ((64, 48, 80),)] diff --git a/tests/test_fpn_block.py b/tests/test_fpn_block.py index 969800e80a..b3894ebf6a 100644 --- a/tests/test_fpn_block.py +++ b/tests/test_fpn_block.py @@ -21,7 +21,7 @@ from monai.networks.blocks.feature_pyramid_network import FeaturePyramidNetwork from monai.networks.nets.resnet import resnet50 from monai.utils import optional_import -from tests.utils import SkipIfBeforePyTorchVersion, test_script_save +from tests.test_utils import SkipIfBeforePyTorchVersion, test_script_save _, has_torchvision = optional_import("torchvision") diff --git a/tests/test_from_engine_hovernet.py b/tests/test_from_engine_hovernet.py index 7d1a784466..bed464ef49 100644 --- a/tests/test_from_engine_hovernet.py +++ b/tests/test_from_engine_hovernet.py @@ -16,7 +16,7 @@ from parameterized import parameterized from monai.apps.pathology.handlers.utils import from_engine_hovernet -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose TEST_CASE_0 = [ [{"A": {"C": 1, "D": 2}, "B": {"C": 2, "D": 2}}, {"A": {"C": 3, "D": 2}, "B": {"C": 4, "D": 2}}], diff --git a/tests/test_gaussian_filter.py b/tests/test_gaussian_filter.py index 2167591c66..2d5c935f90 100644 --- a/tests/test_gaussian_filter.py +++ b/tests/test_gaussian_filter.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.networks.layers import GaussianFilter -from tests.utils import SkipIfAtLeastPyTorchVersion, skip_if_quick +from tests.test_utils import SkipIfAtLeastPyTorchVersion, skip_if_quick TEST_CASES = [[{"type": "erf", "gt": 2.0}], [{"type": "scalespace", "gt": 3.0}], [{"type": "sampled", "gt": 5.0}]] TEST_CASES_GPU = [[{"type": "erf", "gt": 0.8, "device": "cuda"}], [{"type": "sampled", "gt": 5.0, "device": "cuda"}]] diff --git a/tests/test_gaussian_sharpen.py b/tests/test_gaussian_sharpen.py index 392a7b376b..553038181f 100644 --- a/tests/test_gaussian_sharpen.py +++ b/tests/test_gaussian_sharpen.py @@ -16,7 +16,7 @@ from parameterized import parameterized from monai.transforms import GaussianSharpen -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] diff --git a/tests/test_gaussian_sharpend.py b/tests/test_gaussian_sharpend.py index 15b219fd2c..38149dd25a 100644 --- a/tests/test_gaussian_sharpend.py +++ b/tests/test_gaussian_sharpend.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import GaussianSharpend -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_gaussian_smooth.py b/tests/test_gaussian_smooth.py index 9f99ebe0f8..e3a9e46e76 100644 --- a/tests/test_gaussian_smooth.py +++ b/tests/test_gaussian_smooth.py @@ -16,7 +16,7 @@ from parameterized import parameterized from monai.transforms import GaussianSmooth -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] diff --git a/tests/test_gaussian_smoothd.py b/tests/test_gaussian_smoothd.py index a6de4a159b..4471d2fe94 100644 --- a/tests/test_gaussian_smoothd.py +++ b/tests/test_gaussian_smoothd.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import GaussianSmoothd -from tests.utils 
import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_gdsdataset.py b/tests/test_gdsdataset.py index 5d2e2aa013..dda171ea3c 100644 --- a/tests/test_gdsdataset.py +++ b/tests/test_gdsdataset.py @@ -23,7 +23,7 @@ from monai.data import GDSDataset, json_hashing from monai.transforms import Compose, Flip, Identity, LoadImaged, SimulateDelayd, Transform from monai.utils import optional_import -from tests.utils import TEST_NDARRAYS, assert_allclose, skip_if_no_cuda +from tests.test_utils import TEST_NDARRAYS, assert_allclose, skip_if_no_cuda _, has_cp = optional_import("cupy") nib, has_nib = optional_import("nibabel") diff --git a/tests/test_generalized_dice_focal_loss.py b/tests/test_generalized_dice_focal_loss.py index 65252611ca..93f9f6f6fa 100644 --- a/tests/test_generalized_dice_focal_loss.py +++ b/tests/test_generalized_dice_focal_loss.py @@ -17,7 +17,7 @@ import torch from monai.losses import FocalLoss, GeneralizedDiceFocalLoss, GeneralizedDiceLoss -from tests.utils import test_script_save +from tests.test_utils import test_script_save class TestGeneralizedDiceFocalLoss(unittest.TestCase): diff --git a/tests/test_generalized_dice_loss.py b/tests/test_generalized_dice_loss.py index 9706c2e746..23af96762f 100644 --- a/tests/test_generalized_dice_loss.py +++ b/tests/test_generalized_dice_loss.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.losses import GeneralizedDiceLoss -from tests.utils import test_script_save +from tests.test_utils import test_script_save TEST_CASES = [ [ # shape: (1, 1, 2, 2), (1, 1, 2, 2) diff --git a/tests/test_generalized_wasserstein_dice_loss.py b/tests/test_generalized_wasserstein_dice_loss.py index 6b9d57e831..3b56c1315e 100644 --- a/tests/test_generalized_wasserstein_dice_loss.py +++ b/tests/test_generalized_wasserstein_dice_loss.py @@ -20,7 +20,7 @@ import torch.optim as optim from monai.losses import GeneralizedWassersteinDiceLoss -from tests.utils import test_script_save +from tests.test_utils import test_script_save class TestGeneralizedWassersteinDiceLoss(unittest.TestCase): diff --git a/tests/test_generate_distance_map.py b/tests/test_generate_distance_map.py index 42f5664647..ded3a124dd 100644 --- a/tests/test_generate_distance_map.py +++ b/tests/test_generate_distance_map.py @@ -18,7 +18,7 @@ from monai.apps.pathology.transforms.post.array import GenerateDistanceMap from monai.transforms.intensity.array import GaussianSmooth -from tests.utils import TEST_NDARRAYS +from tests.test_utils import TEST_NDARRAYS EXCEPTION_TESTS = [] TESTS = [] diff --git a/tests/test_generate_distance_mapd.py b/tests/test_generate_distance_mapd.py index 2bddadf5b8..04cfc2f776 100644 --- a/tests/test_generate_distance_mapd.py +++ b/tests/test_generate_distance_mapd.py @@ -18,7 +18,7 @@ from monai.apps.pathology.transforms.post.dictionary import GenerateDistanceMapd from monai.transforms.intensity.array import GaussianSmooth -from tests.utils import TEST_NDARRAYS +from tests.test_utils import TEST_NDARRAYS EXCEPTION_TESTS = [] TESTS = [] diff --git a/tests/test_generate_instance_border.py b/tests/test_generate_instance_border.py index fc1035dfe5..9d9c5bc7d8 100644 --- a/tests/test_generate_instance_border.py +++ b/tests/test_generate_instance_border.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.apps.pathology.transforms.post.array import GenerateInstanceBorder -from tests.utils import TEST_NDARRAYS +from tests.test_utils import 
TEST_NDARRAYS EXCEPTION_TESTS = [] TESTS = [] diff --git a/tests/test_generate_instance_borderd.py b/tests/test_generate_instance_borderd.py index cdfbee4193..1cbf99cee3 100644 --- a/tests/test_generate_instance_borderd.py +++ b/tests/test_generate_instance_borderd.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.apps.pathology.transforms.post.dictionary import GenerateInstanceBorderd -from tests.utils import TEST_NDARRAYS +from tests.test_utils import TEST_NDARRAYS EXCEPTION_TESTS = [] TESTS = [] diff --git a/tests/test_generate_instance_centroid.py b/tests/test_generate_instance_centroid.py index 6b4d533401..051d555dff 100644 --- a/tests/test_generate_instance_centroid.py +++ b/tests/test_generate_instance_centroid.py @@ -19,7 +19,7 @@ from monai.apps.pathology.transforms.post.array import GenerateInstanceCentroid from monai.transforms import BoundingRect from monai.utils import min_version, optional_import -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose _, has_skimage = optional_import("skimage", "0.19.3", min_version) diff --git a/tests/test_generate_instance_centroidd.py b/tests/test_generate_instance_centroidd.py index d381ad8c0e..b3cee1872b 100644 --- a/tests/test_generate_instance_centroidd.py +++ b/tests/test_generate_instance_centroidd.py @@ -19,7 +19,7 @@ from monai.apps.pathology.transforms.post.dictionary import GenerateInstanceCentroidd from monai.transforms import BoundingRect from monai.utils import min_version, optional_import -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose _, has_skimage = optional_import("skimage", "0.19.3", min_version) diff --git a/tests/test_generate_instance_contour.py b/tests/test_generate_instance_contour.py index 7f4290747d..0346536db9 100644 --- a/tests/test_generate_instance_contour.py +++ b/tests/test_generate_instance_contour.py @@ -19,7 +19,7 @@ from monai.apps.pathology.transforms.post.array import GenerateInstanceContour from monai.transforms import BoundingRect from monai.utils import min_version, optional_import -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose _, has_skimage = optional_import("skimage", "0.19.3", min_version) diff --git a/tests/test_generate_instance_contourd.py b/tests/test_generate_instance_contourd.py index 5c831ee680..2a572e5932 100644 --- a/tests/test_generate_instance_contourd.py +++ b/tests/test_generate_instance_contourd.py @@ -19,7 +19,7 @@ from monai.apps.pathology.transforms.post.dictionary import GenerateInstanceContourd from monai.transforms import BoundingRect from monai.utils import min_version, optional_import -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose _, has_skimage = optional_import("skimage", "0.19.3", min_version) diff --git a/tests/test_generate_instance_type.py b/tests/test_generate_instance_type.py index 24e1d1b6d0..6e86beafb5 100644 --- a/tests/test_generate_instance_type.py +++ b/tests/test_generate_instance_type.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.apps.pathology.transforms.post.array import GenerateInstanceType -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose y, x = np.ogrid[0:30, 0:30] diff --git a/tests/test_generate_instance_typed.py b/tests/test_generate_instance_typed.py index 
958f68d6bb..6088d672de 100644 --- a/tests/test_generate_instance_typed.py +++ b/tests/test_generate_instance_typed.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.apps.pathology.transforms.post.dictionary import GenerateInstanceTyped -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose y, x = np.ogrid[0:30, 0:30] diff --git a/tests/test_generate_label_classes_crop_centers.py b/tests/test_generate_label_classes_crop_centers.py index 1cbb5f05c3..bfe65465e2 100644 --- a/tests/test_generate_label_classes_crop_centers.py +++ b/tests/test_generate_label_classes_crop_centers.py @@ -18,7 +18,7 @@ from monai.transforms import generate_label_classes_crop_centers from monai.utils.misc import set_determinism -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TEST_CASE_1 = [ { diff --git a/tests/test_generate_param_groups.py b/tests/test_generate_param_groups.py index a78dba9f03..8c49a432b2 100644 --- a/tests/test_generate_param_groups.py +++ b/tests/test_generate_param_groups.py @@ -19,7 +19,7 @@ from monai.networks.nets import Unet from monai.optimizers import generate_param_groups from monai.utils import ensure_tuple -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose TEST_CASE_1 = [{"layer_matches": [lambda x: x.model[-1]], "match_types": "select", "lr_values": [1]}, (1, 100), [5, 21]] diff --git a/tests/test_generate_pos_neg_label_crop_centers.py b/tests/test_generate_pos_neg_label_crop_centers.py index de127b33df..80c179ffaf 100644 --- a/tests/test_generate_pos_neg_label_crop_centers.py +++ b/tests/test_generate_pos_neg_label_crop_centers.py @@ -18,7 +18,7 @@ from monai.transforms import generate_pos_neg_label_crop_centers from monai.utils.misc import set_determinism -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [ [ diff --git a/tests/test_generate_spatial_bounding_box.py b/tests/test_generate_spatial_bounding_box.py index 6d5b415ec2..94cf1a58d7 100644 --- a/tests/test_generate_spatial_bounding_box.py +++ b/tests/test_generate_spatial_bounding_box.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import generate_spatial_bounding_box -from tests.utils import TEST_NDARRAYS +from tests.test_utils import TEST_NDARRAYS TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_generate_watershed_markers.py b/tests/test_generate_watershed_markers.py index 238fb00ee0..73f7851f0d 100644 --- a/tests/test_generate_watershed_markers.py +++ b/tests/test_generate_watershed_markers.py @@ -18,7 +18,7 @@ from monai.apps.pathology.transforms.post.array import GenerateWatershedMarkers from monai.utils import min_version, optional_import -from tests.utils import TEST_NDARRAYS +from tests.test_utils import TEST_NDARRAYS _, has_skimage = optional_import("skimage", "0.19.3", min_version) _, has_scipy = optional_import("scipy", "1.8.1", min_version) diff --git a/tests/test_generate_watershed_markersd.py b/tests/test_generate_watershed_markersd.py index a3c2b9c231..36ad113653 100644 --- a/tests/test_generate_watershed_markersd.py +++ b/tests/test_generate_watershed_markersd.py @@ -18,7 +18,7 @@ from monai.apps.pathology.transforms.post.dictionary import GenerateWatershedMarkersd from monai.utils import min_version, optional_import -from tests.utils import TEST_NDARRAYS +from tests.test_utils import TEST_NDARRAYS _, 
has_skimage = optional_import("skimage", "0.19.3", min_version) _, has_scipy = optional_import("scipy", "1.8.1", min_version) diff --git a/tests/test_generate_watershed_mask.py b/tests/test_generate_watershed_mask.py index 5224a912b0..b4728062db 100644 --- a/tests/test_generate_watershed_mask.py +++ b/tests/test_generate_watershed_mask.py @@ -19,7 +19,7 @@ from monai.apps.pathology.transforms.post.array import GenerateWatershedMask from monai.utils import min_version, optional_import -from tests.utils import TEST_NDARRAYS +from tests.test_utils import TEST_NDARRAYS _, has_scipy = optional_import("scipy", "1.8.1", min_version) diff --git a/tests/test_generate_watershed_maskd.py b/tests/test_generate_watershed_maskd.py index 9d0f2c274a..863e01be83 100644 --- a/tests/test_generate_watershed_maskd.py +++ b/tests/test_generate_watershed_maskd.py @@ -19,7 +19,7 @@ from monai.apps.pathology.transforms.post.dictionary import GenerateWatershedMaskd from monai.utils import min_version, optional_import -from tests.utils import TEST_NDARRAYS +from tests.test_utils import TEST_NDARRAYS _, has_scipy = optional_import("scipy", "1.8.1", min_version) diff --git a/tests/test_generator.py b/tests/test_generator.py index f531f928da..9c8bc33494 100644 --- a/tests/test_generator.py +++ b/tests/test_generator.py @@ -18,7 +18,7 @@ from monai.networks import eval_mode from monai.networks.nets import Generator -from tests.utils import test_script_save +from tests.test_utils import test_script_save TEST_CASE_0 = [ {"latent_shape": (64,), "start_shape": (8, 8, 8), "channels": (8, 4, 1), "strides": (2, 2, 2), "num_res_units": 0}, diff --git a/tests/test_get_equivalent_dtype.py b/tests/test_get_equivalent_dtype.py index 2b4de1bc2a..497b2ab591 100644 --- a/tests/test_get_equivalent_dtype.py +++ b/tests/test_get_equivalent_dtype.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.utils.type_conversion import get_equivalent_dtype, get_numpy_dtype_from_string, get_torch_dtype_from_string -from tests.utils import TEST_NDARRAYS +from tests.test_utils import TEST_NDARRAYS DTYPES = [torch.float32, np.float32, np.dtype(np.float32)] diff --git a/tests/test_get_extreme_points.py b/tests/test_get_extreme_points.py index e60715e2fe..0a062d5214 100644 --- a/tests/test_get_extreme_points.py +++ b/tests/test_get_extreme_points.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import get_extreme_points -from tests.utils import TEST_NDARRAYS +from tests.test_utils import TEST_NDARRAYS TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_get_unique_labels.py b/tests/test_get_unique_labels.py index 0a88145489..8735768902 100644 --- a/tests/test_get_unique_labels.py +++ b/tests/test_get_unique_labels.py @@ -19,7 +19,7 @@ from monai.transforms.utils import get_unique_labels from monai.transforms.utils_pytorch_numpy_unification import moveaxis -from tests.utils import TEST_NDARRAYS +from tests.test_utils import TEST_NDARRAYS grid_raw = [[0, 0, 0], [0, 0, 1], [2, 2, 3], [5, 5, 6], [3, 6, 2], [5, 6, 6]] grid = torch.Tensor(grid_raw).unsqueeze(0).to(torch.int64) diff --git a/tests/test_gibbs_noise.py b/tests/test_gibbs_noise.py index bdc66b9495..145a1d10ac 100644 --- a/tests/test_gibbs_noise.py +++ b/tests/test_gibbs_noise.py @@ -21,7 +21,7 @@ from monai.transforms import GibbsNoise from monai.utils.misc import set_determinism from monai.utils.module import optional_import -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, 
assert_allclose _, has_torch_fft = optional_import("torch.fft", name="fftshift") diff --git a/tests/test_gibbs_noised.py b/tests/test_gibbs_noised.py index 3b2cae7e84..8c8cca513c 100644 --- a/tests/test_gibbs_noised.py +++ b/tests/test_gibbs_noised.py @@ -21,7 +21,7 @@ from monai.transforms import GibbsNoised from monai.utils.misc import set_determinism from monai.utils.module import optional_import -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose _, has_torch_fft = optional_import("torch.fft", name="fftshift") diff --git a/tests/test_global_mutual_information_loss.py b/tests/test_global_mutual_information_loss.py index 22f5e88431..fdbfa63c34 100644 --- a/tests/test_global_mutual_information_loss.py +++ b/tests/test_global_mutual_information_loss.py @@ -19,7 +19,7 @@ from monai import transforms from monai.losses.image_dissimilarity import GlobalMutualInformationLoss -from tests.utils import SkipIfBeforePyTorchVersion, download_url_or_skip_test, skip_if_quick, testing_data_config +from tests.test_utils import SkipIfBeforePyTorchVersion, download_url_or_skip_test, skip_if_quick, testing_data_config device = "cuda:0" if torch.cuda.is_available() else "cpu" diff --git a/tests/test_globalnet.py b/tests/test_globalnet.py index 626053377c..aa8bd77312 100644 --- a/tests/test_globalnet.py +++ b/tests/test_globalnet.py @@ -21,7 +21,7 @@ from monai.networks.blocks import Warp from monai.networks.nets import GlobalNet from monai.networks.nets.regunet import AffineHead -from tests.utils import assert_allclose, test_script_save +from tests.test_utils import assert_allclose, test_script_save TEST_CASES_AFFINE_TRANSFORM = [ [ diff --git a/tests/test_gmm.py b/tests/test_gmm.py index 549e8f1ec4..e582f7668c 100644 --- a/tests/test_gmm.py +++ b/tests/test_gmm.py @@ -22,7 +22,7 @@ from monai._extensions import load_module from monai.networks.layers import GaussianMixtureModel -from tests.utils import skip_if_darwin, skip_if_no_cuda, skip_if_quick, skip_if_windows +from tests.test_utils import skip_if_darwin, skip_if_no_cuda, skip_if_quick, skip_if_windows TEST_CASES = [ [ diff --git a/tests/test_grid_dataset.py b/tests/test_grid_dataset.py index 4a3b4b6340..0ed7c1b263 100644 --- a/tests/test_grid_dataset.py +++ b/tests/test_grid_dataset.py @@ -20,7 +20,7 @@ from monai.data import DataLoader, GridPatchDataset, PatchIter, PatchIterd, iter_patch from monai.transforms import RandShiftIntensity, RandShiftIntensityd from monai.utils import set_determinism -from tests.utils import TEST_NDARRAYS, assert_allclose, get_arange_img +from tests.test_utils import TEST_NDARRAYS, assert_allclose, get_arange_img def identity_generator(x): diff --git a/tests/test_grid_distortion.py b/tests/test_grid_distortion.py index 9ec85250e8..e923d828f6 100644 --- a/tests/test_grid_distortion.py +++ b/tests/test_grid_distortion.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import GridDistortion -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose TESTS = [] for p in TEST_NDARRAYS_ALL: diff --git a/tests/test_grid_distortiond.py b/tests/test_grid_distortiond.py index ce73593dc7..495403885c 100644 --- a/tests/test_grid_distortiond.py +++ b/tests/test_grid_distortiond.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import GridDistortiond -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose +from tests.test_utils import 
TEST_NDARRAYS_ALL, assert_allclose TESTS = [] num_cells = (2, 2) diff --git a/tests/test_grid_patch.py b/tests/test_grid_patch.py index 56af123548..ce2f6b6b92 100644 --- a/tests/test_grid_patch.py +++ b/tests/test_grid_patch.py @@ -19,7 +19,7 @@ from monai.data import MetaTensor, set_track_meta from monai.transforms.spatial.array import GridPatch -from tests.utils import TEST_NDARRAYS, SkipIfBeforePyTorchVersion, assert_allclose +from tests.test_utils import TEST_NDARRAYS, SkipIfBeforePyTorchVersion, assert_allclose A = np.arange(16).repeat(3).reshape(4, 4, 3).transpose(2, 0, 1) A11 = A[:, :2, :2] diff --git a/tests/test_grid_patchd.py b/tests/test_grid_patchd.py index 53313b3a8f..26b340297d 100644 --- a/tests/test_grid_patchd.py +++ b/tests/test_grid_patchd.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.transforms.spatial.dictionary import GridPatchd -from tests.utils import TEST_NDARRAYS, SkipIfBeforePyTorchVersion, assert_allclose +from tests.test_utils import TEST_NDARRAYS, SkipIfBeforePyTorchVersion, assert_allclose A = np.arange(16).repeat(3).reshape(4, 4, 3).transpose(2, 0, 1) A11 = A[:, :2, :2] diff --git a/tests/test_grid_pull.py b/tests/test_grid_pull.py index f80874d216..79f18f2b60 100644 --- a/tests/test_grid_pull.py +++ b/tests/test_grid_pull.py @@ -20,8 +20,8 @@ from monai.networks.layers import grid_pull from monai.networks.utils import meshgrid_ij from monai.utils import optional_import +from tests.test_utils import skip_if_no_cpp_extension from tests.testing_data.cpp_resample_answers import Expected_1D_GP_bwd, Expected_1D_GP_fwd -from tests.utils import skip_if_no_cpp_extension BType, has_b_type = optional_import("monai._C", name="BoundType") PType, has_p_type = optional_import("monai._C", name="InterpolationType") diff --git a/tests/test_grid_split.py b/tests/test_grid_split.py index 852a4847a6..e4a8571b47 100644 --- a/tests/test_grid_split.py +++ b/tests/test_grid_split.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import GridSplit -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose A11 = torch.randn(3, 2, 2) A12 = torch.randn(3, 2, 2) diff --git a/tests/test_grid_splitd.py b/tests/test_grid_splitd.py index 215076d5a3..2c39acdee0 100644 --- a/tests/test_grid_splitd.py +++ b/tests/test_grid_splitd.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import GridSplitd -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose A11 = torch.randn(3, 2, 2) A12 = torch.randn(3, 2, 2) diff --git a/tests/test_handler_checkpoint_loader.py b/tests/test_handler_checkpoint_loader.py index 7b281665b4..d366890ae6 100644 --- a/tests/test_handler_checkpoint_loader.py +++ b/tests/test_handler_checkpoint_loader.py @@ -19,7 +19,7 @@ from ignite.engine import Engine, Events from monai.handlers import CheckpointLoader, CheckpointSaver -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose class TestHandlerCheckpointLoader(unittest.TestCase): diff --git a/tests/test_handler_classification_saver_dist.py b/tests/test_handler_classification_saver_dist.py index 47dca2d999..2e8edde05a 100644 --- a/tests/test_handler_classification_saver_dist.py +++ b/tests/test_handler_classification_saver_dist.py @@ -23,7 +23,7 @@ from monai.data import decollate_batch from monai.handlers import ClassificationSaver -from tests.utils import DistCall, DistTestCase +from 
tests.test_utils import DistCall, DistTestCase class DistributedHandlerClassificationSaver(DistTestCase): diff --git a/tests/test_handler_confusion_matrix.py b/tests/test_handler_confusion_matrix.py index 5f3ee3ae97..6ecc7c8250 100644 --- a/tests/test_handler_confusion_matrix.py +++ b/tests/test_handler_confusion_matrix.py @@ -19,7 +19,7 @@ from parameterized import parameterized from monai.handlers import ConfusionMatrix -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose TEST_CASE_1 = [{"include_background": True, "save_details": False, "metric_name": "f1"}, 0.75] TEST_CASE_2 = [{"include_background": False, "save_details": False, "metric_name": "ppv"}, 1.0] diff --git a/tests/test_handler_confusion_matrix_dist.py b/tests/test_handler_confusion_matrix_dist.py index dd30f04142..44d61a95b1 100644 --- a/tests/test_handler_confusion_matrix_dist.py +++ b/tests/test_handler_confusion_matrix_dist.py @@ -19,7 +19,7 @@ from ignite.engine import Engine from monai.handlers import ConfusionMatrix -from tests.utils import DistCall, DistTestCase +from tests.test_utils import DistCall, DistTestCase class DistributedConfusionMatrix(DistTestCase): diff --git a/tests/test_handler_decollate_batch.py b/tests/test_handler_decollate_batch.py index 37ca7f6870..d57b22d900 100644 --- a/tests/test_handler_decollate_batch.py +++ b/tests/test_handler_decollate_batch.py @@ -18,7 +18,7 @@ from monai.engines import SupervisedEvaluator from monai.handlers import DecollateBatch, PostProcessing from monai.transforms import Activationsd, AsDiscreted, Compose, CopyItemsd -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose class TestHandlerDecollateBatch(unittest.TestCase): diff --git a/tests/test_handler_hausdorff_distance.py b/tests/test_handler_hausdorff_distance.py index 906db86d62..4e366d016a 100644 --- a/tests/test_handler_hausdorff_distance.py +++ b/tests/test_handler_hausdorff_distance.py @@ -18,7 +18,7 @@ from ignite.engine import Engine from monai.handlers import HausdorffDistance -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose def create_spherical_seg_3d( diff --git a/tests/test_handler_ignite_metric.py b/tests/test_handler_ignite_metric.py index 3e42bda35d..972b9928ba 100644 --- a/tests/test_handler_ignite_metric.py +++ b/tests/test_handler_ignite_metric.py @@ -19,7 +19,7 @@ from monai.handlers import IgniteMetricHandler, from_engine from monai.losses import DiceLoss from monai.metrics import LossMetric -from tests.utils import SkipIfNoModule, assert_allclose, optional_import +from tests.test_utils import SkipIfNoModule, assert_allclose, optional_import try: _, has_ignite = optional_import("ignite") diff --git a/tests/test_handler_logfile.py b/tests/test_handler_logfile.py index 457aca2ebc..eece744e42 100644 --- a/tests/test_handler_logfile.py +++ b/tests/test_handler_logfile.py @@ -18,7 +18,7 @@ import torch from monai.utils import optional_import -from tests.utils import SkipIfNoModule +from tests.test_utils import SkipIfNoModule try: _, has_ignite = optional_import("ignite") diff --git a/tests/test_handler_mean_dice.py b/tests/test_handler_mean_dice.py index 6f91b6d3af..abb016ca7b 100644 --- a/tests/test_handler_mean_dice.py +++ b/tests/test_handler_mean_dice.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.handlers import MeanDice, from_engine -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose TEST_CASE_1 = [{"include_background": True, 
"output_transform": from_engine(["pred", "label"])}, 0.75, (4, 2)] TEST_CASE_2 = [{"include_background": False, "output_transform": from_engine(["pred", "label"])}, 0.66666, (4, 1)] diff --git a/tests/test_handler_mean_iou.py b/tests/test_handler_mean_iou.py index 89dae3af58..432cd8a51b 100644 --- a/tests/test_handler_mean_iou.py +++ b/tests/test_handler_mean_iou.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.handlers import MeanIoUHandler, from_engine -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose TEST_CASE_1 = [{"include_background": True, "output_transform": from_engine(["pred", "label"])}, 0.75, (4, 2)] TEST_CASE_2 = [{"include_background": False, "output_transform": from_engine(["pred", "label"])}, 2 / 3, (4, 1)] diff --git a/tests/test_handler_metric_logger.py b/tests/test_handler_metric_logger.py index 06d50e97ff..35c32fa42b 100644 --- a/tests/test_handler_metric_logger.py +++ b/tests/test_handler_metric_logger.py @@ -16,7 +16,7 @@ import torch from monai.utils import optional_import -from tests.utils import SkipIfNoModule +from tests.test_utils import SkipIfNoModule try: _, has_ignite = optional_import("ignite") diff --git a/tests/test_handler_metrics_reloaded.py b/tests/test_handler_metrics_reloaded.py index b8fb39d2e8..65e8726c88 100644 --- a/tests/test_handler_metrics_reloaded.py +++ b/tests/test_handler_metrics_reloaded.py @@ -19,7 +19,7 @@ from monai.handlers import MetricsReloadedBinaryHandler, MetricsReloadedCategoricalHandler, from_engine from monai.utils import optional_import -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose _, has_metrics = optional_import("MetricsReloaded") diff --git a/tests/test_handler_metrics_saver_dist.py b/tests/test_handler_metrics_saver_dist.py index 2e12b08aa9..7140b94327 100644 --- a/tests/test_handler_metrics_saver_dist.py +++ b/tests/test_handler_metrics_saver_dist.py @@ -23,7 +23,7 @@ from monai.handlers import MetricsSaver from monai.utils import evenly_divisible_all_gather from monai.utils.enums import PostFix -from tests.utils import DistCall, DistTestCase +from tests.test_utils import DistCall, DistTestCase class DistributedMetricsSaver(DistTestCase): diff --git a/tests/test_handler_mlflow.py b/tests/test_handler_mlflow.py index 36d59ff1bf..f717e0e88c 100644 --- a/tests/test_handler_mlflow.py +++ b/tests/test_handler_mlflow.py @@ -27,7 +27,7 @@ from monai.bundle import ConfigWorkflow, download from monai.handlers import MLFlowHandler from monai.utils import optional_import, path_to_uri -from tests.utils import skip_if_downloading_fails, skip_if_quick +from tests.test_utils import skip_if_downloading_fails, skip_if_quick _, has_dataset_tracking = optional_import("mlflow", "2.4.0") diff --git a/tests/test_handler_nvtx.py b/tests/test_handler_nvtx.py index a0d1cdb4d5..9ad55f67e8 100644 --- a/tests/test_handler_nvtx.py +++ b/tests/test_handler_nvtx.py @@ -21,7 +21,7 @@ from monai.handlers import StatsHandler, from_engine from monai.handlers.nvtx_handlers import MarkHandler, RangeHandler, RangePopHandler, RangePushHandler from monai.utils import optional_import -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose _, has_nvtx = optional_import("torch._C._nvtx", descriptor="NVTX is not installed. 
diff --git a/tests/test_handler_panoptic_quality.py b/tests/test_handler_panoptic_quality.py
index 337f9c7b49..868ae45e21 100644
--- a/tests/test_handler_panoptic_quality.py
+++ b/tests/test_handler_panoptic_quality.py
@@ -18,7 +18,7 @@
 from parameterized import parameterized
 from monai.handlers import PanopticQuality, from_engine
-from tests.utils import SkipIfNoModule, assert_allclose
+from tests.test_utils import SkipIfNoModule, assert_allclose
 sample_1_pred = torch.as_tensor(
     [[[0, 1, 1, 1], [0, 0, 5, 5], [2, 0, 3, 3], [2, 2, 2, 0]], [[0, 1, 1, 1], [0, 0, 0, 0], [2, 0, 3, 3], [4, 2, 2, 0]]]
diff --git a/tests/test_handler_parameter_scheduler.py b/tests/test_handler_parameter_scheduler.py
index 0bcc794381..3c91622b90 100644
--- a/tests/test_handler_parameter_scheduler.py
+++ b/tests/test_handler_parameter_scheduler.py
@@ -17,7 +17,7 @@
 from torch.nn import Module
 from monai.handlers.parameter_scheduler import ParamSchedulerHandler
-from tests.utils import assert_allclose
+from tests.test_utils import assert_allclose
 class ToyNet(Module):
diff --git a/tests/test_handler_post_processing.py b/tests/test_handler_post_processing.py
index 0dd518325b..a0e2a8ca0f 100644
--- a/tests/test_handler_post_processing.py
+++ b/tests/test_handler_post_processing.py
@@ -19,7 +19,7 @@
 from monai.engines import SupervisedEvaluator
 from monai.handlers import PostProcessing
 from monai.transforms import Activationsd, AsDiscreted, Compose, CopyItemsd
-from tests.utils import assert_allclose
+from tests.test_utils import assert_allclose
 # test lambda function as `transform`
 TEST_CASE_1 = [{"transform": lambda x: dict(pred=x["pred"] + 1.0)}, False, torch.tensor([[[[1.9975], [1.9997]]]])]
diff --git a/tests/test_handler_regression_metrics_dist.py b/tests/test_handler_regression_metrics_dist.py
index f57db429e8..8a455d0470 100644
--- a/tests/test_handler_regression_metrics_dist.py
+++ b/tests/test_handler_regression_metrics_dist.py
@@ -20,7 +20,7 @@
 from monai.handlers import MeanAbsoluteError, MeanSquaredError, PeakSignalToNoiseRatio, RootMeanSquaredError
 from monai.utils import set_determinism
-from tests.utils import DistCall, DistTestCase
+from tests.test_utils import DistCall, DistTestCase
 # define a numpy flatten function that only preserves batch dimension
diff --git a/tests/test_handler_rocauc_dist.py b/tests/test_handler_rocauc_dist.py
index 6088251b11..544653f037 100644
--- a/tests/test_handler_rocauc_dist.py
+++ b/tests/test_handler_rocauc_dist.py
@@ -19,7 +19,7 @@
 from monai.handlers import ROCAUC
 from monai.transforms import Activations, AsDiscrete
-from tests.utils import DistCall, DistTestCase
+from tests.test_utils import DistCall, DistTestCase
 class DistributedROCAUC(DistTestCase):
diff --git a/tests/test_handler_surface_distance.py b/tests/test_handler_surface_distance.py
index 736f7e251a..0253c1a8a4 100644
--- a/tests/test_handler_surface_distance.py
+++ b/tests/test_handler_surface_distance.py
@@ -18,7 +18,7 @@
 from ignite.engine import Engine
 from monai.handlers import SurfaceDistance
-from tests.utils import assert_allclose
+from tests.test_utils import assert_allclose
 def create_spherical_seg_3d(
diff --git a/tests/test_handler_tb_image.py b/tests/test_handler_tb_image.py
index 197b175278..b01ef1b26f 100644
--- a/tests/test_handler_tb_image.py
+++ b/tests/test_handler_tb_image.py
@@ -23,7 +23,7 @@
 from monai.data import decollate_batch
 from monai.handlers import TensorBoardImageHandler
 from monai.utils import optional_import
-from tests.utils import 
SkipIfBeforePyTorchVersion +from tests.test_utils import SkipIfBeforePyTorchVersion _, has_tb = optional_import("torch.utils.tensorboard", name="SummaryWriter") diff --git a/tests/test_hardnegsampler.py b/tests/test_hardnegsampler.py index 5385abd1db..a0a2743bf7 100644 --- a/tests/test_hardnegsampler.py +++ b/tests/test_hardnegsampler.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.apps.detection.utils.hard_negative_sampler import HardNegativeSampler -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose TEST_CASE = [ [[], [], [], [torch.tensor([]), torch.tensor([])], [torch.tensor([]), torch.tensor([])]], diff --git a/tests/test_highresnet.py b/tests/test_highresnet.py index bcc5739900..bf95f4579a 100644 --- a/tests/test_highresnet.py +++ b/tests/test_highresnet.py @@ -18,7 +18,7 @@ from monai.networks import eval_mode from monai.networks.nets import HighResNet -from tests.utils import DistTestCase, TimedCall, test_script_save +from tests.test_utils import DistTestCase, TimedCall, test_script_save device = "cuda" if torch.cuda.is_available() else "cpu" diff --git a/tests/test_hilbert_transform.py b/tests/test_hilbert_transform.py index b91ba3f6b7..d484e230dd 100644 --- a/tests/test_hilbert_transform.py +++ b/tests/test_hilbert_transform.py @@ -19,7 +19,7 @@ from monai.networks.layers import HilbertTransform from monai.utils import OptionalImportError -from tests.utils import SkipIfModule, SkipIfNoModule +from tests.test_utils import SkipIfModule, SkipIfNoModule def create_expected_numpy_output(input_datum, **kwargs): diff --git a/tests/test_histogram_normalize.py b/tests/test_histogram_normalize.py index 25c0afb64d..7f3abf63f5 100644 --- a/tests/test_histogram_normalize.py +++ b/tests/test_histogram_normalize.py @@ -18,7 +18,7 @@ from monai.transforms import HistogramNormalize from monai.utils import get_equivalent_dtype -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_histogram_normalized.py b/tests/test_histogram_normalized.py index a390375441..ceadb66b74 100644 --- a/tests/test_histogram_normalized.py +++ b/tests/test_histogram_normalized.py @@ -18,7 +18,7 @@ from monai.transforms import HistogramNormalized from monai.utils import get_equivalent_dtype -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_hovernet.py b/tests/test_hovernet.py index fb4946b011..a664bfe1a7 100644 --- a/tests/test_hovernet.py +++ b/tests/test_hovernet.py @@ -19,7 +19,7 @@ from monai.networks import eval_mode, train_mode from monai.networks.nets import HoVerNet from monai.networks.nets.hovernet import _DenseLayerDecoder -from tests.utils import test_script_save +from tests.test_utils import test_script_save device = "cuda" if torch.cuda.is_available() else "cpu" diff --git a/tests/test_hovernet_instance_map_post_processing.py b/tests/test_hovernet_instance_map_post_processing.py index ce272fba1a..4e939eba0f 100644 --- a/tests/test_hovernet_instance_map_post_processing.py +++ b/tests/test_hovernet_instance_map_post_processing.py @@ -19,7 +19,7 @@ from monai.apps.pathology.transforms.post.array import HoVerNetInstanceMapPostProcessing from monai.transforms import ComputeHoVerMaps, FillHoles, GaussianSmooth from monai.utils import min_version, optional_import -from tests.utils import TEST_NDARRAYS, 
assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose _, has_scipy = optional_import("scipy", "1.8.1", min_version) _, has_skimage = optional_import("skimage", "0.19.3", min_version) diff --git a/tests/test_hovernet_instance_map_post_processingd.py b/tests/test_hovernet_instance_map_post_processingd.py index c982156caa..2963e4fa39 100644 --- a/tests/test_hovernet_instance_map_post_processingd.py +++ b/tests/test_hovernet_instance_map_post_processingd.py @@ -20,7 +20,7 @@ from monai.transforms import ComputeHoVerMaps, FillHoles, GaussianSmooth from monai.utils import min_version, optional_import from monai.utils.enums import HoVerNetBranch -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose _, has_scipy = optional_import("scipy", "1.8.1", min_version) _, has_skimage = optional_import("skimage", "0.19.3", min_version) diff --git a/tests/test_hovernet_nuclear_type_post_processing.py b/tests/test_hovernet_nuclear_type_post_processing.py index e97b7abd2c..77e0ab9a2c 100644 --- a/tests/test_hovernet_nuclear_type_post_processing.py +++ b/tests/test_hovernet_nuclear_type_post_processing.py @@ -22,7 +22,7 @@ ) from monai.transforms import ComputeHoVerMaps from monai.utils import min_version, optional_import -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose _, has_scipy = optional_import("scipy", "1.8.1", min_version) _, has_skimage = optional_import("skimage", "0.19.3", min_version) diff --git a/tests/test_hovernet_nuclear_type_post_processingd.py b/tests/test_hovernet_nuclear_type_post_processingd.py index 26cf80592c..89ab730211 100644 --- a/tests/test_hovernet_nuclear_type_post_processingd.py +++ b/tests/test_hovernet_nuclear_type_post_processingd.py @@ -23,7 +23,7 @@ from monai.transforms import ComputeHoVerMaps from monai.utils import min_version, optional_import from monai.utils.enums import HoVerNetBranch -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose _, has_scipy = optional_import("scipy", "1.8.1", min_version) _, has_skimage = optional_import("skimage", "0.19.3", min_version) diff --git a/tests/test_identity.py b/tests/test_identity.py index 4243a7f19a..4865781c52 100644 --- a/tests/test_identity.py +++ b/tests/test_identity.py @@ -14,7 +14,7 @@ import unittest from monai.transforms.utility.array import Identity -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose class TestIdentity(NumpyImageTestCase2D): diff --git a/tests/test_identityd.py b/tests/test_identityd.py index 6b81ad9f16..49d7d92216 100644 --- a/tests/test_identityd.py +++ b/tests/test_identityd.py @@ -14,7 +14,7 @@ import unittest from monai.transforms.utility.dictionary import Identityd -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose class TestIdentityd(NumpyImageTestCase2D): diff --git a/tests/test_image_rw.py b/tests/test_image_rw.py index 7e1c1deecc..20db7ca640 100644 --- a/tests/test_image_rw.py +++ b/tests/test_image_rw.py @@ -26,7 +26,7 @@ from monai.data.meta_tensor import MetaTensor from monai.transforms import LoadImage, SaveImage, moveaxis from monai.utils import MetaKeys, OptionalImportError, optional_import -from tests.utils import TEST_NDARRAYS, assert_allclose +from 
tests.test_utils import TEST_NDARRAYS, assert_allclose _, has_itk = optional_import("itk", allow_namespace_pkg=True) diff --git a/tests/test_init_reader.py b/tests/test_init_reader.py index 8331f742ec..cf73b84766 100644 --- a/tests/test_init_reader.py +++ b/tests/test_init_reader.py @@ -15,7 +15,7 @@ from monai.data import ITKReader, NibabelReader, NrrdReader, NumpyReader, PILReader, PydicomReader from monai.transforms import LoadImage, LoadImaged -from tests.utils import SkipIfNoModule +from tests.test_utils import SkipIfNoModule class TestInitLoadImage(unittest.TestCase): diff --git a/tests/test_integration_autorunner.py b/tests/test_integration_autorunner.py index 31a0813abc..5b761271d6 100644 --- a/tests/test_integration_autorunner.py +++ b/tests/test_integration_autorunner.py @@ -23,7 +23,7 @@ from monai.bundle.config_parser import ConfigParser from monai.data import create_test_image_3d from monai.utils import optional_import -from tests.utils import ( +from tests.test_utils import ( SkipIfBeforePyTorchVersion, get_testing_algo_template_path, skip_if_downloading_fails, diff --git a/tests/test_integration_bundle_run.py b/tests/test_integration_bundle_run.py index 60aaef05bf..eec7504566 100644 --- a/tests/test_integration_bundle_run.py +++ b/tests/test_integration_bundle_run.py @@ -29,7 +29,7 @@ from monai.bundle.utils import DEFAULT_HANDLERS_ID from monai.transforms import LoadImage from monai.utils import path_to_uri -from tests.utils import command_line_tests +from tests.test_utils import command_line_tests TEST_CASE_1 = [os.path.join(os.path.dirname(__file__), "testing_data", "inference.json"), (128, 128, 128)] diff --git a/tests/test_integration_classification_2d.py b/tests/test_integration_classification_2d.py index b137fc9b75..bcf686f8d9 100644 --- a/tests/test_integration_classification_2d.py +++ b/tests/test_integration_classification_2d.py @@ -38,8 +38,8 @@ Transpose, ) from monai.utils import set_determinism +from tests.test_utils import DistTestCase, TimedCall, skip_if_downloading_fails, skip_if_quick, testing_data_config from tests.testing_data.integration_answers import test_integration_value -from tests.utils import DistTestCase, TimedCall, skip_if_downloading_fails, skip_if_quick, testing_data_config TASK = "integration_classification_2d" diff --git a/tests/test_integration_determinism.py b/tests/test_integration_determinism.py index 3e88f05620..37dcf013fc 100644 --- a/tests/test_integration_determinism.py +++ b/tests/test_integration_determinism.py @@ -22,7 +22,7 @@ from monai.networks.nets import UNet from monai.transforms import Compose, EnsureChannelFirst, RandRotate90, RandSpatialCrop, ScaleIntensity from monai.utils import set_determinism -from tests.utils import DistTestCase, TimedCall +from tests.test_utils import DistTestCase, TimedCall def run_test(batch_size=64, train_steps=200, device="cuda:0"): diff --git a/tests/test_integration_fast_train.py b/tests/test_integration_fast_train.py index 071eb5cf78..f00aeab9a5 100644 --- a/tests/test_integration_fast_train.py +++ b/tests/test_integration_fast_train.py @@ -52,7 +52,7 @@ ToDeviced, ) from monai.utils import set_determinism -from tests.utils import DistTestCase, TimedCall, skip_if_no_cuda, skip_if_quick +from tests.test_utils import DistTestCase, TimedCall, skip_if_no_cuda, skip_if_quick @skip_if_no_cuda diff --git a/tests/test_integration_gpu_customization.py b/tests/test_integration_gpu_customization.py index 043405a580..fc90837a1a 100644 --- a/tests/test_integration_gpu_customization.py +++ 
b/tests/test_integration_gpu_customization.py @@ -24,7 +24,7 @@ from monai.data import create_test_image_3d from monai.utils import optional_import from monai.utils.enums import AlgoKeys -from tests.utils import ( +from tests.test_utils import ( SkipIfBeforePyTorchVersion, get_testing_algo_template_path, skip_if_downloading_fails, diff --git a/tests/test_integration_lazy_samples.py b/tests/test_integration_lazy_samples.py index 51d80e7305..3be4bbe36e 100644 --- a/tests/test_integration_lazy_samples.py +++ b/tests/test_integration_lazy_samples.py @@ -26,7 +26,7 @@ from monai.data import create_test_image_3d, decollate_batch from monai.transforms.utils import has_status_keys from monai.utils import TraceStatusKeys, set_determinism -from tests.utils import HAS_CUPY, DistTestCase, SkipIfBeforePyTorchVersion, skip_if_quick +from tests.test_utils import HAS_CUPY, DistTestCase, SkipIfBeforePyTorchVersion, skip_if_quick def _no_op(x): diff --git a/tests/test_integration_nnunetv2_runner.py b/tests/test_integration_nnunetv2_runner.py index 822d454f52..7c9e2c386c 100644 --- a/tests/test_integration_nnunetv2_runner.py +++ b/tests/test_integration_nnunetv2_runner.py @@ -22,7 +22,7 @@ from monai.bundle.config_parser import ConfigParser from monai.data import create_test_image_3d from monai.utils import optional_import -from tests.utils import SkipIfBeforePyTorchVersion, skip_if_downloading_fails, skip_if_no_cuda, skip_if_quick +from tests.test_utils import SkipIfBeforePyTorchVersion, skip_if_downloading_fails, skip_if_no_cuda, skip_if_quick _, has_tb = optional_import("torch.utils.tensorboard", name="SummaryWriter") _, has_nnunet = optional_import("nnunetv2") diff --git a/tests/test_integration_segmentation_3d.py b/tests/test_integration_segmentation_3d.py index c72369b151..8176489c2b 100644 --- a/tests/test_integration_segmentation_3d.py +++ b/tests/test_integration_segmentation_3d.py @@ -41,8 +41,8 @@ ) from monai.utils import optional_import, set_determinism from monai.visualize import plot_2d_or_3d_image +from tests.test_utils import DistTestCase, TimedCall, skip_if_quick from tests.testing_data.integration_answers import test_integration_value -from tests.utils import DistTestCase, TimedCall, skip_if_quick SummaryWriter, _ = optional_import("torch.utils.tensorboard", name="SummaryWriter") diff --git a/tests/test_integration_sliding_window.py b/tests/test_integration_sliding_window.py index 8b53e94941..29d2e6f107 100644 --- a/tests/test_integration_sliding_window.py +++ b/tests/test_integration_sliding_window.py @@ -27,7 +27,7 @@ from monai.networks.nets import UNet from monai.transforms import EnsureChannelFirst, SaveImage from monai.utils import pytorch_after, set_determinism -from tests.utils import DistTestCase, TimedCall, make_nifti_image, skip_if_quick +from tests.test_utils import DistTestCase, TimedCall, make_nifti_image, skip_if_quick def run_test(batch_size, img_name, seg_name, output_dir, device="cuda:0"): diff --git a/tests/test_integration_stn.py b/tests/test_integration_stn.py index 750a20ea5c..579afc2eb9 100644 --- a/tests/test_integration_stn.py +++ b/tests/test_integration_stn.py @@ -22,7 +22,7 @@ from monai.data import create_test_image_2d from monai.networks.layers import AffineTransform from monai.utils import set_determinism -from tests.utils import DistTestCase, TimedCall +from tests.test_utils import DistTestCase, TimedCall class STNBenchmark(nn.Module): diff --git a/tests/test_integration_unet_2d.py b/tests/test_integration_unet_2d.py index 3b40682de0..45723f53ca 100644 --- 
a/tests/test_integration_unet_2d.py +++ b/tests/test_integration_unet_2d.py @@ -21,7 +21,7 @@ from monai.data import create_test_image_2d from monai.losses import DiceLoss from monai.networks.nets import BasicUNet, UNet -from tests.utils import DistTestCase, TimedCall, skip_if_quick +from tests.test_utils import DistTestCase, TimedCall, skip_if_quick def run_test(net_name="basicunet", batch_size=64, train_steps=100, device="cuda:0"): diff --git a/tests/test_integration_workers.py b/tests/test_integration_workers.py index 123b1ddc6f..83dd023eaf 100644 --- a/tests/test_integration_workers.py +++ b/tests/test_integration_workers.py @@ -18,7 +18,7 @@ from monai.data import DataLoader from monai.utils import set_determinism -from tests.utils import DistTestCase, SkipIfBeforePyTorchVersion, TimedCall, skip_if_no_cuda, skip_if_quick +from tests.test_utils import DistTestCase, SkipIfBeforePyTorchVersion, TimedCall, skip_if_no_cuda, skip_if_quick def run_loading_test(num_workers=50, device=None, pw=False): diff --git a/tests/test_integration_workflows.py b/tests/test_integration_workflows.py index fafb66f675..3fbdcca078 100644 --- a/tests/test_integration_workflows.py +++ b/tests/test_integration_workflows.py @@ -53,8 +53,8 @@ ScaleIntensityd, ) from monai.utils import optional_import, set_determinism +from tests.test_utils import DistTestCase, TimedCall, assert_allclose, pytorch_after, skip_if_quick from tests.testing_data.integration_answers import test_integration_value -from tests.utils import DistTestCase, TimedCall, assert_allclose, pytorch_after, skip_if_quick SummaryWriter, _ = optional_import("torch.utils.tensorboard", name="SummaryWriter") diff --git a/tests/test_integration_workflows_adversarial.py b/tests/test_integration_workflows_adversarial.py index f323fc9917..5badafcc41 100644 --- a/tests/test_integration_workflows_adversarial.py +++ b/tests/test_integration_workflows_adversarial.py @@ -28,7 +28,7 @@ from monai.transforms import Compose, EnsureChannelFirstd, LoadImaged, RandFlipd, ScaleIntensityd from monai.utils import AdversarialKeys as Keys from monai.utils import CommonKeys, optional_import, set_determinism -from tests.utils import DistTestCase, TimedCall, skip_if_quick +from tests.test_utils import DistTestCase, TimedCall, skip_if_quick nib, has_nibabel = optional_import("nibabel") diff --git a/tests/test_integration_workflows_gan.py b/tests/test_integration_workflows_gan.py index 1428506020..a03fecbf3e 100644 --- a/tests/test_integration_workflows_gan.py +++ b/tests/test_integration_workflows_gan.py @@ -30,7 +30,7 @@ from monai.transforms import Compose, EnsureChannelFirstd, LoadImaged, RandFlipd, ScaleIntensityd from monai.utils import GanKeys as Keys from monai.utils import set_determinism -from tests.utils import DistTestCase, TimedCall, skip_if_quick +from tests.test_utils import DistTestCase, TimedCall, skip_if_quick def run_training_test(root_dir, device="cuda:0"): diff --git a/tests/test_intensity_stats.py b/tests/test_intensity_stats.py index e45c2acbad..ca3a440cb6 100644 --- a/tests/test_intensity_stats.py +++ b/tests/test_intensity_stats.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import IntensityStats -from tests.utils import TEST_NDARRAYS +from tests.test_utils import TEST_NDARRAYS TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_inverse.py b/tests/test_inverse.py index 6bd14a19f1..01d32e4baf 100644 --- a/tests/test_inverse.py +++ b/tests/test_inverse.py @@ -69,7 +69,7 @@ reset_ops_id, ) from monai.utils import 
first, get_seed, optional_import, set_determinism -from tests.utils import make_nifti_image, make_rand_affine +from tests.test_utils import make_nifti_image, make_rand_affine if TYPE_CHECKING: has_nib = True diff --git a/tests/test_inverse_array.py b/tests/test_inverse_array.py index 4da9ee34b9..140f03c110 100644 --- a/tests/test_inverse_array.py +++ b/tests/test_inverse_array.py @@ -20,7 +20,7 @@ from monai.transforms import Compose, EnsureChannelFirst, Flip, Orientation, Spacing from monai.transforms.inverse import InvertibleTransform from monai.utils import optional_import -from tests.utils import TEST_DEVICES +from tests.test_utils import TEST_DEVICES _, has_nib = optional_import("nibabel") diff --git a/tests/test_inverse_collation.py b/tests/test_inverse_collation.py index bf3972e6bd..e12c00611f 100644 --- a/tests/test_inverse_collation.py +++ b/tests/test_inverse_collation.py @@ -43,7 +43,7 @@ Rotated, ) from monai.utils import optional_import, set_determinism -from tests.utils import make_nifti_image +from tests.test_utils import make_nifti_image if TYPE_CHECKING: has_nib = True diff --git a/tests/test_invert.py b/tests/test_invert.py index 69d31edfc8..521207948e 100644 --- a/tests/test_invert.py +++ b/tests/test_invert.py @@ -37,7 +37,7 @@ Spacing, ) from monai.utils import set_determinism -from tests.utils import assert_allclose, make_nifti_image +from tests.test_utils import assert_allclose, make_nifti_image class TestInvert(unittest.TestCase): diff --git a/tests/test_invertd.py b/tests/test_invertd.py index f6e8fc40e7..af6bffb696 100644 --- a/tests/test_invertd.py +++ b/tests/test_invertd.py @@ -37,7 +37,7 @@ Spacingd, ) from monai.utils import set_determinism -from tests.utils import assert_allclose, make_nifti_image +from tests.test_utils import assert_allclose, make_nifti_image KEYS = ["image", "label"] diff --git a/tests/test_itk_torch_bridge.py b/tests/test_itk_torch_bridge.py index 22ae019271..ca73f12174 100644 --- a/tests/test_itk_torch_bridge.py +++ b/tests/test_itk_torch_bridge.py @@ -35,7 +35,13 @@ from monai.networks.blocks import Warp from monai.transforms import Affine from monai.utils import optional_import, set_determinism -from tests.utils import assert_allclose, skip_if_downloading_fails, skip_if_quick, test_is_quick, testing_data_config +from tests.test_utils import ( + assert_allclose, + skip_if_downloading_fails, + skip_if_quick, + test_is_quick, + testing_data_config, +) itk, has_itk = optional_import("itk") _, has_nib = optional_import("nibabel") diff --git a/tests/test_k_space_spike_noise.py b/tests/test_k_space_spike_noise.py index 17acedf319..3101d5c1c0 100644 --- a/tests/test_k_space_spike_noise.py +++ b/tests/test_k_space_spike_noise.py @@ -22,7 +22,7 @@ from monai.data.synthetic import create_test_image_2d, create_test_image_3d from monai.transforms import KSpaceSpikeNoise from monai.utils.misc import set_determinism -from tests.utils import TEST_NDARRAYS +from tests.test_utils import TEST_NDARRAYS TESTS = [] for shape in ((128, 64), (64, 48, 80)): diff --git a/tests/test_k_space_spike_noised.py b/tests/test_k_space_spike_noised.py index ce542af0aa..aa52217ac2 100644 --- a/tests/test_k_space_spike_noised.py +++ b/tests/test_k_space_spike_noised.py @@ -22,7 +22,7 @@ from monai.data.synthetic import create_test_image_2d, create_test_image_3d from monai.transforms import KSpaceSpikeNoised from monai.utils.misc import set_determinism -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS 
= [] for shape in ((128, 64), (64, 48, 80)): diff --git a/tests/test_keep_largest_connected_component.py b/tests/test_keep_largest_connected_component.py index 2dfac1142e..7b2d81a88b 100644 --- a/tests/test_keep_largest_connected_component.py +++ b/tests/test_keep_largest_connected_component.py @@ -21,7 +21,7 @@ from monai.transforms import KeepLargestConnectedComponent from monai.transforms.utils_pytorch_numpy_unification import moveaxis from monai.utils.type_conversion import convert_to_dst_type -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose def to_onehot(x): diff --git a/tests/test_keep_largest_connected_componentd.py b/tests/test_keep_largest_connected_componentd.py index 4d3172741d..22f289768c 100644 --- a/tests/test_keep_largest_connected_componentd.py +++ b/tests/test_keep_largest_connected_componentd.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import KeepLargestConnectedComponentd -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose grid_1 = [[[0, 0, 1, 0, 0], [0, 2, 1, 1, 1], [1, 2, 1, 0, 0], [1, 2, 0, 1, 0], [2, 2, 0, 0, 2]]] grid_2 = [[[0, 0, 0, 0, 1], [0, 0, 1, 1, 1], [1, 0, 1, 1, 2], [1, 0, 1, 2, 2], [0, 0, 0, 0, 1]]] diff --git a/tests/test_label_filter.py b/tests/test_label_filter.py index 93cf95a2a0..036219b42d 100644 --- a/tests/test_label_filter.py +++ b/tests/test_label_filter.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import LabelFilter -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose grid_1 = torch.tensor([[[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]]) diff --git a/tests/test_label_filterd.py b/tests/test_label_filterd.py index fba8100f25..cff540567c 100644 --- a/tests/test_label_filterd.py +++ b/tests/test_label_filterd.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms.post.dictionary import LabelFilterd -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose grid_1 = torch.tensor([[[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]]) diff --git a/tests/test_label_to_contour.py b/tests/test_label_to_contour.py index d7fbfc9b8d..07f600e2f8 100644 --- a/tests/test_label_to_contour.py +++ b/tests/test_label_to_contour.py @@ -17,7 +17,7 @@ import torch from monai.transforms import LabelToContour -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose expected_output_for_cube = [ [ diff --git a/tests/test_label_to_contourd.py b/tests/test_label_to_contourd.py index a91a712da6..157bbadbbd 100644 --- a/tests/test_label_to_contourd.py +++ b/tests/test_label_to_contourd.py @@ -17,7 +17,7 @@ import torch from monai.transforms import LabelToContourd -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose expected_output_for_cube = [ [ diff --git a/tests/test_label_to_mask.py b/tests/test_label_to_mask.py index 47a58cc989..f31bd71158 100644 --- a/tests/test_label_to_mask.py +++ b/tests/test_label_to_mask.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import LabelToMask -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_label_to_maskd.py 
b/tests/test_label_to_maskd.py index 44b537128d..521853116e 100644 --- a/tests/test_label_to_maskd.py +++ b/tests/test_label_to_maskd.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import LabelToMaskd -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_lambda.py b/tests/test_lambda.py index e0a5cf84db..0a9349b52c 100644 --- a/tests/test_lambda.py +++ b/tests/test_lambda.py @@ -19,7 +19,7 @@ from monai.data.meta_tensor import MetaTensor from monai.transforms.utility.array import Lambda from monai.utils.type_conversion import convert_to_numpy, convert_to_tensor -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose class TestLambda(NumpyImageTestCase2D): diff --git a/tests/test_lambdad.py b/tests/test_lambdad.py index fad5ebeee4..3b177e040a 100644 --- a/tests/test_lambdad.py +++ b/tests/test_lambdad.py @@ -19,7 +19,7 @@ from monai.data.meta_tensor import MetaTensor from monai.transforms.utility.dictionary import Lambdad from monai.utils.type_conversion import convert_to_numpy, convert_to_tensor -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose class TestLambdad(NumpyImageTestCase2D): diff --git a/tests/test_lltm.py b/tests/test_lltm.py index cc64672e77..0b72e35146 100644 --- a/tests/test_lltm.py +++ b/tests/test_lltm.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.networks.layers import LLTM -from tests.utils import SkipIfNoModule, assert_allclose, is_tf32_env +from tests.test_utils import SkipIfNoModule, assert_allclose, is_tf32_env _rtol = 0.001 if is_tf32_env() else 0.0001 diff --git a/tests/test_lmdbdataset.py b/tests/test_lmdbdataset.py index 9d128dd728..c1fcee8071 100644 --- a/tests/test_lmdbdataset.py +++ b/tests/test_lmdbdataset.py @@ -21,7 +21,7 @@ from monai.data import LMDBDataset, json_hashing from monai.transforms import Compose, LoadImaged, SimulateDelayd, Transform -from tests.utils import skip_if_windows +from tests.test_utils import skip_if_windows TEST_CASE_1 = [ Compose( diff --git a/tests/test_lmdbdataset_dist.py b/tests/test_lmdbdataset_dist.py index 1acb89beb3..dc3fd2f9cb 100644 --- a/tests/test_lmdbdataset_dist.py +++ b/tests/test_lmdbdataset_dist.py @@ -19,7 +19,7 @@ from monai.data import LMDBDataset, json_hashing from monai.transforms import Transform -from tests.utils import DistCall, DistTestCase, skip_if_windows +from tests.test_utils import DistCall, DistTestCase, skip_if_windows class _InplaceXform(Transform): diff --git a/tests/test_load_image.py b/tests/test_load_image.py index 07acf7c179..2bd9d64078 100644 --- a/tests/test_load_image.py +++ b/tests/test_load_image.py @@ -29,7 +29,7 @@ from monai.data.meta_tensor import MetaTensor from monai.transforms import LoadImage from monai.utils import optional_import -from tests.utils import SkipIfNoModule, assert_allclose, skip_if_downloading_fails, testing_data_config +from tests.test_utils import SkipIfNoModule, assert_allclose, skip_if_downloading_fails, testing_data_config itk, has_itk = optional_import("itk", allow_namespace_pkg=True) ITKReader, _ = optional_import("monai.data", name="ITKReader", as_type="decorator") diff --git a/tests/test_load_imaged.py b/tests/test_load_imaged.py index 914240c705..62663fa1b3 100644 
--- a/tests/test_load_imaged.py +++ b/tests/test_load_imaged.py @@ -28,7 +28,7 @@ from monai.transforms import Compose, EnsureChannelFirstD, FromMetaTensord, LoadImaged, SaveImageD from monai.transforms.meta_utility.dictionary import ToMetaTensord from monai.utils import optional_import -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose itk, has_itk = optional_import("itk", allow_namespace_pkg=True) diff --git a/tests/test_localnet.py b/tests/test_localnet.py index 97aa94d2c5..ee920436ff 100644 --- a/tests/test_localnet.py +++ b/tests/test_localnet.py @@ -18,7 +18,7 @@ from monai.networks import eval_mode from monai.networks.nets.regunet import LocalNet -from tests.utils import test_script_save +from tests.test_utils import test_script_save device = "cuda" if torch.cuda.is_available() else "cpu" diff --git a/tests/test_lr_finder.py b/tests/test_lr_finder.py index d26cb23a90..e53539f6fd 100644 --- a/tests/test_lr_finder.py +++ b/tests/test_lr_finder.py @@ -27,7 +27,7 @@ from monai.transforms import Compose, EnsureChannelFirstd, LoadImaged, ScaleIntensityd, ToTensord from monai.utils import optional_import, set_determinism from monai.utils.misc import MONAIEnvVars -from tests.utils import skip_if_downloading_fails +from tests.test_utils import skip_if_downloading_fails if TYPE_CHECKING: import matplotlib.pyplot as plt diff --git a/tests/test_make_nifti.py b/tests/test_make_nifti.py index 08d3a731ab..b3d85c45c7 100644 --- a/tests/test_make_nifti.py +++ b/tests/test_make_nifti.py @@ -21,7 +21,7 @@ from monai.data.synthetic import create_test_image_2d from monai.utils import optional_import -from tests.utils import make_nifti_image +from tests.test_utils import make_nifti_image _, has_nib = optional_import("nibabel") diff --git a/tests/test_map_and_generate_sampling_centers.py b/tests/test_map_and_generate_sampling_centers.py index ff74f974b9..5868597ad6 100644 --- a/tests/test_map_and_generate_sampling_centers.py +++ b/tests/test_map_and_generate_sampling_centers.py @@ -19,7 +19,7 @@ from monai.transforms import map_and_generate_sampling_centers from monai.utils.misc import set_determinism -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TEST_CASE_1 = [ # test Argmax data diff --git a/tests/test_map_binary_to_indices.py b/tests/test_map_binary_to_indices.py index 9931d997bb..e37adbc26c 100644 --- a/tests/test_map_binary_to_indices.py +++ b/tests/test_map_binary_to_indices.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import map_binary_to_indices -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_map_classes_to_indices.py b/tests/test_map_classes_to_indices.py index 902744ab65..c7b5c5bea0 100644 --- a/tests/test_map_classes_to_indices.py +++ b/tests/test_map_classes_to_indices.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import map_classes_to_indices -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_map_label_value.py b/tests/test_map_label_value.py index cd311df6bd..4e64dc5272 100644 --- a/tests/test_map_label_value.py +++ b/tests/test_map_label_value.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.transforms import MapLabelValue -from tests.utils 
import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_map_label_valued.py b/tests/test_map_label_valued.py index 0fb46f2515..afc71ab21d 100644 --- a/tests/test_map_label_valued.py +++ b/tests/test_map_label_valued.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.transforms import MapLabelValued -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose TEST_CASE_1 = [ {"keys": "seg", "orig_labels": [3, 2, 1], "target_labels": [0, 1, 2]}, diff --git a/tests/test_mask_intensity.py b/tests/test_mask_intensity.py index b7ff324946..3c788029f5 100644 --- a/tests/test_mask_intensity.py +++ b/tests/test_mask_intensity.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.transforms import MaskIntensity -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TEST_CASE_1 = [ {"mask_data": np.array([[[0, 0, 0], [0, 1, 0], [0, 0, 0]]])}, diff --git a/tests/test_masked_autoencoder_vit.py b/tests/test_masked_autoencoder_vit.py index f8f6977cc2..973fbab662 100644 --- a/tests/test_masked_autoencoder_vit.py +++ b/tests/test_masked_autoencoder_vit.py @@ -18,7 +18,7 @@ from monai.networks import eval_mode from monai.networks.nets.masked_autoencoder_vit import MaskedAutoEncoderViT -from tests.utils import skip_if_quick +from tests.test_utils import skip_if_quick TEST_CASE_MaskedAutoEncoderViT = [] for masking_ratio in [0.5]: diff --git a/tests/test_masked_loss.py b/tests/test_masked_loss.py index 3c04ffadcb..aaba9969a5 100644 --- a/tests/test_masked_loss.py +++ b/tests/test_masked_loss.py @@ -19,7 +19,7 @@ from monai.losses.dice import DiceFocalLoss, DiceLoss from monai.losses.spatial_mask import MaskedLoss from monai.utils import set_determinism -from tests.utils import test_script_save +from tests.test_utils import test_script_save device = "cuda" if torch.cuda.is_available() else "cpu" diff --git a/tests/test_masked_patch_wsi_dataset.py b/tests/test_masked_patch_wsi_dataset.py index 8d24075595..59167b8b14 100644 --- a/tests/test_masked_patch_wsi_dataset.py +++ b/tests/test_masked_patch_wsi_dataset.py @@ -21,7 +21,7 @@ from monai.data import Dataset, MaskedPatchWSIDataset from monai.transforms import Lambdad from monai.utils import ProbMapKeys, WSIPatchKeys, optional_import, set_determinism -from tests.utils import download_url_or_skip_test, testing_data_config +from tests.test_utils import download_url_or_skip_test, testing_data_config set_determinism(0) diff --git a/tests/test_matshow3d.py b/tests/test_matshow3d.py index 2eba310f4e..7d5357aa4e 100644 --- a/tests/test_matshow3d.py +++ b/tests/test_matshow3d.py @@ -27,7 +27,7 @@ ) from monai.utils import optional_import from monai.visualize.utils import matshow3d -from tests.utils import SkipIfNoModule +from tests.test_utils import SkipIfNoModule compare_images, _ = optional_import("matplotlib.testing.compare", name="compare_images") pyplot, has_pyplot = optional_import("matplotlib", name="pyplot") diff --git a/tests/test_mean_ensemble.py b/tests/test_mean_ensemble.py index 6b463f8530..eddfe47281 100644 --- a/tests/test_mean_ensemble.py +++ b/tests/test_mean_ensemble.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.transforms import MeanEnsemble -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for p in TEST_NDARRAYS: diff --git 
a/tests/test_mean_ensembled.py b/tests/test_mean_ensembled.py index 795ae47368..631b0d3a92 100644 --- a/tests/test_mean_ensembled.py +++ b/tests/test_mean_ensembled.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.transforms import MeanEnsembled -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_median_smooth.py b/tests/test_median_smooth.py index 5930c0c6b6..96f273b4cb 100644 --- a/tests/test_median_smooth.py +++ b/tests/test_median_smooth.py @@ -16,7 +16,7 @@ from parameterized import parameterized from monai.transforms import MedianSmooth -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] diff --git a/tests/test_median_smoothd.py b/tests/test_median_smoothd.py index e0bdb331c8..0ca282991a 100644 --- a/tests/test_median_smoothd.py +++ b/tests/test_median_smoothd.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import MedianSmoothd -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for p in TEST_NDARRAYS[0:1]: diff --git a/tests/test_mednistdataset.py b/tests/test_mednistdataset.py index c1b21e9373..7c6f837dc8 100644 --- a/tests/test_mednistdataset.py +++ b/tests/test_mednistdataset.py @@ -19,7 +19,7 @@ from monai.apps import MedNISTDataset from monai.data import MetaTensor from monai.transforms import Compose, EnsureChannelFirstd, LoadImaged, ScaleIntensityd -from tests.utils import skip_if_downloading_fails, skip_if_quick +from tests.test_utils import skip_if_downloading_fails, skip_if_quick MEDNIST_FULL_DATASET_LENGTH = 58954 diff --git a/tests/test_meta_affine.py b/tests/test_meta_affine.py index 890734391f..e81852cfd6 100644 --- a/tests/test_meta_affine.py +++ b/tests/test_meta_affine.py @@ -34,7 +34,7 @@ Transform, ) from monai.utils import convert_data_type, optional_import -from tests.utils import assert_allclose, download_url_or_skip_test, testing_data_config +from tests.test_utils import assert_allclose, download_url_or_skip_test, testing_data_config itk, has_itk = optional_import("itk") TINY_DIFF = 1e-4 diff --git a/tests/test_meta_tensor.py b/tests/test_meta_tensor.py index 60b6019703..f0c6abc3b1 100644 --- a/tests/test_meta_tensor.py +++ b/tests/test_meta_tensor.py @@ -34,7 +34,7 @@ from monai.transforms import BorderPadd, Compose, DivisiblePadd, FromMetaTensord, ToMetaTensord from monai.utils.enums import PostFix from monai.utils.module import pytorch_after -from tests.utils import TEST_DEVICES, SkipIfBeforePyTorchVersion, assert_allclose, skip_if_no_cuda +from tests.test_utils import TEST_DEVICES, SkipIfBeforePyTorchVersion, assert_allclose, skip_if_no_cuda DTYPES = [[torch.float32], [torch.float64], [torch.float16], [torch.int64], [torch.int32], [None]] TESTS = [] diff --git a/tests/test_metatensor_integration.py b/tests/test_metatensor_integration.py index d647e47e74..11c51ebb77 100644 --- a/tests/test_metatensor_integration.py +++ b/tests/test_metatensor_integration.py @@ -25,7 +25,7 @@ from monai.data.utils import TraceKeys from monai.transforms import InvertD, SaveImageD, reset_ops_id from monai.utils import optional_import, set_determinism -from tests.utils import assert_allclose, download_url_or_skip_test, testing_data_config +from tests.test_utils import assert_allclose, download_url_or_skip_test, testing_data_config nib, has_nib = 
optional_import("nibabel") TINY_DIFF = 0.1 diff --git a/tests/test_milmodel.py b/tests/test_milmodel.py index 42116e8220..ee2b969ea2 100644 --- a/tests/test_milmodel.py +++ b/tests/test_milmodel.py @@ -19,7 +19,7 @@ from monai.networks import eval_mode from monai.networks.nets import MILModel from monai.utils.module import optional_import -from tests.utils import skip_if_downloading_fails, test_script_save +from tests.test_utils import skip_if_downloading_fails, test_script_save models, _ = optional_import("torchvision.models") diff --git a/tests/test_mmar_download.py b/tests/test_mmar_download.py index 2ac73a8149..7ce0cc44cc 100644 --- a/tests/test_mmar_download.py +++ b/tests/test_mmar_download.py @@ -25,7 +25,7 @@ from monai.apps.mmars import MODEL_DESC from monai.apps.mmars.mmars import _get_val from monai.utils import version_leq -from tests.utils import skip_if_downloading_fails, skip_if_quick +from tests.test_utils import skip_if_downloading_fails, skip_if_quick TEST_CASES = [["clara_pt_prostate_mri_segmentation"], ["clara_pt_covid19_ct_lesion_segmentation"]] TEST_EXTRACT_CASES = [ diff --git a/tests/test_morphological_ops.py b/tests/test_morphological_ops.py index 422e8c4b9d..b43f382753 100644 --- a/tests/test_morphological_ops.py +++ b/tests/test_morphological_ops.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms.utils_morphological_ops import dilate, erode, get_morphological_filter_result_t -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS_SHAPE = [] for p in TEST_NDARRAYS: diff --git a/tests/test_mri_utils.py b/tests/test_mri_utils.py index aabf06d02e..e2ebb30b67 100644 --- a/tests/test_mri_utils.py +++ b/tests/test_mri_utils.py @@ -16,7 +16,7 @@ from parameterized import parameterized from monai.apps.reconstruction.mri_utils import root_sum_of_squares -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose # root_sum_of_squares im = [[3.0, 4.0], [3.0, 4.0]] diff --git a/tests/test_multi_scale.py b/tests/test_multi_scale.py index 0b49087216..e57e62a3ad 100644 --- a/tests/test_multi_scale.py +++ b/tests/test_multi_scale.py @@ -18,7 +18,7 @@ from monai.losses import DiceLoss from monai.losses.multi_scale import MultiScaleLoss -from tests.utils import test_script_save +from tests.test_utils import test_script_save dice_loss = DiceLoss(include_background=True, sigmoid=True, smooth_nr=1e-5, smooth_dr=1e-5) device = "cuda" if torch.cuda.is_available() else "cpu" diff --git a/tests/test_net_adapter.py b/tests/test_net_adapter.py index 242326e242..c441f7409b 100644 --- a/tests/test_net_adapter.py +++ b/tests/test_net_adapter.py @@ -18,7 +18,7 @@ from monai.networks import eval_mode from monai.networks.nets import NetAdapter, resnet18 -from tests.utils import test_script_save +from tests.test_utils import test_script_save device = "cuda" if torch.cuda.is_available() else "cpu" diff --git a/tests/test_network_consistency.py b/tests/test_network_consistency.py index bcfd448144..6b67ba8ab2 100644 --- a/tests/test_network_consistency.py +++ b/tests/test_network_consistency.py @@ -24,7 +24,7 @@ import monai.networks.nets as nets from monai.utils import set_determinism from monai.utils.misc import MONAIEnvVars -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose extra_test_data_dir = MONAIEnvVars.extra_test_data() diff --git a/tests/test_nifti_rw.py b/tests/test_nifti_rw.py index 
8543fcea30..dded2b19c3 100644 --- a/tests/test_nifti_rw.py +++ b/tests/test_nifti_rw.py @@ -21,7 +21,7 @@ from monai.data import NibabelWriter from monai.transforms import LoadImage, Orientation, Spacing -from tests.utils import TEST_NDARRAYS, assert_allclose, make_nifti_image +from tests.test_utils import TEST_NDARRAYS, assert_allclose, make_nifti_image TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_normalize_intensity.py b/tests/test_normalize_intensity.py index 7efd0d83e5..b427264b0f 100644 --- a/tests/test_normalize_intensity.py +++ b/tests/test_normalize_intensity.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.transforms import NormalizeIntensity -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_normalize_intensityd.py b/tests/test_normalize_intensityd.py index 229dcd00ff..d9bc14d95a 100644 --- a/tests/test_normalize_intensityd.py +++ b/tests/test_normalize_intensityd.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import NormalizeIntensityd -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_numpy_reader.py b/tests/test_numpy_reader.py index 6303598bb7..bfb9e1b15b 100644 --- a/tests/test_numpy_reader.py +++ b/tests/test_numpy_reader.py @@ -20,7 +20,7 @@ from monai.data import DataLoader, Dataset, NumpyReader from monai.transforms import LoadImage, LoadImaged -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose class TestNumpyReader(unittest.TestCase): diff --git a/tests/test_nvtx_decorator.py b/tests/test_nvtx_decorator.py index efd2906972..70da469a65 100644 --- a/tests/test_nvtx_decorator.py +++ b/tests/test_nvtx_decorator.py @@ -35,7 +35,7 @@ ToTensord, ) from monai.utils import Range, optional_import -from tests.utils import HAS_CUPY +from tests.test_utils import HAS_CUPY _, has_nvtx = optional_import("torch._C._nvtx", descriptor="NVTX is not installed. 
Are you sure you have a CUDA build?") _, has_tvt = optional_import("torchvision.transforms") diff --git a/tests/test_ori_ras_lps.py b/tests/test_ori_ras_lps.py index 39c0a57877..9536c64b35 100644 --- a/tests/test_ori_ras_lps.py +++ b/tests/test_ori_ras_lps.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.data.utils import orientation_ras_lps -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TEST_CASES_AFFINE = [] for p in TEST_NDARRAYS: diff --git a/tests/test_orientation.py b/tests/test_orientation.py index 2f3334e622..17482cd41d 100644 --- a/tests/test_orientation.py +++ b/tests/test_orientation.py @@ -22,7 +22,7 @@ from monai.data.meta_tensor import MetaTensor from monai.transforms import Orientation, create_rotate, create_translate from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import TEST_DEVICES, assert_allclose +from tests.test_utils import TEST_DEVICES, assert_allclose TESTS = [] for device in TEST_DEVICES: diff --git a/tests/test_orientationd.py b/tests/test_orientationd.py index b885266c69..24c1644557 100644 --- a/tests/test_orientationd.py +++ b/tests/test_orientationd.py @@ -22,7 +22,7 @@ from monai.data.meta_tensor import MetaTensor from monai.transforms import Orientationd from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import TEST_DEVICES +from tests.test_utils import TEST_DEVICES TESTS = [] for device in TEST_DEVICES: diff --git a/tests/test_pad_mode.py b/tests/test_pad_mode.py index 54ee2c6d75..1992b83d52 100644 --- a/tests/test_pad_mode.py +++ b/tests/test_pad_mode.py @@ -18,7 +18,7 @@ from monai.transforms import CastToType, Pad from monai.utils import NumpyPadMode, PytorchPadMode -from tests.utils import SkipIfBeforePyTorchVersion +from tests.test_utils import SkipIfBeforePyTorchVersion @SkipIfBeforePyTorchVersion((1, 10, 1)) diff --git a/tests/test_patch_gan_dicriminator.py b/tests/test_patch_gan_dicriminator.py index c19898e70d..184f76fa9d 100644 --- a/tests/test_patch_gan_dicriminator.py +++ b/tests/test_patch_gan_dicriminator.py @@ -18,7 +18,7 @@ from monai.networks import eval_mode from monai.networks.nets import MultiScalePatchDiscriminator, PatchDiscriminator -from tests.utils import test_script_save +from tests.test_utils import test_script_save TEST_PATCHGAN = [ [ diff --git a/tests/test_patch_inferer.py b/tests/test_patch_inferer.py index c6308224b0..2deab6fe73 100644 --- a/tests/test_patch_inferer.py +++ b/tests/test_patch_inferer.py @@ -19,7 +19,7 @@ from monai.data.meta_tensor import MetaTensor from monai.inferers import AvgMerger, PatchInferer, SlidingWindowSplitter -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose TENSOR_4x4 = torch.randint(low=0, high=255, size=(2, 3, 4, 4), dtype=torch.float32) TENSOR_2x2 = avg_pool2d(TENSOR_4x4, 2, 2) diff --git a/tests/test_patch_wsi_dataset.py b/tests/test_patch_wsi_dataset.py index 70e01eaaf4..9203cb2d1a 100644 --- a/tests/test_patch_wsi_dataset.py +++ b/tests/test_patch_wsi_dataset.py @@ -23,7 +23,7 @@ from monai.data.wsi_reader import CuCIMWSIReader, OpenSlideWSIReader from monai.utils import optional_import from monai.utils.enums import WSIPatchKeys -from tests.utils import download_url_or_skip_test, testing_data_config +from tests.test_utils import download_url_or_skip_test, testing_data_config cucim, has_cim = optional_import("cucim") has_cim = has_cim and hasattr(cucim, "CuImage") diff --git a/tests/test_patchembedding.py 
b/tests/test_patchembedding.py index 71ac767966..30e4b11883 100644 --- a/tests/test_patchembedding.py +++ b/tests/test_patchembedding.py @@ -21,7 +21,7 @@ from monai.networks import eval_mode from monai.networks.blocks.patchembedding import PatchEmbed, PatchEmbeddingBlock from monai.utils import optional_import -from tests.utils import SkipIfBeforePyTorchVersion +from tests.test_utils import SkipIfBeforePyTorchVersion einops, has_einops = optional_import("einops") diff --git a/tests/test_perceptual_loss.py b/tests/test_perceptual_loss.py index b8aa2e5982..30907e8468 100644 --- a/tests/test_perceptual_loss.py +++ b/tests/test_perceptual_loss.py @@ -18,7 +18,7 @@ from monai.losses import PerceptualLoss from monai.utils import optional_import -from tests.utils import SkipIfBeforePyTorchVersion, assert_allclose, skip_if_downloading_fails, skip_if_quick +from tests.test_utils import SkipIfBeforePyTorchVersion, assert_allclose, skip_if_downloading_fails, skip_if_quick _, has_torchvision = optional_import("torchvision") TEST_CASES = [ diff --git a/tests/test_persistentdataset_dist.py b/tests/test_persistentdataset_dist.py index c369af9e92..2a9df63c06 100644 --- a/tests/test_persistentdataset_dist.py +++ b/tests/test_persistentdataset_dist.py @@ -21,7 +21,7 @@ from monai.data import PersistentDataset, json_hashing from monai.transforms import Transform -from tests.utils import DistCall, DistTestCase +from tests.test_utils import DistCall, DistTestCase class _InplaceXform(Transform): diff --git a/tests/test_phl_cpu.py b/tests/test_phl_cpu.py index 6f872a4776..12b840cabf 100644 --- a/tests/test_phl_cpu.py +++ b/tests/test_phl_cpu.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.networks.layers.filtering import PHLFilter -from tests.utils import skip_if_no_cpp_extension +from tests.test_utils import skip_if_no_cpp_extension TEST_CASES = [ [ diff --git a/tests/test_phl_cuda.py b/tests/test_phl_cuda.py index b410ea8722..046b06e71e 100644 --- a/tests/test_phl_cuda.py +++ b/tests/test_phl_cuda.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.networks.layers.filtering import PHLFilter -from tests.utils import skip_if_no_cpp_extension, skip_if_no_cuda +from tests.test_utils import skip_if_no_cpp_extension, skip_if_no_cuda TEST_CASES = [ [ diff --git a/tests/test_plot_2d_or_3d_image.py b/tests/test_plot_2d_or_3d_image.py index 16241853b3..231e6b4161 100644 --- a/tests/test_plot_2d_or_3d_image.py +++ b/tests/test_plot_2d_or_3d_image.py @@ -20,7 +20,7 @@ from monai.utils import optional_import from monai.visualize import plot_2d_or_3d_image -from tests.utils import SkipIfBeforePyTorchVersion, SkipIfNoModule +from tests.test_utils import SkipIfBeforePyTorchVersion, SkipIfNoModule SummaryWriter, has_tb = optional_import("torch.utils.tensorboard", name="SummaryWriter") diff --git a/tests/test_point_based_window_inferer.py b/tests/test_point_based_window_inferer.py index 1b293288c4..de57d39606 100644 --- a/tests/test_point_based_window_inferer.py +++ b/tests/test_point_based_window_inferer.py @@ -20,7 +20,7 @@ from monai.networks import eval_mode from monai.networks.nets.vista3d import vista3d132 from monai.utils import optional_import -from tests.utils import SkipIfBeforePyTorchVersion, skip_if_quick +from tests.test_utils import SkipIfBeforePyTorchVersion, skip_if_quick device = "cuda" if torch.cuda.is_available() else "cpu" diff --git a/tests/test_prepare_batch_default.py b/tests/test_prepare_batch_default.py index 093468ce27..cff3d38281 100644 --- 
a/tests/test_prepare_batch_default.py +++ b/tests/test_prepare_batch_default.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.engines import PrepareBatchDefault, SupervisedEvaluator -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose class TestNet(torch.nn.Module): diff --git a/tests/test_prepare_batch_default_dist.py b/tests/test_prepare_batch_default_dist.py index 53a79575e6..c974db7898 100644 --- a/tests/test_prepare_batch_default_dist.py +++ b/tests/test_prepare_batch_default_dist.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.engines import PrepareBatchDefault, SupervisedEvaluator -from tests.utils import DistCall, DistTestCase, assert_allclose +from tests.test_utils import DistCall, DistTestCase, assert_allclose TEST_CASE_1 = [ [ diff --git a/tests/test_prepare_batch_extra_input.py b/tests/test_prepare_batch_extra_input.py index 3c53cc6481..5b0afa3e38 100644 --- a/tests/test_prepare_batch_extra_input.py +++ b/tests/test_prepare_batch_extra_input.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.engines import PrepareBatchExtraInput, SupervisedEvaluator -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose TEST_CASE_0 = [ {"extra_keys": "extra1"}, diff --git a/tests/test_prepare_batch_hovernet.py b/tests/test_prepare_batch_hovernet.py index ae9554a3e8..dcff4cfb5b 100644 --- a/tests/test_prepare_batch_hovernet.py +++ b/tests/test_prepare_batch_hovernet.py @@ -19,7 +19,7 @@ from monai.apps.pathology.engines import PrepareBatchHoVerNet from monai.engines import SupervisedEvaluator from monai.utils.enums import HoVerNetBranch -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose TEST_CASE_0 = [ {"extra_keys": ["extra_label1", "extra_label2"]}, diff --git a/tests/test_probnms.py b/tests/test_probnms.py index 2b52583ad4..4cba908b39 100644 --- a/tests/test_probnms.py +++ b/tests/test_probnms.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.transforms.post.array import ProbNMS -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_probnmsd.py b/tests/test_probnmsd.py index aeb32bdb79..b4c8a37c95 100644 --- a/tests/test_probnmsd.py +++ b/tests/test_probnmsd.py @@ -19,7 +19,7 @@ from parameterized import parameterized from monai.transforms.post.dictionary import ProbNMSD -from tests.utils import TEST_NDARRAYS +from tests.test_utils import TEST_NDARRAYS TESTS: list[Any] = [] for p in TEST_NDARRAYS: diff --git a/tests/test_profiling.py b/tests/test_profiling.py index 649d980ebf..d960531a54 100644 --- a/tests/test_profiling.py +++ b/tests/test_profiling.py @@ -23,7 +23,7 @@ from monai.utils import first, optional_import from monai.utils.enums import CommonKeys from monai.utils.profiling import ProfileHandler, ProfileResult, WorkflowProfiler -from tests.utils import SkipIfNoModule +from tests.test_utils import SkipIfNoModule pd, _ = optional_import("pandas") diff --git a/tests/test_query_memory.py b/tests/test_query_memory.py index 77c34ede39..fd703c4013 100644 --- a/tests/test_query_memory.py +++ b/tests/test_query_memory.py @@ -13,7 +13,7 @@ import unittest -from tests.utils import query_memory +from tests.test_utils import query_memory class TestQueryMemory(unittest.TestCase): diff --git a/tests/test_quicknat.py b/tests/test_quicknat.py index f6786405d2..918e4c6e28 100644 --- 
a/tests/test_quicknat.py +++ b/tests/test_quicknat.py @@ -19,7 +19,7 @@ from monai.networks import eval_mode from monai.networks.nets import Quicknat from monai.utils import optional_import -from tests.utils import test_script_save +from tests.test_utils import test_script_save _, has_se = optional_import("squeeze_and_excitation") diff --git a/tests/test_rand_adjust_contrast.py b/tests/test_rand_adjust_contrast.py index 72d0df141e..777f14bcfe 100644 --- a/tests/test_rand_adjust_contrast.py +++ b/tests/test_rand_adjust_contrast.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import RandAdjustContrast -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose TEST_CASE_1 = [(0.5, 4.5)] diff --git a/tests/test_rand_adjust_contrastd.py b/tests/test_rand_adjust_contrastd.py index bbd5c22009..d18782580e 100644 --- a/tests/test_rand_adjust_contrastd.py +++ b/tests/test_rand_adjust_contrastd.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import RandAdjustContrastd -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose TEST_CASE_1 = [(0.5, 4.5)] diff --git a/tests/test_rand_affine.py b/tests/test_rand_affine.py index 2c827b7426..6b544d2be2 100644 --- a/tests/test_rand_affine.py +++ b/tests/test_rand_affine.py @@ -20,7 +20,7 @@ from monai.transforms import RandAffine from monai.utils.type_conversion import convert_data_type from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose, is_tf32_env +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose, is_tf32_env _rtol = 1e-3 if is_tf32_env() else 1e-4 diff --git a/tests/test_rand_affine_grid.py b/tests/test_rand_affine_grid.py index 91558ebd03..0912abc297 100644 --- a/tests/test_rand_affine_grid.py +++ b/tests/test_rand_affine_grid.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.transforms import RandAffineGrid -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose, is_tf32_env +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose, is_tf32_env _rtol = 1e-1 if is_tf32_env() else 1e-4 diff --git a/tests/test_rand_affined.py b/tests/test_rand_affined.py index eb8ebd06c5..83848e7482 100644 --- a/tests/test_rand_affined.py +++ b/tests/test_rand_affined.py @@ -22,7 +22,7 @@ from monai.transforms import RandAffined from monai.utils import GridSampleMode, ensure_tuple_rep from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import assert_allclose, is_tf32_env +from tests.test_utils import assert_allclose, is_tf32_env _rtol = 1e-3 if is_tf32_env() else 1e-4 diff --git a/tests/test_rand_axis_flip.py b/tests/test_rand_axis_flip.py index 9c465a0bcb..476cfeca16 100644 --- a/tests/test_rand_axis_flip.py +++ b/tests/test_rand_axis_flip.py @@ -19,7 +19,7 @@ from monai.data import MetaTensor, set_track_meta from monai.transforms import RandAxisFlip from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import TEST_NDARRAYS_ALL, NumpyImageTestCase2D, assert_allclose, test_local_inversion +from tests.test_utils import TEST_NDARRAYS_ALL, NumpyImageTestCase2D, assert_allclose, test_local_inversion class TestRandAxisFlip(NumpyImageTestCase2D): diff --git a/tests/test_rand_axis_flipd.py b/tests/test_rand_axis_flipd.py index 
d3abef1be4..e0ae28cf37 100644 --- a/tests/test_rand_axis_flipd.py +++ b/tests/test_rand_axis_flipd.py @@ -19,7 +19,7 @@ from monai.data import MetaTensor, set_track_meta from monai.transforms import RandAxisFlipd from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import TEST_NDARRAYS_ALL, NumpyImageTestCase3D, assert_allclose, test_local_inversion +from tests.test_utils import TEST_NDARRAYS_ALL, NumpyImageTestCase3D, assert_allclose, test_local_inversion class TestRandAxisFlip(NumpyImageTestCase3D): diff --git a/tests/test_rand_bias_field.py b/tests/test_rand_bias_field.py index 328f46b7ee..682e6a008a 100644 --- a/tests/test_rand_bias_field.py +++ b/tests/test_rand_bias_field.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.transforms import RandBiasField -from tests.utils import TEST_NDARRAYS +from tests.test_utils import TEST_NDARRAYS TEST_CASES_2D = [{"prob": 1.0}, (3, 32, 32)] TEST_CASES_3D = [{"prob": 1.0}, (3, 32, 32, 32)] diff --git a/tests/test_rand_coarse_dropout.py b/tests/test_rand_coarse_dropout.py index ac857f9184..8df823d236 100644 --- a/tests/test_rand_coarse_dropout.py +++ b/tests/test_rand_coarse_dropout.py @@ -19,7 +19,7 @@ from monai.transforms import RandCoarseDropout from monai.utils import fall_back_tuple -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TEST_CASE_0 = [ {"holes": 2, "spatial_size": [2, 2, 2], "fill_value": 5, "prob": 1.0}, diff --git a/tests/test_rand_crop_by_label_classes.py b/tests/test_rand_crop_by_label_classes.py index 743b894d75..12b235ea7f 100644 --- a/tests/test_rand_crop_by_label_classes.py +++ b/tests/test_rand_crop_by_label_classes.py @@ -19,7 +19,7 @@ from monai.data.meta_tensor import MetaTensor from monai.transforms import ClassesToIndices, RandCropByLabelClasses from monai.transforms.lazy.functional import apply_pending -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose TESTS_INDICES, TESTS_SHAPE = [], [] for p in TEST_NDARRAYS_ALL: diff --git a/tests/test_rand_crop_by_label_classesd.py b/tests/test_rand_crop_by_label_classesd.py index 8908c456ee..4fd415ec59 100644 --- a/tests/test_rand_crop_by_label_classesd.py +++ b/tests/test_rand_crop_by_label_classesd.py @@ -19,7 +19,7 @@ from monai.data.meta_tensor import MetaTensor from monai.transforms import ClassesToIndicesd, RandCropByLabelClassesd from monai.transforms.lazy.functional import apply_pending -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose TESTS = [] for p in TEST_NDARRAYS_ALL: diff --git a/tests/test_rand_crop_by_pos_neg_label.py b/tests/test_rand_crop_by_pos_neg_label.py index 66e7a5e849..ef7ae44987 100644 --- a/tests/test_rand_crop_by_pos_neg_label.py +++ b/tests/test_rand_crop_by_pos_neg_label.py @@ -20,7 +20,7 @@ from monai.data.meta_tensor import MetaTensor from monai.transforms import RandCropByPosNegLabel from monai.transforms.lazy.functional import apply_pending -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose TESTS = [ [ diff --git a/tests/test_rand_crop_by_pos_neg_labeld.py b/tests/test_rand_crop_by_pos_neg_labeld.py index 11381e226d..4a1b152d95 100644 --- a/tests/test_rand_crop_by_pos_neg_labeld.py +++ b/tests/test_rand_crop_by_pos_neg_labeld.py @@ -20,7 +20,7 @@ from monai.data.meta_tensor import MetaTensor from monai.transforms 
import RandCropByPosNegLabeld from monai.transforms.lazy.functional import apply_pending -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose TESTS = [ [ diff --git a/tests/test_rand_cucim_dict_transform.py b/tests/test_rand_cucim_dict_transform.py index 3f473897dd..d5cb1ad1c6 100644 --- a/tests/test_rand_cucim_dict_transform.py +++ b/tests/test_rand_cucim_dict_transform.py @@ -18,7 +18,7 @@ from monai.transforms import RandCuCIMd from monai.utils import optional_import, set_determinism -from tests.utils import HAS_CUPY, skip_if_no_cuda +from tests.test_utils import HAS_CUPY, skip_if_no_cuda _, has_cut = optional_import("cucim.core.operations.expose.transform") cp, _ = optional_import("cupy") diff --git a/tests/test_rand_cucim_transform.py b/tests/test_rand_cucim_transform.py index ce731a05ae..a7b4d8aecd 100644 --- a/tests/test_rand_cucim_transform.py +++ b/tests/test_rand_cucim_transform.py @@ -18,7 +18,7 @@ from monai.transforms import RandCuCIM from monai.utils import optional_import, set_determinism -from tests.utils import HAS_CUPY, skip_if_no_cuda +from tests.test_utils import HAS_CUPY, skip_if_no_cuda _, has_cut = optional_import("cucim.core.operations.expose.transform") cp, _ = optional_import("cupy") diff --git a/tests/test_rand_deform_grid.py b/tests/test_rand_deform_grid.py index 88fc1333ec..53a9e1195f 100644 --- a/tests/test_rand_deform_grid.py +++ b/tests/test_rand_deform_grid.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import RandDeformGrid -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose TEST_CASES = [ [ diff --git a/tests/test_rand_elastic_2d.py b/tests/test_rand_elastic_2d.py index 1f3d389a93..7c3eefc389 100644 --- a/tests/test_rand_elastic_2d.py +++ b/tests/test_rand_elastic_2d.py @@ -19,7 +19,7 @@ from monai.data import MetaTensor, set_track_meta from monai.transforms import Rand2DElastic -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose, is_tf32_env +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose, is_tf32_env _rtol = 5e-3 if is_tf32_env() else 1e-4 diff --git a/tests/test_rand_elastic_3d.py b/tests/test_rand_elastic_3d.py index 5bfa8a6e83..df60bae710 100644 --- a/tests/test_rand_elastic_3d.py +++ b/tests/test_rand_elastic_3d.py @@ -19,7 +19,7 @@ from monai.data import MetaTensor, set_track_meta from monai.transforms import Rand3DElastic -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose TESTS = [] for p in TEST_NDARRAYS_ALL: diff --git a/tests/test_rand_elasticd_2d.py b/tests/test_rand_elasticd_2d.py index 10aa116192..8a2b189531 100644 --- a/tests/test_rand_elasticd_2d.py +++ b/tests/test_rand_elasticd_2d.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.transforms import Rand2DElasticd -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose, is_tf32_env +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose, is_tf32_env _rtol = 5e-3 if is_tf32_env() else 1e-4 diff --git a/tests/test_rand_elasticd_3d.py b/tests/test_rand_elasticd_3d.py index 3838f43f29..5d9242373c 100644 --- a/tests/test_rand_elasticd_3d.py +++ b/tests/test_rand_elasticd_3d.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.transforms import Rand3DElasticd -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose TESTS = [] for p in 
TEST_NDARRAYS_ALL: diff --git a/tests/test_rand_flip.py b/tests/test_rand_flip.py index faeae94cab..e15cd0b652 100644 --- a/tests/test_rand_flip.py +++ b/tests/test_rand_flip.py @@ -20,7 +20,7 @@ from monai.data import MetaTensor, set_track_meta from monai.transforms import RandFlip from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import TEST_NDARRAYS_ALL, NumpyImageTestCase2D, assert_allclose, test_local_inversion +from tests.test_utils import TEST_NDARRAYS_ALL, NumpyImageTestCase2D, assert_allclose, test_local_inversion INVALID_CASES = [("wrong_axis", ["s", 1], TypeError), ("not_numbers", "s", TypeError)] diff --git a/tests/test_rand_flipd.py b/tests/test_rand_flipd.py index a34aa58ed2..e234eafbf0 100644 --- a/tests/test_rand_flipd.py +++ b/tests/test_rand_flipd.py @@ -20,7 +20,7 @@ from monai.data import MetaTensor, set_track_meta from monai.transforms import RandFlipd from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import TEST_NDARRAYS_ALL, NumpyImageTestCase2D, assert_allclose, test_local_inversion +from tests.test_utils import TEST_NDARRAYS_ALL, NumpyImageTestCase2D, assert_allclose, test_local_inversion VALID_CASES = [("no_axis", None), ("one_axis", 1), ("many_axis", [0, 1])] diff --git a/tests/test_rand_gaussian_noise.py b/tests/test_rand_gaussian_noise.py index 233b4dd1b6..e2f04acb94 100644 --- a/tests/test_rand_gaussian_noise.py +++ b/tests/test_rand_gaussian_noise.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.transforms import RandGaussianNoise -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_rand_gaussian_noised.py b/tests/test_rand_gaussian_noised.py index e3df196be2..2f3d97db25 100644 --- a/tests/test_rand_gaussian_noised.py +++ b/tests/test_rand_gaussian_noised.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.transforms import RandGaussianNoised -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_rand_gaussian_sharpen.py b/tests/test_rand_gaussian_sharpen.py index ee8604c14b..470be5bc98 100644 --- a/tests/test_rand_gaussian_sharpen.py +++ b/tests/test_rand_gaussian_sharpen.py @@ -16,7 +16,7 @@ from parameterized import parameterized from monai.transforms import RandGaussianSharpen -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] diff --git a/tests/test_rand_gaussian_sharpend.py b/tests/test_rand_gaussian_sharpend.py index b9bae529db..564b79bb36 100644 --- a/tests/test_rand_gaussian_sharpend.py +++ b/tests/test_rand_gaussian_sharpend.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import RandGaussianSharpend -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_rand_gaussian_smooth.py b/tests/test_rand_gaussian_smooth.py index 8bb36ca0fa..1edb303bbc 100644 --- a/tests/test_rand_gaussian_smooth.py +++ b/tests/test_rand_gaussian_smooth.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import RandGaussianSmooth -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for p 
in TEST_NDARRAYS: diff --git a/tests/test_rand_gaussian_smoothd.py b/tests/test_rand_gaussian_smoothd.py index a93b355184..10f26173db 100644 --- a/tests/test_rand_gaussian_smoothd.py +++ b/tests/test_rand_gaussian_smoothd.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import RandGaussianSmoothd -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_rand_gibbs_noise.py b/tests/test_rand_gibbs_noise.py index 5ef249a1f4..b779426206 100644 --- a/tests/test_rand_gibbs_noise.py +++ b/tests/test_rand_gibbs_noise.py @@ -21,7 +21,7 @@ from monai.transforms import RandGibbsNoise from monai.utils.misc import set_determinism from monai.utils.module import optional_import -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose _, has_torch_fft = optional_import("torch.fft", name="fftshift") diff --git a/tests/test_rand_gibbs_noised.py b/tests/test_rand_gibbs_noised.py index 382290dd39..47762fae4d 100644 --- a/tests/test_rand_gibbs_noised.py +++ b/tests/test_rand_gibbs_noised.py @@ -21,7 +21,7 @@ from monai.transforms import RandGibbsNoised from monai.utils.misc import set_determinism from monai.utils.module import optional_import -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose _, has_torch_fft = optional_import("torch.fft", name="fftshift") diff --git a/tests/test_rand_grid_distortion.py b/tests/test_rand_grid_distortion.py index e07c311b25..98b470c468 100644 --- a/tests/test_rand_grid_distortion.py +++ b/tests/test_rand_grid_distortion.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import RandGridDistortion -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose TESTS = [] for p in TEST_NDARRAYS_ALL: diff --git a/tests/test_rand_grid_distortiond.py b/tests/test_rand_grid_distortiond.py index f28e0ae86e..ad03dd4642 100644 --- a/tests/test_rand_grid_distortiond.py +++ b/tests/test_rand_grid_distortiond.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import RandGridDistortiond -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose TESTS = [] num_cells = 2 diff --git a/tests/test_rand_grid_patch.py b/tests/test_rand_grid_patch.py index 26863f01b2..efa4491375 100644 --- a/tests/test_rand_grid_patch.py +++ b/tests/test_rand_grid_patch.py @@ -20,7 +20,7 @@ from monai.data import MetaTensor, set_track_meta from monai.transforms.spatial.array import RandGridPatch from monai.utils import set_determinism -from tests.utils import TEST_NDARRAYS, SkipIfBeforePyTorchVersion, assert_allclose +from tests.test_utils import TEST_NDARRAYS, SkipIfBeforePyTorchVersion, assert_allclose A = np.arange(16).repeat(3).reshape(4, 4, 3).transpose(2, 0, 1) A11 = A[:, :2, :2] diff --git a/tests/test_rand_grid_patchd.py b/tests/test_rand_grid_patchd.py index 031e834512..bc763b27b0 100644 --- a/tests/test_rand_grid_patchd.py +++ b/tests/test_rand_grid_patchd.py @@ -19,7 +19,7 @@ from monai.transforms.spatial.dictionary import RandGridPatchd from monai.utils import set_determinism -from tests.utils import TEST_NDARRAYS, SkipIfBeforePyTorchVersion, assert_allclose +from tests.test_utils import TEST_NDARRAYS, SkipIfBeforePyTorchVersion, 
assert_allclose A = np.arange(16).repeat(3).reshape(4, 4, 3).transpose(2, 0, 1) A11 = A[:, :2, :2] diff --git a/tests/test_rand_histogram_shift.py b/tests/test_rand_histogram_shift.py index 785e24e53b..fceca8098a 100644 --- a/tests/test_rand_histogram_shift.py +++ b/tests/test_rand_histogram_shift.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.transforms import RandHistogramShift -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_rand_histogram_shiftd.py b/tests/test_rand_histogram_shiftd.py index fced270e90..5e971a1827 100644 --- a/tests/test_rand_histogram_shiftd.py +++ b/tests/test_rand_histogram_shiftd.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms.intensity.dictionary import RandHistogramShiftd -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_rand_k_space_spike_noise.py b/tests/test_rand_k_space_spike_noise.py index 7a9dd4288d..3096896ac6 100644 --- a/tests/test_rand_k_space_spike_noise.py +++ b/tests/test_rand_k_space_spike_noise.py @@ -19,7 +19,7 @@ from monai.data.synthetic import create_test_image_2d, create_test_image_3d from monai.transforms import KSpaceSpikeNoise, RandKSpaceSpikeNoise from monai.utils.misc import set_determinism -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for shape in ((128, 64), (64, 48, 80)): diff --git a/tests/test_rand_k_space_spike_noised.py b/tests/test_rand_k_space_spike_noised.py index 86d4256637..12ad15f3cc 100644 --- a/tests/test_rand_k_space_spike_noised.py +++ b/tests/test_rand_k_space_spike_noised.py @@ -19,7 +19,7 @@ from monai.data.synthetic import create_test_image_2d, create_test_image_3d from monai.transforms import RandKSpaceSpikeNoised from monai.utils.misc import set_determinism -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for shape in ((128, 64), (64, 48, 80)): diff --git a/tests/test_rand_lambda.py b/tests/test_rand_lambda.py index 98a324aec5..fe89202fef 100644 --- a/tests/test_rand_lambda.py +++ b/tests/test_rand_lambda.py @@ -20,7 +20,7 @@ from monai.data.meta_tensor import MetaTensor from monai.transforms.transform import Randomizable from monai.transforms.utility.array import RandLambda -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose class RandTest(Randomizable): diff --git a/tests/test_rand_lambdad.py b/tests/test_rand_lambdad.py index 5247d79843..19049c288e 100644 --- a/tests/test_rand_lambdad.py +++ b/tests/test_rand_lambdad.py @@ -20,7 +20,7 @@ from monai.data.meta_tensor import MetaTensor from monai.transforms.transform import Randomizable from monai.transforms.utility.dictionary import RandLambdad -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose class RandTest(Randomizable): diff --git a/tests/test_rand_rician_noise.py b/tests/test_rand_rician_noise.py index 8dd1c48e29..013d76656d 100644 --- a/tests/test_rand_rician_noise.py +++ b/tests/test_rand_rician_noise.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.transforms import RandRicianNoise -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D 
+from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_rand_rician_noised.py b/tests/test_rand_rician_noised.py index a190ba866d..9132d191e3 100644 --- a/tests/test_rand_rician_noised.py +++ b/tests/test_rand_rician_noised.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.transforms import RandRicianNoised -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_rand_rotate.py b/tests/test_rand_rotate.py index c54229dcfe..41ac3f8179 100644 --- a/tests/test_rand_rotate.py +++ b/tests/test_rand_rotate.py @@ -22,7 +22,7 @@ from monai.data import MetaTensor, set_track_meta from monai.transforms import RandRotate from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import ( +from tests.test_utils import ( TEST_NDARRAYS_ALL, NumpyImageTestCase2D, NumpyImageTestCase3D, diff --git a/tests/test_rand_rotate90.py b/tests/test_rand_rotate90.py index be2e658b78..864cc3789d 100644 --- a/tests/test_rand_rotate90.py +++ b/tests/test_rand_rotate90.py @@ -19,7 +19,7 @@ from monai.data import MetaTensor, set_track_meta from monai.transforms import RandRotate90 from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import TEST_NDARRAYS_ALL, NumpyImageTestCase2D, assert_allclose, test_local_inversion +from tests.test_utils import TEST_NDARRAYS_ALL, NumpyImageTestCase2D, assert_allclose, test_local_inversion class TestRandRotate90(NumpyImageTestCase2D): diff --git a/tests/test_rand_rotate90d.py b/tests/test_rand_rotate90d.py index 02836b5dd8..c521a36e4c 100644 --- a/tests/test_rand_rotate90d.py +++ b/tests/test_rand_rotate90d.py @@ -19,7 +19,7 @@ from monai.data import MetaTensor, set_track_meta from monai.transforms import RandRotate90d from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import TEST_NDARRAYS_ALL, NumpyImageTestCase2D, assert_allclose, test_local_inversion +from tests.test_utils import TEST_NDARRAYS_ALL, NumpyImageTestCase2D, assert_allclose, test_local_inversion class TestRandRotate90d(NumpyImageTestCase2D): diff --git a/tests/test_rand_rotated.py b/tests/test_rand_rotated.py index 71d0f67b63..1849cf0b00 100644 --- a/tests/test_rand_rotated.py +++ b/tests/test_rand_rotated.py @@ -22,7 +22,7 @@ from monai.transforms import RandRotated from monai.utils import GridSampleMode, GridSamplePadMode from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import TEST_NDARRAYS_ALL, NumpyImageTestCase2D, NumpyImageTestCase3D, test_local_inversion +from tests.test_utils import TEST_NDARRAYS_ALL, NumpyImageTestCase2D, NumpyImageTestCase3D, test_local_inversion TEST_CASES_2D: list[tuple] = [] for p in TEST_NDARRAYS_ALL: diff --git a/tests/test_rand_scale_crop.py b/tests/test_rand_scale_crop.py index bf43273fcf..464ab4438d 100644 --- a/tests/test_rand_scale_crop.py +++ b/tests/test_rand_scale_crop.py @@ -18,7 +18,7 @@ from monai.transforms import RandScaleCrop from tests.croppers import CropTest -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose TEST_SHAPES = [ [{"roi_scale": [1.0, 1.0, -1.0], "random_center": True}, (3, 3, 3, 4), (3, 3, 3, 4)], diff --git a/tests/test_rand_scale_cropd.py b/tests/test_rand_scale_cropd.py index 15a48a55d7..27d3e5a44f 100644 --- a/tests/test_rand_scale_cropd.py +++ 
b/tests/test_rand_scale_cropd.py @@ -18,7 +18,7 @@ from monai.transforms import RandScaleCropd from tests.croppers import CropTest -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose TEST_SHAPES = [ [{"keys": "img", "roi_scale": [1.0, 1.0, -1.0], "random_center": True}, (3, 3, 3, 4), (3, 3, 3, 4)], diff --git a/tests/test_rand_scale_intensity.py b/tests/test_rand_scale_intensity.py index 7e999c00b3..febbe0058a 100644 --- a/tests/test_rand_scale_intensity.py +++ b/tests/test_rand_scale_intensity.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import RandScaleIntensity -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose class TestRandScaleIntensity(NumpyImageTestCase2D): diff --git a/tests/test_rand_scale_intensity_fixed_mean.py b/tests/test_rand_scale_intensity_fixed_mean.py index 9324c711fa..4acec4fb5d 100644 --- a/tests/test_rand_scale_intensity_fixed_mean.py +++ b/tests/test_rand_scale_intensity_fixed_mean.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import RandScaleIntensityFixedMean -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose class TestRandScaleIntensity(NumpyImageTestCase2D): diff --git a/tests/test_rand_scale_intensity_fixed_meand.py b/tests/test_rand_scale_intensity_fixed_meand.py index 8c127ac130..66058943e8 100644 --- a/tests/test_rand_scale_intensity_fixed_meand.py +++ b/tests/test_rand_scale_intensity_fixed_meand.py @@ -16,7 +16,7 @@ import numpy as np from monai.transforms import RandScaleIntensityFixedMeand -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose class TestRandScaleIntensityFixedMeand(NumpyImageTestCase2D): diff --git a/tests/test_rand_scale_intensityd.py b/tests/test_rand_scale_intensityd.py index 32c96f0313..4867369838 100644 --- a/tests/test_rand_scale_intensityd.py +++ b/tests/test_rand_scale_intensityd.py @@ -16,7 +16,7 @@ import numpy as np from monai.transforms import RandScaleIntensityd -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose class TestRandScaleIntensityd(NumpyImageTestCase2D): diff --git a/tests/test_rand_shift_intensity.py b/tests/test_rand_shift_intensity.py index 907773ccf5..0e1ab77fed 100644 --- a/tests/test_rand_shift_intensity.py +++ b/tests/test_rand_shift_intensity.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import RandShiftIntensity -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose class TestRandShiftIntensity(NumpyImageTestCase2D): diff --git a/tests/test_rand_shift_intensityd.py b/tests/test_rand_shift_intensityd.py index 51675e324c..af4c1648d3 100644 --- a/tests/test_rand_shift_intensityd.py +++ b/tests/test_rand_shift_intensityd.py @@ -17,7 +17,7 @@ from monai.transforms import IntensityStatsd, RandShiftIntensityd from monai.utils.enums import PostFix -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, 
assert_allclose class TestRandShiftIntensityd(NumpyImageTestCase2D): diff --git a/tests/test_rand_simulate_low_resolution.py b/tests/test_rand_simulate_low_resolution.py index 6aa586fb0b..79e09b3f74 100644 --- a/tests/test_rand_simulate_low_resolution.py +++ b/tests/test_rand_simulate_low_resolution.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import RandSimulateLowResolution -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_rand_simulate_low_resolutiond.py b/tests/test_rand_simulate_low_resolutiond.py index 5ec84eba1d..5b199a26f2 100644 --- a/tests/test_rand_simulate_low_resolutiond.py +++ b/tests/test_rand_simulate_low_resolutiond.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import RandSimulateLowResolutiond -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_rand_spatial_crop.py b/tests/test_rand_spatial_crop.py index df121e2220..03d56daf61 100644 --- a/tests/test_rand_spatial_crop.py +++ b/tests/test_rand_spatial_crop.py @@ -20,7 +20,7 @@ from monai.transforms import RandScaleCrop, RandSpatialCrop from monai.transforms.lazy.functional import apply_pending from tests.croppers import CropTest -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose TEST_SHAPES = [ [{"roi_size": [3, 3, -1], "random_center": True}, (3, 3, 3, 4), (3, 3, 3, 4)], diff --git a/tests/test_rand_spatial_crop_samples.py b/tests/test_rand_spatial_crop_samples.py index 92f0f9d9be..4a1d8697d4 100644 --- a/tests/test_rand_spatial_crop_samples.py +++ b/tests/test_rand_spatial_crop_samples.py @@ -20,7 +20,7 @@ from monai.transforms import RandSpatialCropSamples from monai.transforms.lazy.functional import apply_pending from tests.croppers import CropTest -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose TEST_CASE_1 = [ {"roi_size": [3, 3, 3], "num_samples": 4, "random_center": True, "random_size": False}, diff --git a/tests/test_rand_spatial_crop_samplesd.py b/tests/test_rand_spatial_crop_samplesd.py index cb53e94b7d..80600f769f 100644 --- a/tests/test_rand_spatial_crop_samplesd.py +++ b/tests/test_rand_spatial_crop_samplesd.py @@ -19,7 +19,7 @@ from monai.data.meta_tensor import MetaTensor from monai.transforms import Compose, DivisiblePadd, RandSpatialCropSamplesd from monai.transforms.lazy.functional import apply_pending -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose TEST_CASE_1 = [ {"keys": ["img", "seg"], "num_samples": 4, "roi_size": [2, 2, 2], "random_center": True, "random_size": True}, diff --git a/tests/test_rand_spatial_cropd.py b/tests/test_rand_spatial_cropd.py index 123459235f..a3aec5e525 100644 --- a/tests/test_rand_spatial_cropd.py +++ b/tests/test_rand_spatial_cropd.py @@ -20,7 +20,7 @@ from monai.transforms import RandScaleCropd, RandSpatialCropd from monai.transforms.lazy.functional import apply_pending from tests.croppers import CropTest -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose TEST_SHAPES = [ [{"keys": "img", "roi_size": [3, 3, -1], "random_center": True}, (3, 3, 3, 5), (3, 3, 3, 
5)], diff --git a/tests/test_rand_std_shift_intensity.py b/tests/test_rand_std_shift_intensity.py index 0ac5e9482e..66a7c2e4a9 100644 --- a/tests/test_rand_std_shift_intensity.py +++ b/tests/test_rand_std_shift_intensity.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.transforms import RandStdShiftIntensity -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose class TestRandStdShiftIntensity(NumpyImageTestCase2D): diff --git a/tests/test_rand_std_shift_intensityd.py b/tests/test_rand_std_shift_intensityd.py index 1fd0c5d2a8..c90a068641 100644 --- a/tests/test_rand_std_shift_intensityd.py +++ b/tests/test_rand_std_shift_intensityd.py @@ -16,7 +16,7 @@ import numpy as np from monai.transforms import RandStdShiftIntensityd -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose class TestRandStdShiftIntensityd(NumpyImageTestCase2D): diff --git a/tests/test_rand_torchiod.py b/tests/test_rand_torchiod.py index 52bcf7c576..041dec8e08 100644 --- a/tests/test_rand_torchiod.py +++ b/tests/test_rand_torchiod.py @@ -20,7 +20,7 @@ from monai.transforms import RandTorchIOd from monai.utils import optional_import, set_determinism -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose _, has_torchio = optional_import("torchio") diff --git a/tests/test_rand_weighted_crop.py b/tests/test_rand_weighted_crop.py index f509065a56..9d20faacb5 100644 --- a/tests/test_rand_weighted_crop.py +++ b/tests/test_rand_weighted_crop.py @@ -20,7 +20,7 @@ from monai.transforms.croppad.array import RandWeightedCrop from monai.transforms.lazy.functional import apply_pending from tests.croppers import CropTest -from tests.utils import TEST_NDARRAYS_ALL, NumpyImageTestCase2D, NumpyImageTestCase3D, assert_allclose +from tests.test_utils import TEST_NDARRAYS_ALL, NumpyImageTestCase2D, NumpyImageTestCase3D, assert_allclose def get_data(ndim): diff --git a/tests/test_rand_weighted_cropd.py b/tests/test_rand_weighted_cropd.py index a1414df0ac..5c432f8605 100644 --- a/tests/test_rand_weighted_cropd.py +++ b/tests/test_rand_weighted_cropd.py @@ -19,7 +19,7 @@ from monai.data.meta_tensor import MetaTensor from monai.transforms.croppad.dictionary import RandWeightedCropd from monai.transforms.lazy.functional import apply_pending -from tests.utils import TEST_NDARRAYS_ALL, NumpyImageTestCase2D, NumpyImageTestCase3D, assert_allclose +from tests.test_utils import TEST_NDARRAYS_ALL, NumpyImageTestCase2D, NumpyImageTestCase3D, assert_allclose def get_data(ndim): diff --git a/tests/test_rand_zoom.py b/tests/test_rand_zoom.py index 2da04fd652..a1d309bfc6 100644 --- a/tests/test_rand_zoom.py +++ b/tests/test_rand_zoom.py @@ -22,7 +22,7 @@ from monai.transforms import RandZoom from monai.utils import InterpolateMode from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import TEST_NDARRAYS_ALL, NumpyImageTestCase2D, assert_allclose, test_local_inversion +from tests.test_utils import TEST_NDARRAYS_ALL, NumpyImageTestCase2D, assert_allclose, test_local_inversion VALID_CASES = [ (0.8, 1.2, "nearest", False), diff --git a/tests/test_rand_zoomd.py b/tests/test_rand_zoomd.py index bcbf188310..bf98dd6e3e 100644 --- a/tests/test_rand_zoomd.py +++ b/tests/test_rand_zoomd.py @@ -21,7 +21,7 @@ from monai.config import USE_COMPILED from monai.transforms import RandZoomd 
from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import TEST_NDARRAYS_ALL, NumpyImageTestCase2D, assert_allclose, test_local_inversion +from tests.test_utils import TEST_NDARRAYS_ALL, NumpyImageTestCase2D, assert_allclose, test_local_inversion VALID_CASES = [ (0.8, 1.2, "nearest", None, False), diff --git a/tests/test_randidentity.py b/tests/test_randidentity.py index 3a8936f2d2..65df216828 100644 --- a/tests/test_randidentity.py +++ b/tests/test_randidentity.py @@ -15,7 +15,7 @@ import monai.transforms as mt from monai.data import CacheDataset -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose class T(mt.Transform): diff --git a/tests/test_randtorchvisiond.py b/tests/test_randtorchvisiond.py index 7ad06dfd2a..0606e854d5 100644 --- a/tests/test_randtorchvisiond.py +++ b/tests/test_randtorchvisiond.py @@ -18,7 +18,7 @@ from monai.transforms import RandomizableTrait, RandTorchVisiond from monai.utils import set_determinism -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose TEST_CASE_1 = [ {"keys": "img", "name": "ColorJitter"}, diff --git a/tests/test_rankfilter_dist.py b/tests/test_rankfilter_dist.py index fd02e3bdc9..1f4811a3c7 100644 --- a/tests/test_rankfilter_dist.py +++ b/tests/test_rankfilter_dist.py @@ -19,7 +19,7 @@ import torch.distributed as dist from monai.utils import RankFilter -from tests.utils import DistCall, DistTestCase +from tests.test_utils import DistCall, DistTestCase class DistributedRankFilterTest(DistTestCase): diff --git a/tests/test_recon_net_utils.py b/tests/test_recon_net_utils.py index 48d3b59a17..5f4a132624 100644 --- a/tests/test_recon_net_utils.py +++ b/tests/test_recon_net_utils.py @@ -27,7 +27,7 @@ sensitivity_map_expand, sensitivity_map_reduce, ) -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose # no need for checking devices, these functions don't change device format # reshape test case diff --git a/tests/test_reference_based_normalize_intensity.py b/tests/test_reference_based_normalize_intensity.py index 2d946af118..8f3f8f2451 100644 --- a/tests/test_reference_based_normalize_intensity.py +++ b/tests/test_reference_based_normalize_intensity.py @@ -18,7 +18,7 @@ from monai.apps.reconstruction.transforms.dictionary import ReferenceBasedNormalizeIntensityd from monai.utils.type_conversion import convert_to_numpy -from tests.utils import TEST_NDARRAYS_NO_META_TENSOR, assert_allclose +from tests.test_utils import TEST_NDARRAYS_NO_META_TENSOR, assert_allclose # see test_normalize_intensityd for typical tests (like non-zero # normalization, device test, etc.) diff --git a/tests/test_reference_based_spatial_cropd.py b/tests/test_reference_based_spatial_cropd.py index 83cd9c4a5d..13d8177b68 100644 --- a/tests/test_reference_based_spatial_cropd.py +++ b/tests/test_reference_based_spatial_cropd.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.apps.reconstruction.transforms.dictionary import ReferenceBasedSpatialCropd -from tests.utils import TEST_NDARRAYS +from tests.test_utils import TEST_NDARRAYS # see test_spatial_cropd for typical tests (like roi_start, # roi_slices, etc.) 
diff --git a/tests/test_reg_loss_integration.py b/tests/test_reg_loss_integration.py index 8afc2da6ad..c29b29de43 100644 --- a/tests/test_reg_loss_integration.py +++ b/tests/test_reg_loss_integration.py @@ -19,7 +19,7 @@ from parameterized import parameterized from monai.losses import BendingEnergyLoss, GlobalMutualInformationLoss, LocalNormalizedCrossCorrelationLoss -from tests.utils import SkipIfBeforePyTorchVersion +from tests.test_utils import SkipIfBeforePyTorchVersion TEST_CASES = [ [BendingEnergyLoss, {}, ["pred"], 3], diff --git a/tests/test_regularization.py b/tests/test_regularization.py index 12d64637d5..120d574911 100644 --- a/tests/test_regularization.py +++ b/tests/test_regularization.py @@ -17,7 +17,7 @@ import torch from monai.transforms import CutMix, CutMixd, CutOut, CutOutd, MixUp, MixUpd -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose class TestMixup(unittest.TestCase): diff --git a/tests/test_regunet.py b/tests/test_regunet.py index 3100d7660c..1fcab5e554 100644 --- a/tests/test_regunet.py +++ b/tests/test_regunet.py @@ -18,7 +18,7 @@ from monai.networks import eval_mode from monai.networks.nets.regunet import RegUNet -from tests.utils import test_script_save +from tests.test_utils import test_script_save device = "cuda" if torch.cuda.is_available() else "cpu" diff --git a/tests/test_remove_repeated_channel.py b/tests/test_remove_repeated_channel.py index 7da00ee75d..fd03f39c70 100644 --- a/tests/test_remove_repeated_channel.py +++ b/tests/test_remove_repeated_channel.py @@ -16,7 +16,7 @@ from parameterized import parameterized from monai.transforms import RemoveRepeatedChannel -from tests.utils import TEST_NDARRAYS +from tests.test_utils import TEST_NDARRAYS TEST_CASES = [] for q in TEST_NDARRAYS: diff --git a/tests/test_remove_repeated_channeld.py b/tests/test_remove_repeated_channeld.py index 08ec7fb44c..d6c19af212 100644 --- a/tests/test_remove_repeated_channeld.py +++ b/tests/test_remove_repeated_channeld.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import RemoveRepeatedChanneld -from tests.utils import TEST_NDARRAYS +from tests.test_utils import TEST_NDARRAYS TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_remove_small_objects.py b/tests/test_remove_small_objects.py index 633a6d9a99..1324fc55f6 100644 --- a/tests/test_remove_small_objects.py +++ b/tests/test_remove_small_objects.py @@ -21,7 +21,7 @@ from monai.transforms.post.array import RemoveSmallObjects from monai.transforms.post.dictionary import RemoveSmallObjectsd from monai.utils import optional_import -from tests.utils import TEST_NDARRAYS, SkipIfNoModule, assert_allclose +from tests.test_utils import TEST_NDARRAYS, SkipIfNoModule, assert_allclose morphology, has_morphology = optional_import("skimage.morphology") diff --git a/tests/test_repeat_channel.py b/tests/test_repeat_channel.py index 82d1d92bd2..b2fc2ad71d 100644 --- a/tests/test_repeat_channel.py +++ b/tests/test_repeat_channel.py @@ -16,7 +16,7 @@ from parameterized import parameterized from monai.transforms import RepeatChannel -from tests.utils import TEST_NDARRAYS +from tests.test_utils import TEST_NDARRAYS TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_repeat_channeld.py b/tests/test_repeat_channeld.py index 2be13a08d1..b38e09c28d 100644 --- a/tests/test_repeat_channeld.py +++ b/tests/test_repeat_channeld.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import RepeatChanneld -from tests.utils import 
TEST_NDARRAYS +from tests.test_utils import TEST_NDARRAYS TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_replace_module.py b/tests/test_replace_module.py index f3964ac65d..d3fc105292 100644 --- a/tests/test_replace_module.py +++ b/tests/test_replace_module.py @@ -18,7 +18,7 @@ from monai.networks.nets import DenseNet121 from monai.networks.utils import replace_modules, replace_modules_temp -from tests.utils import TEST_DEVICES +from tests.test_utils import TEST_DEVICES TESTS = [] for device in TEST_DEVICES: diff --git a/tests/test_resample.py b/tests/test_resample.py index 68b08b8b87..3c5742d14e 100644 --- a/tests/test_resample.py +++ b/tests/test_resample.py @@ -18,7 +18,7 @@ from monai.transforms.lazy.functional import resample from monai.utils import convert_to_tensor -from tests.utils import assert_allclose, get_arange_img +from tests.test_utils import assert_allclose, get_arange_img def rotate_90_2d(): diff --git a/tests/test_resample_backends.py b/tests/test_resample_backends.py index 7ddd9c7ec2..a920d59b8f 100644 --- a/tests/test_resample_backends.py +++ b/tests/test_resample_backends.py @@ -22,7 +22,7 @@ from monai.transforms import Resample from monai.transforms.utils import create_grid from monai.utils import GridSampleMode, GridSamplePadMode, NdimageMode, SplineMode, convert_to_numpy -from tests.utils import SkipIfBeforePyTorchVersion, assert_allclose, is_tf32_env +from tests.test_utils import SkipIfBeforePyTorchVersion, assert_allclose, is_tf32_env _rtol = 1e-3 if is_tf32_env() else 1e-4 diff --git a/tests/test_resample_to_match.py b/tests/test_resample_to_match.py index f0d34547a7..4420b0b061 100644 --- a/tests/test_resample_to_match.py +++ b/tests/test_resample_to_match.py @@ -30,7 +30,7 @@ from monai.transforms import Compose, EnsureChannelFirstd, LoadImaged, ResampleToMatch, SaveImage, SaveImaged from monai.utils import optional_import from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import assert_allclose, download_url_or_skip_test, testing_data_config +from tests.test_utils import assert_allclose, download_url_or_skip_test, testing_data_config _, has_itk = optional_import("itk", allow_namespace_pkg=True) diff --git a/tests/test_resample_to_matchd.py b/tests/test_resample_to_matchd.py index 9d104bf392..fd8a419bfd 100644 --- a/tests/test_resample_to_matchd.py +++ b/tests/test_resample_to_matchd.py @@ -27,7 +27,7 @@ SaveImaged, ) from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import assert_allclose, download_url_or_skip_test, testing_data_config +from tests.test_utils import assert_allclose, download_url_or_skip_test, testing_data_config def update_fname(d): diff --git a/tests/test_resampler.py b/tests/test_resampler.py index af0db657aa..5e6d7d0e8e 100644 --- a/tests/test_resampler.py +++ b/tests/test_resampler.py @@ -19,7 +19,7 @@ from monai.transforms import Resample from monai.transforms.utils import create_grid -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose TESTS = [] for p in TEST_NDARRAYS_ALL: diff --git a/tests/test_resize.py b/tests/test_resize.py index d4c57e2742..23784f5461 100644 --- a/tests/test_resize.py +++ b/tests/test_resize.py @@ -21,7 +21,7 @@ from monai.data import MetaTensor, set_track_meta from monai.transforms import Resize from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import ( +from tests.test_utils import ( TEST_NDARRAYS_ALL, NumpyImageTestCase2D, 
SkipIfAtLeastPyTorchVersion, diff --git a/tests/test_resize_with_pad_or_crop.py b/tests/test_resize_with_pad_or_crop.py index daf257f89f..c80f7d38e8 100644 --- a/tests/test_resize_with_pad_or_crop.py +++ b/tests/test_resize_with_pad_or_crop.py @@ -20,7 +20,7 @@ from monai.data.meta_tensor import MetaTensor from monai.transforms import ResizeWithPadOrCrop from monai.transforms.lazy.functional import apply_pending -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose, pytorch_after +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose, pytorch_after TEST_CASES = [ [{"spatial_size": [15, 8, 8], "mode": "constant"}, (3, 8, 8, 4), (3, 15, 8, 8), True], diff --git a/tests/test_resize_with_pad_or_cropd.py b/tests/test_resize_with_pad_or_cropd.py index 391e0feb22..04f7c16622 100644 --- a/tests/test_resize_with_pad_or_cropd.py +++ b/tests/test_resize_with_pad_or_cropd.py @@ -22,7 +22,7 @@ from monai.transforms import ResizeWithPadOrCropd from monai.transforms.lazy.functional import apply_pending from tests.test_resize_with_pad_or_crop import TESTS_PENDING_MODE -from tests.utils import TEST_NDARRAYS_ALL, assert_allclose, pytorch_after +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose, pytorch_after TEST_CASES = [ [{"keys": "img", "spatial_size": [15, 8, 8], "mode": "constant"}, {"img": np.zeros((3, 8, 8, 4))}, (3, 15, 8, 8)], diff --git a/tests/test_resized.py b/tests/test_resized.py index 243a4e6622..35491a9eb0 100644 --- a/tests/test_resized.py +++ b/tests/test_resized.py @@ -21,7 +21,7 @@ from monai.data import MetaTensor, set_track_meta from monai.transforms import Invertd, Resize, Resized from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import ( +from tests.test_utils import ( TEST_NDARRAYS_ALL, NumpyImageTestCase2D, SkipIfAtLeastPyTorchVersion, diff --git a/tests/test_resnet.py b/tests/test_resnet.py index a55d18f5de..ad5ee322e4 100644 --- a/tests/test_resnet.py +++ b/tests/test_resnet.py @@ -37,7 +37,7 @@ ) from monai.networks.nets.resnet import ResNetBlock from monai.utils import optional_import -from tests.utils import ( +from tests.test_utils import ( SkipIfNoModule, equal_state_dict, skip_if_downloading_fails, diff --git a/tests/test_retinanet.py b/tests/test_retinanet.py index f36708d5b3..a24f5b208c 100644 --- a/tests/test_retinanet.py +++ b/tests/test_retinanet.py @@ -20,7 +20,7 @@ from monai.networks import eval_mode from monai.networks.nets import resnet10, resnet18, resnet34, resnet50, resnet101, resnet152, resnet200 from monai.utils import ensure_tuple, optional_import -from tests.utils import SkipIfBeforePyTorchVersion, skip_if_quick, test_onnx_save, test_script_save +from tests.test_utils import SkipIfBeforePyTorchVersion, skip_if_quick, test_onnx_save, test_script_save _, has_torchvision = optional_import("torchvision") diff --git a/tests/test_retinanet_detector.py b/tests/test_retinanet_detector.py index 691254fd87..e5ff7e211a 100644 --- a/tests/test_retinanet_detector.py +++ b/tests/test_retinanet_detector.py @@ -21,7 +21,7 @@ from monai.apps.detection.utils.anchor_utils import AnchorGeneratorWithAnchorShape from monai.networks import eval_mode, train_mode from monai.utils import optional_import -from tests.utils import SkipIfBeforePyTorchVersion, skip_if_quick, test_script_save +from tests.test_utils import SkipIfBeforePyTorchVersion, skip_if_quick, test_script_save _, has_torchvision = optional_import("torchvision") diff --git a/tests/test_rotate.py b/tests/test_rotate.py index 19fbd1409f..fda1d212a8 100644 --- 
a/tests/test_rotate.py +++ b/tests/test_rotate.py @@ -22,7 +22,13 @@ from monai.data import MetaTensor, set_track_meta from monai.transforms import Rotate from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import HAS_CUPY, TEST_NDARRAYS_ALL, NumpyImageTestCase2D, NumpyImageTestCase3D, test_local_inversion +from tests.test_utils import ( + HAS_CUPY, + TEST_NDARRAYS_ALL, + NumpyImageTestCase2D, + NumpyImageTestCase3D, + test_local_inversion, +) TEST_CASES_2D: list[tuple] = [] for p in TEST_NDARRAYS_ALL: diff --git a/tests/test_rotate90.py b/tests/test_rotate90.py index ebc3fba7e0..93e4f19603 100644 --- a/tests/test_rotate90.py +++ b/tests/test_rotate90.py @@ -21,7 +21,7 @@ from monai.transforms.lazy.functional import apply_pending from monai.utils import optional_import from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import ( +from tests.test_utils import ( TEST_NDARRAYS_ALL, NumpyImageTestCase2D, NumpyImageTestCase3D, diff --git a/tests/test_rotate90d.py b/tests/test_rotate90d.py index ffe920992a..09adfd3411 100644 --- a/tests/test_rotate90d.py +++ b/tests/test_rotate90d.py @@ -18,7 +18,7 @@ from monai.data import MetaTensor, set_track_meta from monai.transforms import Rotate90d from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import TEST_NDARRAYS_ALL, NumpyImageTestCase2D, assert_allclose, test_local_inversion +from tests.test_utils import TEST_NDARRAYS_ALL, NumpyImageTestCase2D, assert_allclose, test_local_inversion class TestRotate90d(NumpyImageTestCase2D): diff --git a/tests/test_rotated.py b/tests/test_rotated.py index 28ca755661..904cf3718c 100644 --- a/tests/test_rotated.py +++ b/tests/test_rotated.py @@ -22,7 +22,7 @@ from monai.data import MetaTensor from monai.transforms import Rotated from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import TEST_NDARRAYS_ALL, NumpyImageTestCase2D, NumpyImageTestCase3D, test_local_inversion +from tests.test_utils import TEST_NDARRAYS_ALL, NumpyImageTestCase2D, NumpyImageTestCase3D, test_local_inversion TEST_CASES_2D: list[tuple] = [] for p in TEST_NDARRAYS_ALL: diff --git a/tests/test_safe_dtype_range.py b/tests/test_safe_dtype_range.py index 61b55635ae..ffbf5dba7d 100644 --- a/tests/test_safe_dtype_range.py +++ b/tests/test_safe_dtype_range.py @@ -19,7 +19,7 @@ from monai.utils import optional_import from monai.utils.type_conversion import get_equivalent_dtype, safe_dtype_range -from tests.utils import HAS_CUPY, TEST_NDARRAYS_ALL, assert_allclose +from tests.test_utils import HAS_CUPY, TEST_NDARRAYS_ALL, assert_allclose cp, _ = optional_import("cupy") diff --git a/tests/test_sample_slices.py b/tests/test_sample_slices.py index a183689970..79ebcbda05 100644 --- a/tests/test_sample_slices.py +++ b/tests/test_sample_slices.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.utils import sample_slices -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose # test data[:, [1, ], ...] 
TEST_CASE_1 = [torch.tensor([[[0, 2], [1, 0]]]), 1, True, (1,), torch.tensor([[[1, 0]]])] diff --git a/tests/test_sampler_dist.py b/tests/test_sampler_dist.py index b8bd1c7a9f..cd0dbc07e2 100644 --- a/tests/test_sampler_dist.py +++ b/tests/test_sampler_dist.py @@ -20,7 +20,7 @@ from monai.data import CacheDataset, DataLoader, DistributedSampler from monai.transforms import ToTensor -from tests.utils import DistCall, DistTestCase, assert_allclose +from tests.test_utils import DistCall, DistTestCase, assert_allclose class DistributedSamplerTest(DistTestCase): diff --git a/tests/test_savitzky_golay_filter.py b/tests/test_savitzky_golay_filter.py index 7c60287e2d..caa1b5c0af 100644 --- a/tests/test_savitzky_golay_filter.py +++ b/tests/test_savitzky_golay_filter.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.networks.layers import SavitzkyGolayFilter -from tests.utils import skip_if_no_cuda +from tests.test_utils import skip_if_no_cuda # Zero-padding trivial tests diff --git a/tests/test_savitzky_golay_smooth.py b/tests/test_savitzky_golay_smooth.py index 14e403e238..7516f40029 100644 --- a/tests/test_savitzky_golay_smooth.py +++ b/tests/test_savitzky_golay_smooth.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import SavitzkyGolaySmooth -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose # Zero-padding trivial tests diff --git a/tests/test_savitzky_golay_smoothd.py b/tests/test_savitzky_golay_smoothd.py index 3bb4056046..f347e7a017 100644 --- a/tests/test_savitzky_golay_smoothd.py +++ b/tests/test_savitzky_golay_smoothd.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import SavitzkyGolaySmoothd -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose # Zero-padding trivial tests diff --git a/tests/test_scale_intensity.py b/tests/test_scale_intensity.py index 17dfe305b2..42ea598369 100644 --- a/tests/test_scale_intensity.py +++ b/tests/test_scale_intensity.py @@ -18,7 +18,7 @@ from monai.transforms import ScaleIntensity from monai.transforms.utils import rescale_array -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose class TestScaleIntensity(NumpyImageTestCase2D): diff --git a/tests/test_scale_intensity_fixed_mean.py b/tests/test_scale_intensity_fixed_mean.py index 35d38ef0b1..da82dc8f5c 100644 --- a/tests/test_scale_intensity_fixed_mean.py +++ b/tests/test_scale_intensity_fixed_mean.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import ScaleIntensityFixedMean -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose class TestScaleIntensityFixedMean(NumpyImageTestCase2D): diff --git a/tests/test_scale_intensity_range.py b/tests/test_scale_intensity_range.py index 6013a237db..cb4df12a93 100644 --- a/tests/test_scale_intensity_range.py +++ b/tests/test_scale_intensity_range.py @@ -16,7 +16,7 @@ import numpy as np from monai.transforms import ScaleIntensityRange -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose class IntensityScaleIntensityRange(NumpyImageTestCase2D): diff --git 
a/tests/test_scale_intensity_range_percentiles.py b/tests/test_scale_intensity_range_percentiles.py index a7390efe72..bd26497f3e 100644 --- a/tests/test_scale_intensity_range_percentiles.py +++ b/tests/test_scale_intensity_range_percentiles.py @@ -17,7 +17,7 @@ import torch from monai.transforms.intensity.array import ScaleIntensityRangePercentiles -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose class TestScaleIntensityRangePercentiles(NumpyImageTestCase2D): diff --git a/tests/test_scale_intensity_range_percentilesd.py b/tests/test_scale_intensity_range_percentilesd.py index ab0347fbbf..2dd1642cff 100644 --- a/tests/test_scale_intensity_range_percentilesd.py +++ b/tests/test_scale_intensity_range_percentilesd.py @@ -16,7 +16,7 @@ import numpy as np from monai.transforms.intensity.dictionary import ScaleIntensityRangePercentilesd -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose class TestScaleIntensityRangePercentilesd(NumpyImageTestCase2D): diff --git a/tests/test_scale_intensity_ranged.py b/tests/test_scale_intensity_ranged.py index cc3f1220e7..16477bcf61 100644 --- a/tests/test_scale_intensity_ranged.py +++ b/tests/test_scale_intensity_ranged.py @@ -14,7 +14,7 @@ import unittest from monai.transforms import ScaleIntensityRanged -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose class IntensityScaleIntensityRanged(NumpyImageTestCase2D): diff --git a/tests/test_scale_intensityd.py b/tests/test_scale_intensityd.py index 88beece894..ef6b9b587c 100644 --- a/tests/test_scale_intensityd.py +++ b/tests/test_scale_intensityd.py @@ -16,7 +16,7 @@ import numpy as np from monai.transforms import ScaleIntensityd -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose class TestScaleIntensityd(NumpyImageTestCase2D): diff --git a/tests/test_scheduler_ddim.py b/tests/test_scheduler_ddim.py index 1a8f8cab67..aa246c3ee1 100644 --- a/tests/test_scheduler_ddim.py +++ b/tests/test_scheduler_ddim.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.networks.schedulers import DDIMScheduler -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose TEST_2D_CASE = [] for beta_schedule in ["linear_beta", "scaled_linear_beta"]: diff --git a/tests/test_scheduler_ddpm.py b/tests/test_scheduler_ddpm.py index f0447aded2..60ccde1439 100644 --- a/tests/test_scheduler_ddpm.py +++ b/tests/test_scheduler_ddpm.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.networks.schedulers import DDPMScheduler -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose TEST_2D_CASE = [] for beta_schedule in ["linear_beta", "scaled_linear_beta"]: diff --git a/tests/test_scheduler_pndm.py b/tests/test_scheduler_pndm.py index 69e5e403f5..9f48e2e383 100644 --- a/tests/test_scheduler_pndm.py +++ b/tests/test_scheduler_pndm.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.networks.schedulers import PNDMScheduler -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose TEST_2D_CASE = [] for beta_schedule in ["linear_beta", "scaled_linear_beta"]: diff --git 
a/tests/test_se_block.py b/tests/test_se_block.py index ca60643635..0b0ac63f16 100644 --- a/tests/test_se_block.py +++ b/tests/test_se_block.py @@ -19,7 +19,7 @@ from monai.networks import eval_mode from monai.networks.blocks import SEBlock from monai.networks.layers.factories import Act, Norm -from tests.utils import test_script_save +from tests.test_utils import test_script_save device = "cuda" if torch.cuda.is_available() else "cpu" diff --git a/tests/test_se_blocks.py b/tests/test_se_blocks.py index c1e72749cc..12d4d1a36d 100644 --- a/tests/test_se_blocks.py +++ b/tests/test_se_blocks.py @@ -18,7 +18,7 @@ from monai.networks import eval_mode from monai.networks.blocks import ChannelSELayer, ResidualSELayer -from tests.utils import test_script_save +from tests.test_utils import test_script_save TEST_CASES = [ # single channel 3D, batch 16 [{"spatial_dims": 2, "in_channels": 4, "r": 3}, (7, 4, 64, 48), (7, 4, 64, 48)], # 4-channel 2D, batch 7 diff --git a/tests/test_segresnet.py b/tests/test_segresnet.py index 728699c434..82f530cb8d 100644 --- a/tests/test_segresnet.py +++ b/tests/test_segresnet.py @@ -19,7 +19,7 @@ from monai.networks import eval_mode from monai.networks.nets import SegResNet, SegResNetVAE from monai.utils import UpsampleMode -from tests.utils import test_script_save +from tests.test_utils import test_script_save device = "cuda" if torch.cuda.is_available() else "cpu" diff --git a/tests/test_segresnet_ds.py b/tests/test_segresnet_ds.py index eab7bac9a0..858d958f1c 100644 --- a/tests/test_segresnet_ds.py +++ b/tests/test_segresnet_ds.py @@ -18,7 +18,7 @@ from monai.networks import eval_mode from monai.networks.nets import SegResNetDS, SegResNetDS2 -from tests.utils import SkipIfBeforePyTorchVersion, test_script_save +from tests.test_utils import SkipIfBeforePyTorchVersion, test_script_save device = "cuda" if torch.cuda.is_available() else "cpu" TEST_CASE_SEGRESNET_DS = [] diff --git a/tests/test_selfattention.py b/tests/test_selfattention.py index 338f1bf840..21302141e0 100644 --- a/tests/test_selfattention.py +++ b/tests/test_selfattention.py @@ -22,7 +22,7 @@ from monai.networks.blocks.selfattention import SABlock from monai.networks.layers.factories import RelPosEmbedding from monai.utils import optional_import -from tests.utils import SkipIfBeforePyTorchVersion, assert_allclose, test_script_save +from tests.test_utils import SkipIfBeforePyTorchVersion, assert_allclose, test_script_save einops, has_einops = optional_import("einops") diff --git a/tests/test_senet.py b/tests/test_senet.py index 6809d4562b..a1dc11e4cc 100644 --- a/tests/test_senet.py +++ b/tests/test_senet.py @@ -23,7 +23,7 @@ from monai.networks import eval_mode from monai.networks.nets import SENet, SENet154, SEResNet50, SEResNet101, SEResNet152, SEResNext50, SEResNext101 from monai.utils import optional_import -from tests.utils import test_is_quick, test_pretrained_networks, test_script_save, testing_data_config +from tests.test_utils import test_is_quick, test_pretrained_networks, test_script_save, testing_data_config if TYPE_CHECKING: import pretrainedmodels diff --git a/tests/test_set_determinism.py b/tests/test_set_determinism.py index 7d64aed244..d5b578f1c8 100644 --- a/tests/test_set_determinism.py +++ b/tests/test_set_determinism.py @@ -17,7 +17,7 @@ import torch from monai.utils import get_seed, set_determinism -from tests.utils import SkipIfBeforePyTorchVersion, skip_if_no_cuda +from tests.test_utils import SkipIfBeforePyTorchVersion, skip_if_no_cuda class 
TestSetDeterminism(unittest.TestCase): diff --git a/tests/test_set_visible_devices.py b/tests/test_set_visible_devices.py index b4f44957a2..077a382962 100644 --- a/tests/test_set_visible_devices.py +++ b/tests/test_set_visible_devices.py @@ -14,7 +14,7 @@ import os import unittest -from tests.utils import SkipIfAtLeastPyTorchVersion, skip_if_no_cuda +from tests.test_utils import SkipIfAtLeastPyTorchVersion, skip_if_no_cuda class TestVisibleDevices(unittest.TestCase): diff --git a/tests/test_shift_intensity.py b/tests/test_shift_intensity.py index 90aa0f9271..1f15f92a51 100644 --- a/tests/test_shift_intensity.py +++ b/tests/test_shift_intensity.py @@ -16,7 +16,7 @@ import numpy as np from monai.transforms import ShiftIntensity -from tests.utils import NumpyImageTestCase2D +from tests.test_utils import NumpyImageTestCase2D class TestShiftIntensity(NumpyImageTestCase2D): diff --git a/tests/test_shift_intensityd.py b/tests/test_shift_intensityd.py index 22336b4415..b7d8f1be04 100644 --- a/tests/test_shift_intensityd.py +++ b/tests/test_shift_intensityd.py @@ -17,7 +17,7 @@ from monai.transforms import IntensityStatsd, ShiftIntensityd from monai.utils.enums import PostFix -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose class TestShiftIntensityd(NumpyImageTestCase2D): diff --git a/tests/test_shuffle_buffer.py b/tests/test_shuffle_buffer.py index e75321616b..e5c27e51a5 100644 --- a/tests/test_shuffle_buffer.py +++ b/tests/test_shuffle_buffer.py @@ -18,7 +18,7 @@ from monai.data import DataLoader, ShuffleBuffer from monai.utils import convert_data_type -from tests.utils import SkipIfBeforePyTorchVersion +from tests.test_utils import SkipIfBeforePyTorchVersion @SkipIfBeforePyTorchVersion((1, 12)) diff --git a/tests/test_signal_fillempty.py b/tests/test_signal_fillempty.py index 2be4bd8600..b32c9924b3 100644 --- a/tests/test_signal_fillempty.py +++ b/tests/test_signal_fillempty.py @@ -19,7 +19,7 @@ from monai.transforms import SignalFillEmpty from monai.utils.type_conversion import convert_to_tensor -from tests.utils import SkipIfBeforePyTorchVersion +from tests.test_utils import SkipIfBeforePyTorchVersion TEST_SIGNAL = os.path.join(os.path.dirname(__file__), "testing_data", "signal.npy") diff --git a/tests/test_signal_fillemptyd.py b/tests/test_signal_fillemptyd.py index 7710279495..d287e83bda 100644 --- a/tests/test_signal_fillemptyd.py +++ b/tests/test_signal_fillemptyd.py @@ -19,7 +19,7 @@ from monai.transforms import SignalFillEmptyd from monai.utils.type_conversion import convert_to_tensor -from tests.utils import SkipIfBeforePyTorchVersion +from tests.test_utils import SkipIfBeforePyTorchVersion TEST_SIGNAL = os.path.join(os.path.dirname(__file__), "testing_data", "signal.npy") diff --git a/tests/test_signal_rand_add_squarepulse.py b/tests/test_signal_rand_add_squarepulse.py index e1432029ea..552d35f55c 100644 --- a/tests/test_signal_rand_add_squarepulse.py +++ b/tests/test_signal_rand_add_squarepulse.py @@ -21,7 +21,7 @@ from monai.transforms import SignalRandAddSquarePulse from monai.utils import optional_import from monai.utils.type_conversion import convert_to_tensor -from tests.utils import SkipIfBeforePyTorchVersion +from tests.test_utils import SkipIfBeforePyTorchVersion _, has_scipy = optional_import("scipy") TEST_SIGNAL = os.path.join(os.path.dirname(__file__), "testing_data", "signal.npy") diff --git a/tests/test_signal_rand_add_squarepulse_partial.py 
b/tests/test_signal_rand_add_squarepulse_partial.py index 7e1c2bb9d8..9ac564c2c1 100644 --- a/tests/test_signal_rand_add_squarepulse_partial.py +++ b/tests/test_signal_rand_add_squarepulse_partial.py @@ -21,7 +21,7 @@ from monai.transforms import SignalRandAddSquarePulsePartial from monai.utils import optional_import from monai.utils.type_conversion import convert_to_tensor -from tests.utils import SkipIfBeforePyTorchVersion +from tests.test_utils import SkipIfBeforePyTorchVersion _, has_scipy = optional_import("scipy") TEST_SIGNAL = os.path.join(os.path.dirname(__file__), "testing_data", "signal.npy") diff --git a/tests/test_simulatedelay.py b/tests/test_simulatedelay.py index 0a4f23450a..489a9f30d0 100644 --- a/tests/test_simulatedelay.py +++ b/tests/test_simulatedelay.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.transforms.utility.array import SimulateDelay -from tests.utils import NumpyImageTestCase2D +from tests.test_utils import NumpyImageTestCase2D class TestSimulateDelay(NumpyImageTestCase2D): diff --git a/tests/test_simulatedelayd.py b/tests/test_simulatedelayd.py index 419e21f24d..9eac4a0e66 100644 --- a/tests/test_simulatedelayd.py +++ b/tests/test_simulatedelayd.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.transforms.utility.dictionary import SimulateDelayd -from tests.utils import NumpyImageTestCase2D +from tests.test_utils import NumpyImageTestCase2D class TestSimulateDelay(NumpyImageTestCase2D): diff --git a/tests/test_sliding_patch_wsi_dataset.py b/tests/test_sliding_patch_wsi_dataset.py index 6369613426..8664e865a3 100644 --- a/tests/test_sliding_patch_wsi_dataset.py +++ b/tests/test_sliding_patch_wsi_dataset.py @@ -21,7 +21,7 @@ from monai.data import SlidingPatchWSIDataset from monai.utils import WSIPatchKeys, optional_import, set_determinism -from tests.utils import download_url_or_skip_test, testing_data_config +from tests.test_utils import download_url_or_skip_test, testing_data_config set_determinism(0) diff --git a/tests/test_sliding_window_inference.py b/tests/test_sliding_window_inference.py index 33b38a5bc7..5949080405 100644 --- a/tests/test_sliding_window_inference.py +++ b/tests/test_sliding_window_inference.py @@ -21,7 +21,7 @@ from monai.data.utils import list_data_collate from monai.inferers import SlidingWindowInferer, SlidingWindowInfererAdapt, sliding_window_inference from monai.utils import optional_import -from tests.utils import TEST_TORCH_AND_META_TENSORS, skip_if_no_cuda, test_is_quick +from tests.test_utils import TEST_TORCH_AND_META_TENSORS, skip_if_no_cuda, test_is_quick _, has_tqdm = optional_import("tqdm") diff --git a/tests/test_sliding_window_splitter.py b/tests/test_sliding_window_splitter.py index ad136c61a4..daf1fcdc91 100644 --- a/tests/test_sliding_window_splitter.py +++ b/tests/test_sliding_window_splitter.py @@ -18,7 +18,7 @@ from torch.nn.functional import pad from monai.inferers import SlidingWindowSplitter -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose # ---------------------------------------------------------------------------- # Tensor test cases diff --git a/tests/test_smartcachedataset.py b/tests/test_smartcachedataset.py index bb43060469..1c55961d85 100644 --- a/tests/test_smartcachedataset.py +++ b/tests/test_smartcachedataset.py @@ -24,7 +24,7 @@ from monai.data import DataLoader, SmartCacheDataset from monai.transforms import Compose, Lambda, LoadImaged -from tests.utils import assert_allclose +from tests.test_utils import 
assert_allclose TEST_CASE_1 = [0.1, 0, Compose([LoadImaged(keys=["image", "label", "extra"])])] diff --git a/tests/test_smooth_field.py b/tests/test_smooth_field.py index ca010641c4..45af048ebc 100644 --- a/tests/test_smooth_field.py +++ b/tests/test_smooth_field.py @@ -20,7 +20,7 @@ from monai.networks.utils import meshgrid_xy from monai.transforms import RandSmoothDeformd, RandSmoothFieldAdjustContrastd, RandSmoothFieldAdjustIntensityd -from tests.utils import TEST_NDARRAYS, assert_allclose, is_tf32_env +from tests.test_utils import TEST_NDARRAYS, assert_allclose, is_tf32_env _rtol = 5e-3 if is_tf32_env() else 1e-4 diff --git a/tests/test_sobel_gradient.py b/tests/test_sobel_gradient.py index a0d7cf5a8b..29db34df01 100644 --- a/tests/test_sobel_gradient.py +++ b/tests/test_sobel_gradient.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import SobelGradients -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose IMAGE = torch.zeros(1, 16, 16, dtype=torch.float32) IMAGE[0, 8, :] = 1 diff --git a/tests/test_sobel_gradientd.py b/tests/test_sobel_gradientd.py index 03524823a5..aa8af3be89 100644 --- a/tests/test_sobel_gradientd.py +++ b/tests/test_sobel_gradientd.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import SobelGradientsd -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose IMAGE = torch.zeros(1, 16, 16, dtype=torch.float32) IMAGE[0, 8, :] = 1 diff --git a/tests/test_spacing.py b/tests/test_spacing.py index c9a6291c78..1b1f5af237 100644 --- a/tests/test_spacing.py +++ b/tests/test_spacing.py @@ -24,7 +24,7 @@ from monai.transforms import Spacing from monai.utils import fall_back_tuple from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import TEST_DEVICES, TEST_NDARRAYS_ALL, assert_allclose, skip_if_quick +from tests.test_utils import TEST_DEVICES, TEST_NDARRAYS_ALL, assert_allclose, skip_if_quick TESTS: list[list] = [] for device in TEST_DEVICES: diff --git a/tests/test_spacingd.py b/tests/test_spacingd.py index 1cecaabced..6bb4ed542c 100644 --- a/tests/test_spacingd.py +++ b/tests/test_spacingd.py @@ -23,7 +23,7 @@ from monai.transforms import Spacingd from monai.utils import ensure_tuple_rep from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import TEST_DEVICES, assert_allclose, skip_if_quick +from tests.test_utils import TEST_DEVICES, assert_allclose, skip_if_quick TESTS: list[tuple] = [] for device in TEST_DEVICES: diff --git a/tests/test_spatial_combine_transforms.py b/tests/test_spatial_combine_transforms.py index 8479e9084b..6cb4c53ad4 100644 --- a/tests/test_spatial_combine_transforms.py +++ b/tests/test_spatial_combine_transforms.py @@ -24,7 +24,7 @@ from monai.transforms.transform import MapTransform from monai.utils import set_determinism from tests.lazy_transforms_utils import get_apply_param -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose TEST_2D = [ [ diff --git a/tests/test_spatial_resample.py b/tests/test_spatial_resample.py index e64b242128..874b45f9b3 100644 --- a/tests/test_spatial_resample.py +++ b/tests/test_spatial_resample.py @@ -24,7 +24,7 @@ from monai.transforms import SpatialResample from monai.utils import optional_import from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import TEST_DEVICES, TEST_NDARRAYS_ALL, assert_allclose +from tests.test_utils import TEST_DEVICES, TEST_NDARRAYS_ALL, assert_allclose 
TESTS = [] diff --git a/tests/test_spatial_resampled.py b/tests/test_spatial_resampled.py index d5c86258d7..0576b3a826 100644 --- a/tests/test_spatial_resampled.py +++ b/tests/test_spatial_resampled.py @@ -22,7 +22,7 @@ from monai.data.utils import to_affine_nd from monai.transforms.spatial.dictionary import SpatialResampled from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import TEST_DEVICES, assert_allclose +from tests.test_utils import TEST_DEVICES, assert_allclose ON_AARCH64 = platform.machine() == "aarch64" if ON_AARCH64: diff --git a/tests/test_spectral_loss.py b/tests/test_spectral_loss.py index f62ae9030b..dbc64ca73b 100644 --- a/tests/test_spectral_loss.py +++ b/tests/test_spectral_loss.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.losses import JukeboxLoss -from tests.utils import test_script_save +from tests.test_utils import test_script_save TEST_CASES = [ [ diff --git a/tests/test_splitdim.py b/tests/test_splitdim.py index f557f44142..e0eaca182f 100644 --- a/tests/test_splitdim.py +++ b/tests/test_splitdim.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms.utility.array import SplitDim -from tests.utils import TEST_NDARRAYS +from tests.test_utils import TEST_NDARRAYS TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_splitdimd.py b/tests/test_splitdimd.py index b01913269d..6e221d3d52 100644 --- a/tests/test_splitdimd.py +++ b/tests/test_splitdimd.py @@ -21,7 +21,7 @@ from monai.data.meta_tensor import MetaTensor from monai.transforms import LoadImaged from monai.transforms.utility.dictionary import SplitDimd -from tests.utils import TEST_NDARRAYS, assert_allclose, make_nifti_image, make_rand_affine +from tests.test_utils import TEST_NDARRAYS, assert_allclose, make_nifti_image, make_rand_affine TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_squeezedim.py b/tests/test_squeezedim.py index a295d20ef5..477eef92c2 100644 --- a/tests/test_squeezedim.py +++ b/tests/test_squeezedim.py @@ -18,7 +18,7 @@ from monai.data import MetaTensor from monai.transforms import SqueezeDim -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS, TESTS_FAIL = [], [] for p in TEST_NDARRAYS: diff --git a/tests/test_squeezedimd.py b/tests/test_squeezedimd.py index 934479563d..d97a05b6f8 100644 --- a/tests/test_squeezedimd.py +++ b/tests/test_squeezedimd.py @@ -18,7 +18,7 @@ from monai.data import MetaTensor from monai.transforms import SqueezeDimd -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS, TESTS_FAIL = [], [] for p in TEST_NDARRAYS: diff --git a/tests/test_std_shift_intensity.py b/tests/test_std_shift_intensity.py index b4dc1db568..8d0469698a 100644 --- a/tests/test_std_shift_intensity.py +++ b/tests/test_std_shift_intensity.py @@ -17,7 +17,7 @@ from monai.transforms import ShiftIntensity, StdShiftIntensity from monai.utils import dtype_numpy_to_torch -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose +from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose class TestStdShiftIntensity(NumpyImageTestCase2D): diff --git a/tests/test_std_shift_intensityd.py b/tests/test_std_shift_intensityd.py index 73617ef4a3..4aa01ce31f 100644 --- a/tests/test_std_shift_intensityd.py +++ b/tests/test_std_shift_intensityd.py @@ -17,7 +17,7 @@ from monai.transforms import ShiftIntensityd, StdShiftIntensityd from 
monai.utils import dtype_numpy_to_torch -from tests.utils import NumpyImageTestCase2D +from tests.test_utils import NumpyImageTestCase2D class TestStdShiftIntensityd(NumpyImageTestCase2D): diff --git a/tests/test_subpixel_upsample.py b/tests/test_subpixel_upsample.py index fe9fb1c328..5702f3f182 100644 --- a/tests/test_subpixel_upsample.py +++ b/tests/test_subpixel_upsample.py @@ -20,7 +20,7 @@ from monai.networks import eval_mode from monai.networks.blocks import SubpixelUpsample from monai.networks.layers.factories import Conv -from tests.utils import SkipIfBeforePyTorchVersion, test_script_save +from tests.test_utils import SkipIfBeforePyTorchVersion, test_script_save TEST_CASE_SUBPIXEL = [] for inch in range(1, 5): diff --git a/tests/test_surface_dice.py b/tests/test_surface_dice.py index 2ef19a4eea..736548117e 100644 --- a/tests/test_surface_dice.py +++ b/tests/test_surface_dice.py @@ -18,7 +18,7 @@ import torch.nn.functional as F from monai.metrics.surface_dice import SurfaceDiceMetric, compute_surface_dice -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose _device = "cuda:0" if torch.cuda.is_available() else "cpu" diff --git a/tests/test_swin_unetr.py b/tests/test_swin_unetr.py index 5b33475c7e..08dee959bb 100644 --- a/tests/test_swin_unetr.py +++ b/tests/test_swin_unetr.py @@ -24,7 +24,7 @@ from monai.networks.nets.swin_unetr import PatchMerging, PatchMergingV2, SwinUNETR, filter_swinunetr from monai.networks.utils import copy_model_state from monai.utils import optional_import -from tests.utils import ( +from tests.test_utils import ( assert_allclose, pytorch_after, skip_if_downloading_fails, diff --git a/tests/test_tciadataset.py b/tests/test_tciadataset.py index 5a16bb4816..7c12daf954 100644 --- a/tests/test_tciadataset.py +++ b/tests/test_tciadataset.py @@ -19,7 +19,7 @@ from monai.apps.tcia import DCM_FILENAME_REGEX, TCIA_LABEL_DICT from monai.data import MetaTensor from monai.transforms import Compose, EnsureChannelFirstd, LoadImaged, ScaleIntensityd -from tests.utils import skip_if_downloading_fails, skip_if_quick +from tests.test_utils import skip_if_downloading_fails, skip_if_quick class TestTciaDataset(unittest.TestCase): diff --git a/tests/test_testtimeaugmentation.py b/tests/test_testtimeaugmentation.py index 746ad122b2..81d5e580f7 100644 --- a/tests/test_testtimeaugmentation.py +++ b/tests/test_testtimeaugmentation.py @@ -37,7 +37,7 @@ from monai.transforms.spatial.dictionary import RandFlipd from monai.utils import optional_import, set_determinism from monai.utils.enums import PostFix -from tests.utils import TEST_NDARRAYS +from tests.test_utils import TEST_NDARRAYS if TYPE_CHECKING: import tqdm diff --git a/tests/test_text_encoding.py b/tests/test_text_encoding.py index 902f7a4b1d..83093b151f 100644 --- a/tests/test_text_encoding.py +++ b/tests/test_text_encoding.py @@ -14,7 +14,7 @@ import unittest from monai.networks.blocks.text_embedding import TextEncoder -from tests.utils import skip_if_downloading_fails +from tests.test_utils import skip_if_downloading_fails class TestTextEncoder(unittest.TestCase): diff --git a/tests/test_thread_buffer.py b/tests/test_thread_buffer.py index 2b7da2c0b0..cd7abc8dd4 100644 --- a/tests/test_thread_buffer.py +++ b/tests/test_thread_buffer.py @@ -20,7 +20,7 @@ from monai.data import DataLoader, Dataset, ThreadBuffer, ThreadDataLoader from monai.transforms import Compose, SimulateDelayd from monai.utils import PerfContext, set_determinism -from tests.utils import assert_allclose +from 
tests.test_utils import assert_allclose class TestDataLoader(unittest.TestCase): diff --git a/tests/test_threadcontainer.py b/tests/test_threadcontainer.py index 568461748b..e61ef2bfd1 100644 --- a/tests/test_threadcontainer.py +++ b/tests/test_threadcontainer.py @@ -21,7 +21,7 @@ from monai.data import DataLoader from monai.utils import optional_import, set_determinism from monai.utils.enums import CommonKeys -from tests.utils import SkipIfNoModule +from tests.test_utils import SkipIfNoModule try: _, has_ignite = optional_import("ignite") diff --git a/tests/test_threshold_intensity.py b/tests/test_threshold_intensity.py index 97c80eebcd..dd485af05b 100644 --- a/tests/test_threshold_intensity.py +++ b/tests/test_threshold_intensity.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import ThresholdIntensity -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_threshold_intensityd.py b/tests/test_threshold_intensityd.py index 867ebfe952..5e7fef0fe3 100644 --- a/tests/test_threshold_intensityd.py +++ b/tests/test_threshold_intensityd.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import ThresholdIntensityd -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_timedcall_dist.py b/tests/test_timedcall_dist.py index a814a99b25..6c2cc7a653 100644 --- a/tests/test_timedcall_dist.py +++ b/tests/test_timedcall_dist.py @@ -16,7 +16,7 @@ import time import unittest -from tests.utils import TimedCall +from tests.test_utils import TimedCall @TimedCall(seconds=20 if sys.platform == "linux" else 60, force_quit=False) diff --git a/tests/test_to_contiguous.py b/tests/test_to_contiguous.py index 73a9ca27f6..a6a9cbf799 100644 --- a/tests/test_to_contiguous.py +++ b/tests/test_to_contiguous.py @@ -17,7 +17,7 @@ import torch from monai.transforms import convert_to_contiguous -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose class TestToContiguous(unittest.TestCase): diff --git a/tests/test_to_cupy.py b/tests/test_to_cupy.py index 38400f0d3f..62dfd1c903 100644 --- a/tests/test_to_cupy.py +++ b/tests/test_to_cupy.py @@ -19,7 +19,7 @@ from monai.transforms import ToCupy from monai.utils import optional_import -from tests.utils import HAS_CUPY, skip_if_no_cuda +from tests.test_utils import HAS_CUPY, skip_if_no_cuda cp, _ = optional_import("cupy") diff --git a/tests/test_to_cupyd.py b/tests/test_to_cupyd.py index a07ab671e1..390c2cb6df 100644 --- a/tests/test_to_cupyd.py +++ b/tests/test_to_cupyd.py @@ -19,7 +19,7 @@ from monai.transforms import ToCupyd from monai.utils import optional_import -from tests.utils import HAS_CUPY, skip_if_no_cuda +from tests.test_utils import HAS_CUPY, skip_if_no_cuda cp, _ = optional_import("cupy") diff --git a/tests/test_to_device.py b/tests/test_to_device.py index 6a13ffca99..34d2a16e07 100644 --- a/tests/test_to_device.py +++ b/tests/test_to_device.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import ToDevice -from tests.utils import assert_allclose, skip_if_no_cuda +from tests.test_utils import assert_allclose, skip_if_no_cuda TEST_CASE_1 = ["cuda:0"] diff --git a/tests/test_to_deviced.py b/tests/test_to_deviced.py index 19c2d0761f..9580dd4e10 100644 --- a/tests/test_to_deviced.py +++ 
b/tests/test_to_deviced.py @@ -17,7 +17,7 @@ from monai.data import CacheDataset, ThreadDataLoader from monai.transforms import ToDeviced -from tests.utils import assert_allclose, skip_if_no_cuda +from tests.test_utils import assert_allclose, skip_if_no_cuda @skip_if_no_cuda diff --git a/tests/test_to_from_meta_tensord.py b/tests/test_to_from_meta_tensord.py index fe777cec77..06c089cb5e 100644 --- a/tests/test_to_from_meta_tensord.py +++ b/tests/test_to_from_meta_tensord.py @@ -24,7 +24,7 @@ from monai.data.meta_tensor import MetaTensor from monai.transforms import FromMetaTensord, ToMetaTensord from monai.utils.enums import PostFix -from tests.utils import TEST_DEVICES, assert_allclose +from tests.test_utils import TEST_DEVICES, assert_allclose DTYPES = [[torch.float32], [torch.float64], [torch.float16], [torch.int64], [torch.int32]] TESTS = [] diff --git a/tests/test_to_numpy.py b/tests/test_to_numpy.py index f4e5f80a29..be5ce1f38a 100644 --- a/tests/test_to_numpy.py +++ b/tests/test_to_numpy.py @@ -19,7 +19,7 @@ from monai.transforms import ToNumpy from monai.utils import optional_import -from tests.utils import HAS_CUPY, assert_allclose, skip_if_no_cuda +from tests.test_utils import HAS_CUPY, assert_allclose, skip_if_no_cuda cp, _ = optional_import("cupy") diff --git a/tests/test_to_numpyd.py b/tests/test_to_numpyd.py index ae9b4c84b3..2dcafa2da8 100644 --- a/tests/test_to_numpyd.py +++ b/tests/test_to_numpyd.py @@ -19,7 +19,7 @@ from monai.transforms import ToNumpyd from monai.utils import optional_import -from tests.utils import HAS_CUPY, assert_allclose, skip_if_no_cuda +from tests.test_utils import HAS_CUPY, assert_allclose, skip_if_no_cuda cp, _ = optional_import("cupy") diff --git a/tests/test_to_pil.py b/tests/test_to_pil.py index 352e10bcc1..25d533b94e 100644 --- a/tests/test_to_pil.py +++ b/tests/test_to_pil.py @@ -20,7 +20,7 @@ from monai.transforms import ToPIL from monai.utils import optional_import -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose if TYPE_CHECKING: from PIL.Image import Image as PILImageImage diff --git a/tests/test_to_pild.py b/tests/test_to_pild.py index 1a0232e134..13fe3a87a8 100644 --- a/tests/test_to_pild.py +++ b/tests/test_to_pild.py @@ -20,7 +20,7 @@ from monai.transforms import ToPILd from monai.utils import optional_import -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose if TYPE_CHECKING: from PIL.Image import Image as PILImageImage diff --git a/tests/test_to_tensor.py b/tests/test_to_tensor.py index 50df80128b..3fa93bc51f 100644 --- a/tests/test_to_tensor.py +++ b/tests/test_to_tensor.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import ToTensor -from tests.utils import HAS_CUPY, TEST_NDARRAYS, assert_allclose, optional_import +from tests.test_utils import HAS_CUPY, TEST_NDARRAYS, assert_allclose, optional_import cp, _ = optional_import("cupy") diff --git a/tests/test_to_tensord.py b/tests/test_to_tensord.py index 1eab7b9485..e6ad27610d 100644 --- a/tests/test_to_tensord.py +++ b/tests/test_to_tensord.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.transforms import ToTensord -from tests.utils import HAS_CUPY, TEST_NDARRAYS, assert_allclose, optional_import +from tests.test_utils import HAS_CUPY, TEST_NDARRAYS, assert_allclose, optional_import cp, _ = optional_import("cupy") diff --git a/tests/test_torchiod.py b/tests/test_torchiod.py index 
892287461c..b4edc763d2 100644 --- a/tests/test_torchiod.py +++ b/tests/test_torchiod.py @@ -19,7 +19,7 @@ from monai.transforms import TorchIOd from monai.utils import optional_import -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose _, has_torchio = optional_import("torchio") diff --git a/tests/test_torchvision.py b/tests/test_torchvision.py index 2931b0c1a8..d64147013f 100644 --- a/tests/test_torchvision.py +++ b/tests/test_torchvision.py @@ -17,7 +17,7 @@ from monai.transforms import TorchVision from monai.utils import set_determinism -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_torchvision_fc_model.py b/tests/test_torchvision_fc_model.py index 9cc19db62c..e6bf3f1e7a 100644 --- a/tests/test_torchvision_fc_model.py +++ b/tests/test_torchvision_fc_model.py @@ -21,7 +21,7 @@ from monai.networks.nets import TorchVisionFCModel, UNet from monai.networks.utils import look_up_named_module, set_named_module from monai.utils import min_version, optional_import -from tests.utils import skip_if_downloading_fails +from tests.test_utils import skip_if_downloading_fails Inception_V3_Weights, has_enum = optional_import("torchvision.models.inception", name="Inception_V3_Weights") diff --git a/tests/test_torchvisiond.py b/tests/test_torchvisiond.py index ec09692df9..f772a8ec86 100644 --- a/tests/test_torchvisiond.py +++ b/tests/test_torchvisiond.py @@ -18,7 +18,7 @@ from monai.transforms import TorchVisiond from monai.utils import set_determinism -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose TEST_CASE_1 = [ {"keys": "img", "name": "ColorJitter"}, diff --git a/tests/test_trainable_bilateral.py b/tests/test_trainable_bilateral.py index c69eff4071..ea8cb8a9dc 100644 --- a/tests/test_trainable_bilateral.py +++ b/tests/test_trainable_bilateral.py @@ -19,7 +19,7 @@ from torch.autograd import gradcheck from monai.networks.layers.filtering import TrainableBilateralFilterFunction -from tests.utils import skip_if_no_cpp_extension, skip_if_no_cuda +from tests.test_utils import skip_if_no_cpp_extension, skip_if_no_cuda TEST_CASES = [ [ diff --git a/tests/test_trainable_joint_bilateral.py b/tests/test_trainable_joint_bilateral.py index 4263683ce2..a21596945b 100644 --- a/tests/test_trainable_joint_bilateral.py +++ b/tests/test_trainable_joint_bilateral.py @@ -19,7 +19,7 @@ from torch.autograd import gradcheck from monai.networks.layers.filtering import TrainableJointBilateralFilterFunction -from tests.utils import skip_if_no_cpp_extension, skip_if_no_cuda, skip_if_quick +from tests.test_utils import skip_if_no_cpp_extension, skip_if_no_cuda, skip_if_quick TEST_CASES = [ [ diff --git a/tests/test_transchex.py b/tests/test_transchex.py index 481c20e285..0940cf62ab 100644 --- a/tests/test_transchex.py +++ b/tests/test_transchex.py @@ -18,7 +18,7 @@ from monai.networks import eval_mode from monai.networks.nets.transchex import Transchex -from tests.utils import skip_if_quick +from tests.test_utils import skip_if_quick TEST_CASE_TRANSCHEX = [] for drop_out in [0.4]: diff --git a/tests/test_transformer.py b/tests/test_transformer.py index b371809d47..fea5d023bf 100644 --- a/tests/test_transformer.py +++ b/tests/test_transformer.py @@ -24,7 +24,7 @@ from monai.networks import eval_mode from monai.networks.nets import DecoderOnlyTransformer from monai.utils import optional_import -from tests.utils import 
skip_if_downloading_fails, testing_data_config +from tests.test_utils import skip_if_downloading_fails, testing_data_config _, has_einops = optional_import("einops") TEST_CASES = [] diff --git a/tests/test_transpose.py b/tests/test_transpose.py index 2f5ccd1235..ae1dbde12b 100644 --- a/tests/test_transpose.py +++ b/tests/test_transpose.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.transforms import Transpose -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_transposed.py b/tests/test_transposed.py index e7c6ecbe8a..74d48383eb 100644 --- a/tests/test_transposed.py +++ b/tests/test_transposed.py @@ -19,7 +19,7 @@ from parameterized import parameterized from monai.transforms import Transposed -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_trt_compile.py b/tests/test_trt_compile.py index f7779fec9b..a8fdd02f20 100644 --- a/tests/test_trt_compile.py +++ b/tests/test_trt_compile.py @@ -21,7 +21,7 @@ from monai.networks import trt_compile from monai.networks.nets import cell_sam_wrapper, vista3d132 from monai.utils import min_version, optional_import -from tests.utils import SkipIfBeforeComputeCapabilityVersion, skip_if_no_cuda, skip_if_quick, skip_if_windows +from tests.test_utils import SkipIfBeforeComputeCapabilityVersion, skip_if_no_cuda, skip_if_quick, skip_if_windows trt, trt_imported = optional_import("tensorrt", "10.1.0", min_version) torch_tensorrt, torch_trt_imported = optional_import("torch_tensorrt") diff --git a/tests/test_tversky_loss.py b/tests/test_tversky_loss.py index 73a841a55d..29c54fd0fc 100644 --- a/tests/test_tversky_loss.py +++ b/tests/test_tversky_loss.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.losses import TverskyLoss -from tests.utils import test_script_save +from tests.test_utils import test_script_save TEST_CASES = [ [ # shape: (1, 1, 2, 2), (1, 1, 2, 2) diff --git a/tests/test_ultrasound_confidence_map_transform.py b/tests/test_ultrasound_confidence_map_transform.py index 1c6b8f7635..982ad53675 100644 --- a/tests/test_ultrasound_confidence_map_transform.py +++ b/tests/test_ultrasound_confidence_map_transform.py @@ -21,7 +21,7 @@ from monai.transforms import UltrasoundConfidenceMapTransform from monai.utils import optional_import -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose _, has_scipy = optional_import("scipy") diff --git a/tests/test_unet.py b/tests/test_unet.py index 1fb98f84b0..41310eca2b 100644 --- a/tests/test_unet.py +++ b/tests/test_unet.py @@ -19,7 +19,7 @@ from monai.networks import eval_mode from monai.networks.layers import Act, Norm from monai.networks.nets import UNet -from tests.utils import test_script_save +from tests.test_utils import test_script_save device = "cuda" if torch.cuda.is_available() else "cpu" diff --git a/tests/test_unetr.py b/tests/test_unetr.py index 8c5ecb32e1..7aef81c184 100644 --- a/tests/test_unetr.py +++ b/tests/test_unetr.py @@ -18,7 +18,7 @@ from monai.networks import eval_mode from monai.networks.nets.unetr import UNETR -from tests.utils import SkipIfBeforePyTorchVersion, skip_if_quick, test_script_save +from tests.test_utils import SkipIfBeforePyTorchVersion, skip_if_quick, test_script_save TEST_CASE_UNETR = [] for dropout_rate in [0.4]: diff --git a/tests/test_unetr_block.py 
b/tests/test_unetr_block.py index 9701557ed6..d6cab1b1c4 100644 --- a/tests/test_unetr_block.py +++ b/tests/test_unetr_block.py @@ -19,7 +19,7 @@ from monai.networks import eval_mode from monai.networks.blocks.dynunet_block import get_padding from monai.networks.blocks.unetr_block import UnetrBasicBlock, UnetrPrUpBlock, UnetrUpBlock -from tests.utils import test_script_save +from tests.test_utils import test_script_save TEST_CASE_UNETR_BASIC_BLOCK = [] for spatial_dims in range(1, 4): diff --git a/tests/utils.py b/tests/test_utils.py similarity index 99% rename from tests/utils.py rename to tests/test_utils.py index 2a00af50e9..c494bb547c 100644 --- a/tests/utils.py +++ b/tests/test_utils.py @@ -30,6 +30,7 @@ import warnings from contextlib import contextmanager from functools import partial, reduce +from pathlib import Path from subprocess import PIPE, Popen from typing import Callable from urllib.error import ContentTooShortError, HTTPError @@ -58,11 +59,13 @@ _tf32_enabled = None _test_data_config: dict = {} +MODULE_PATH = Path(__file__).resolve().parents[1] + def testing_data_config(*keys): """get _test_data_config[keys0][keys1]...[keysN]""" if not _test_data_config: - with open(os.path.join(os.path.dirname(__file__), "testing_data", "data_config.json")) as c: + with open(f"{MODULE_PATH}/tests/testing_data/data_config.json") as c: _config = json.load(c) for k, v in _config.items(): _test_data_config[k] = v diff --git a/tests/test_utils_pytorch_numpy_unification.py b/tests/test_utils_pytorch_numpy_unification.py index 90c0401e46..cf382d15d2 100644 --- a/tests/test_utils_pytorch_numpy_unification.py +++ b/tests/test_utils_pytorch_numpy_unification.py @@ -19,7 +19,7 @@ from monai.transforms.utils_pytorch_numpy_unification import max, min, mode, percentile from monai.utils import set_determinism -from tests.utils import TEST_NDARRAYS, assert_allclose, skip_if_quick +from tests.test_utils import TEST_NDARRAYS, assert_allclose, skip_if_quick TEST_MODE = [] for p in TEST_NDARRAYS: diff --git a/tests/test_varautoencoder.py b/tests/test_varautoencoder.py index e957dcfb61..aaaa11886c 100644 --- a/tests/test_varautoencoder.py +++ b/tests/test_varautoencoder.py @@ -19,7 +19,7 @@ from monai.networks import eval_mode from monai.networks.layers import Act from monai.networks.nets import VarAutoEncoder -from tests.utils import test_script_save +from tests.test_utils import test_script_save device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") diff --git a/tests/test_varnet.py b/tests/test_varnet.py index a46d58d6a2..b1f38dd30c 100644 --- a/tests/test_varnet.py +++ b/tests/test_varnet.py @@ -20,7 +20,7 @@ from monai.apps.reconstruction.networks.nets.complex_unet import ComplexUnet from monai.apps.reconstruction.networks.nets.varnet import VariationalNetworkModel from monai.networks import eval_mode -from tests.utils import SkipIfBeforePyTorchVersion, test_script_save +from tests.test_utils import SkipIfBeforePyTorchVersion, test_script_save device = torch.device("cuda" if torch.cuda.is_available() else "cpu") coil_sens_model = CoilSensitivityModel(spatial_dims=2, features=[8, 16, 32, 64, 128, 8]) diff --git a/tests/test_video_datasets.py b/tests/test_video_datasets.py index 6e344e1caa..32eed94407 100644 --- a/tests/test_video_datasets.py +++ b/tests/test_video_datasets.py @@ -20,7 +20,7 @@ from monai.data.dataloader import DataLoader from monai.data.video_dataset import CameraDataset, VideoDataset, VideoFileDataset from monai.utils.module import optional_import -from tests.utils 
import assert_allclose, download_url_or_skip_test, testing_data_config +from tests.test_utils import assert_allclose, download_url_or_skip_test, testing_data_config cv2, has_cv2 = optional_import("cv2") diff --git a/tests/test_vis_gradcam.py b/tests/test_vis_gradcam.py index f77d916a5b..e8d225f6f5 100644 --- a/tests/test_vis_gradcam.py +++ b/tests/test_vis_gradcam.py @@ -20,7 +20,7 @@ from monai.networks.nets import DenseNet, DenseNet121, SEResNet50 from monai.visualize import GradCAM, GradCAMpp -from tests.utils import assert_allclose, skip_if_quick +from tests.test_utils import assert_allclose, skip_if_quick class DenseNetAdjoint(DenseNet121): diff --git a/tests/test_vista3d.py b/tests/test_vista3d.py index d3b4e0c10e..05b40b5beb 100644 --- a/tests/test_vista3d.py +++ b/tests/test_vista3d.py @@ -19,7 +19,7 @@ from monai.networks import eval_mode from monai.networks.nets import VISTA3D, SegResNetDS2 from monai.networks.nets.vista3d import ClassMappingClassify, PointMappingSAM -from tests.utils import SkipIfBeforePyTorchVersion, skip_if_quick +from tests.test_utils import SkipIfBeforePyTorchVersion, skip_if_quick device = "cuda" if torch.cuda.is_available() else "cpu" diff --git a/tests/test_vista3d_utils.py b/tests/test_vista3d_utils.py index 5a0caedd61..191c306957 100644 --- a/tests/test_vista3d_utils.py +++ b/tests/test_vista3d_utils.py @@ -21,7 +21,7 @@ from monai.transforms.utils import convert_points_to_disc, keep_merge_components_with_points, sample_points_from_label from monai.utils import min_version from monai.utils.module import optional_import -from tests.utils import skip_if_no_cuda, skip_if_quick +from tests.test_utils import skip_if_no_cuda, skip_if_quick cp, has_cp = optional_import("cupy") cucim_skimage, has_cucim = optional_import("cucim.skimage") diff --git a/tests/test_vit.py b/tests/test_vit.py index a3ffd0b2ef..ba1659f01a 100644 --- a/tests/test_vit.py +++ b/tests/test_vit.py @@ -18,7 +18,7 @@ from monai.networks import eval_mode from monai.networks.nets.vit import ViT -from tests.utils import SkipIfBeforePyTorchVersion, skip_if_quick, test_script_save +from tests.test_utils import SkipIfBeforePyTorchVersion, skip_if_quick, test_script_save TEST_CASE_Vit = [] for dropout_rate in [0.6]: diff --git a/tests/test_vitautoenc.py b/tests/test_vitautoenc.py index 9a503948d0..00eb3e12e7 100644 --- a/tests/test_vitautoenc.py +++ b/tests/test_vitautoenc.py @@ -17,7 +17,7 @@ from monai.networks import eval_mode from monai.networks.nets.vitautoenc import ViTAutoEnc -from tests.utils import skip_if_quick, skip_if_windows +from tests.test_utils import skip_if_quick, skip_if_windows TEST_CASE_Vitautoenc = [] for in_channels in [1, 4]: diff --git a/tests/test_vnet.py b/tests/test_vnet.py index 0ebf060434..f0d8989813 100644 --- a/tests/test_vnet.py +++ b/tests/test_vnet.py @@ -18,7 +18,7 @@ from monai.networks import eval_mode from monai.networks.nets import VNet -from tests.utils import test_script_save +from tests.test_utils import test_script_save device = "cuda" if torch.cuda.is_available() else "cpu" diff --git a/tests/test_vote_ensemble.py b/tests/test_vote_ensemble.py index 4abdd0b050..f034a442c7 100644 --- a/tests/test_vote_ensemble.py +++ b/tests/test_vote_ensemble.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import VoteEnsemble -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_vote_ensembled.py 
b/tests/test_vote_ensembled.py index 957133d7fc..1ad4d17869 100644 --- a/tests/test_vote_ensembled.py +++ b/tests/test_vote_ensembled.py @@ -17,7 +17,7 @@ from parameterized import parameterized from monai.transforms import VoteEnsembled -from tests.utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose TESTS = [] for p in TEST_NDARRAYS: diff --git a/tests/test_voxelmorph.py b/tests/test_voxelmorph.py index ef420ef20c..fc302df071 100644 --- a/tests/test_voxelmorph.py +++ b/tests/test_voxelmorph.py @@ -18,7 +18,7 @@ from monai.networks import eval_mode from monai.networks.nets import VoxelMorph, VoxelMorphUNet -from tests.utils import test_script_save +from tests.test_utils import test_script_save device = "cuda" if torch.cuda.is_available() else "cpu" diff --git a/tests/test_vqvae.py b/tests/test_vqvae.py index 4916dc2faa..624fb37930 100644 --- a/tests/test_vqvae.py +++ b/tests/test_vqvae.py @@ -18,7 +18,7 @@ from monai.networks import eval_mode from monai.networks.nets.vqvae import VQVAE -from tests.utils import SkipIfBeforePyTorchVersion +from tests.test_utils import SkipIfBeforePyTorchVersion TEST_CASES = [ [ diff --git a/tests/test_warp.py b/tests/test_warp.py index 0e5f2466db..4d3f5d2c42 100644 --- a/tests/test_warp.py +++ b/tests/test_warp.py @@ -22,7 +22,7 @@ from monai.networks.blocks.warp import Warp from monai.transforms import LoadImaged from monai.utils import GridSampleMode, GridSamplePadMode -from tests.utils import ( +from tests.test_utils import ( SkipIfBeforePyTorchVersion, SkipIfNoModule, download_url_or_skip_test, diff --git a/tests/test_watershed.py b/tests/test_watershed.py index 3f7a29bfe7..bef4a7a8d0 100644 --- a/tests/test_watershed.py +++ b/tests/test_watershed.py @@ -24,7 +24,7 @@ Watershed, ) from monai.utils import min_version, optional_import -from tests.utils import TEST_NDARRAYS +from tests.test_utils import TEST_NDARRAYS _, has_skimage = optional_import("skimage", "0.19.3", min_version) _, has_scipy = optional_import("scipy", "1.8.1", min_version) diff --git a/tests/test_watershedd.py b/tests/test_watershedd.py index fc44996be4..7a6067e8a1 100644 --- a/tests/test_watershedd.py +++ b/tests/test_watershedd.py @@ -25,7 +25,7 @@ ) from monai.transforms import Compose from monai.utils import min_version, optional_import -from tests.utils import TEST_NDARRAYS +from tests.test_utils import TEST_NDARRAYS _, has_skimage = optional_import("skimage", "0.19.3", min_version) _, has_scipy = optional_import("scipy", "1.8.1", min_version) diff --git a/tests/test_weighted_random_sampler_dist.py b/tests/test_weighted_random_sampler_dist.py index 8e37482da6..d60fae08da 100644 --- a/tests/test_weighted_random_sampler_dist.py +++ b/tests/test_weighted_random_sampler_dist.py @@ -18,7 +18,7 @@ import torch.distributed as dist from monai.data import DistributedWeightedRandomSampler -from tests.utils import DistCall, DistTestCase, skip_if_darwin, skip_if_windows +from tests.test_utils import DistCall, DistTestCase, skip_if_darwin, skip_if_windows @skip_if_windows diff --git a/tests/test_wsi_sliding_window_splitter.py b/tests/test_wsi_sliding_window_splitter.py index c510ece272..0494cc18da 100644 --- a/tests/test_wsi_sliding_window_splitter.py +++ b/tests/test_wsi_sliding_window_splitter.py @@ -20,7 +20,7 @@ from monai.data import CuCIMWSIReader, ImageReader, OpenSlideWSIReader, WSIReader from monai.inferers import WSISlidingWindowSplitter -from tests.utils import download_url_or_skip_test, optional_import, testing_data_config +from 
tests.test_utils import download_url_or_skip_test, optional_import, testing_data_config cucim, has_cucim = optional_import("cucim") has_cucim = has_cucim and hasattr(cucim, "CuImage") diff --git a/tests/test_wsireader.py b/tests/test_wsireader.py index 99a86c5ac8..5ce4ca9502 100644 --- a/tests/test_wsireader.py +++ b/tests/test_wsireader.py @@ -26,7 +26,7 @@ from monai.transforms import Compose, LoadImaged, ToTensord from monai.utils import first, optional_import from monai.utils.enums import PostFix, WSIPatchKeys -from tests.utils import assert_allclose, download_url_or_skip_test, skip_if_no_cuda, testing_data_config +from tests.test_utils import assert_allclose, download_url_or_skip_test, skip_if_no_cuda, testing_data_config cucim, has_cucim = optional_import("cucim") has_cucim = has_cucim and hasattr(cucim, "CuImage") diff --git a/tests/test_zarr_avg_merger.py b/tests/test_zarr_avg_merger.py index 64e8fbde71..b5ba1d9902 100644 --- a/tests/test_zarr_avg_merger.py +++ b/tests/test_zarr_avg_merger.py @@ -20,7 +20,7 @@ from monai.inferers import ZarrAvgMerger from monai.utils import get_package_version, optional_import, version_geq -from tests.utils import assert_allclose +from tests.test_utils import assert_allclose np.seterr(divide="ignore", invalid="ignore") zarr, has_zarr = optional_import("zarr") diff --git a/tests/test_zoom.py b/tests/test_zoom.py index 2db2df4486..67da9e2e82 100644 --- a/tests/test_zoom.py +++ b/tests/test_zoom.py @@ -21,7 +21,7 @@ from monai.data import MetaTensor, set_track_meta from monai.transforms import Zoom from monai.transforms.lazy.functional import apply_pending -from tests.utils import ( +from tests.test_utils import ( DEFAULT_TEST_AFFINE, TEST_NDARRAYS_ALL, NumpyImageTestCase2D, diff --git a/tests/test_zoomd.py b/tests/test_zoomd.py index ad91f398ff..dacc9eb897 100644 --- a/tests/test_zoomd.py +++ b/tests/test_zoomd.py @@ -21,7 +21,7 @@ from monai.config import USE_COMPILED from monai.transforms import Zoomd from tests.lazy_transforms_utils import test_resampler_lazy -from tests.utils import TEST_NDARRAYS_ALL, NumpyImageTestCase2D, assert_allclose, test_local_inversion +from tests.test_utils import TEST_NDARRAYS_ALL, NumpyImageTestCase2D, assert_allclose, test_local_inversion VALID_CASES = [ (1.5, "nearest", False),
From 2c63f5a13dee837eaf0362a18ff9744f425285a8 Mon Sep 17 00:00:00 2001
From: Rafael Garcia-Dias
Date: Wed, 12 Feb 2025 12:56:34 +0000
Subject: [PATCH 12/55] 8185 - Refactor test (#8231)

Fixes #8185

### Description

## Reorganize tests

I looked at the imports and the title of each test file to identify which files were being tested. I mirrored the file structure of MONAI in the `tests` folder and moved the files accordingly. I used some helper scripts, but the process required substantial manual intervention. When uncertain, I moved the tests to the `integration` folder, since the ambiguous cases always involved many imports and the test name did not make the target module clear. Please review the `integration` folder carefully, as it is the part I feel least confident about.

### Types of changes
- [x] Non-breaking change (fix or new feature that would not break existing functionality).
- [x] Quick tests passed locally by running `./runtests.sh --quick --unittests --disttests`.

---------

Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com>
Signed-off-by: R.
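As a rough illustration of the kind of helper script mentioned in the description above (not part of this patch), the minimal sketch below guesses each test file's destination from its deepest `monai.*` import and proposes a move that mirrors the package layout under `tests/`. The helper names `guess_target_module` and `proposed_destination`, and the hard-coded `tests` path, are assumptions made for this example, not code from the repository.

```
from __future__ import annotations

import ast
from pathlib import Path

TESTS_DIR = Path("tests")  # assumed to be run from the repository root


def guess_target_module(test_file: Path) -> str | None:
    """Return the deepest monai.* module imported by a test file, if any."""
    tree = ast.parse(test_file.read_text(), filename=str(test_file))
    candidates: list[str] = []
    for node in ast.walk(tree):
        if isinstance(node, ast.ImportFrom) and node.module and node.module.startswith("monai."):
            candidates.append(node.module)
        elif isinstance(node, ast.Import):
            candidates.extend(a.name for a in node.names if a.name.startswith("monai."))
    # take the most specific (deepest) import as the best guess
    return max(candidates, key=lambda m: m.count("."), default=None)


def proposed_destination(test_file: Path) -> Path | None:
    """Mirror the guessed module's subpackage path under tests/; None means manual review."""
    module = guess_target_module(test_file)
    if module is None:
        return None
    # drop the leading "monai" and the final component, keeping the subpackage path as the folder
    subpackages = module.split(".")[1:-1]
    return TESTS_DIR.joinpath(*subpackages, test_file.name)


if __name__ == "__main__":
    for path in sorted(TESTS_DIR.glob("test_*.py")):
        dest = proposed_destination(path)
        print(path, "->", dest if dest is not None else "tests/integration (manual review)")
```

The deepest-import heuristic only yields a first guess; files whose imports span many subpackages are exactly the ambiguous cases that, per the description above, needed manual review and ended up under `tests/integration`.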
Garcia-Dias Signed-off-by: Rafael Garcia-Dias Signed-off-by: Eric Kerfoot <17726042+ericspod@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Rafael Garcia-Dias Co-authored-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> Co-authored-by: Eric Kerfoot <17726042+ericspod@users.noreply.github.com> Signed-off-by: Can-Zhao --- monai/transforms/io/array.py | 1 - runtests.sh | 4 +-- tests/apps/__init__.py | 10 ++++++ tests/apps/deepedit/__init__.py | 10 ++++++ .../deepedit}/test_deepedit_transforms.py | 0 tests/apps/deepgrow/__init__.py | 10 ++++++ .../deepgrow}/test_deepgrow_dataset.py | 0 tests/apps/deepgrow/transforms/__init__.py | 10 ++++++ .../transforms}/test_deepgrow_interaction.py | 0 .../transforms}/test_deepgrow_transforms.py | 0 tests/apps/detection/__init__.py | 10 ++++++ tests/apps/detection/metrics/__init__.py | 10 ++++++ .../metrics}/test_detection_coco_metrics.py | 0 tests/apps/detection/networks/__init__.py | 10 ++++++ .../detection/networks}/test_retinanet.py | 1 - .../networks}/test_retinanet_detector.py | 2 -- .../detection}/test_box_transform.py | 1 - tests/apps/detection/utils/__init__.py | 10 ++++++ .../detection/utils}/test_anchor_box.py | 1 - .../detection/utils}/test_atss_box_matcher.py | 1 - .../detection/utils}/test_box_coder.py | 1 - .../utils}/test_detector_boxselector.py | 1 - .../detection/utils}/test_detector_utils.py | 1 - .../detection/utils}/test_hardnegsampler.py | 1 - tests/apps/maisi/networks/__init__.py | 10 ++++++ .../networks}/test_autoencoderkl_maisi.py | 1 - .../maisi/networks}/test_controlnet_maisi.py | 1 - .../test_diffusion_model_unet_maisi.py | 0 tests/apps/nuclick/__init__.py | 10 ++++++ .../nuclick}/test_nuclick_transforms.py | 0 tests/apps/pathology/__init__.py | 10 ++++++ tests/apps/pathology/handlers/__init__.py | 10 ++++++ .../handlers}/test_from_engine_hovernet.py | 1 - .../{ => apps/pathology}/test_lesion_froc.py | 0 .../pathology}/test_pathology_prob_nms.py | 0 .../pathology}/test_prepare_batch_hovernet.py | 1 - .../test_sliding_window_hovernet_inference.py | 3 +- tests/apps/pathology/transforms/__init__.py | 10 ++++++ .../pathology/transforms/post/__init__.py | 10 ++++++ .../post}/test_generate_distance_map.py | 1 - .../post}/test_generate_distance_mapd.py | 1 - .../post}/test_generate_instance_border.py | 1 - .../post}/test_generate_instance_borderd.py | 1 - .../post}/test_generate_instance_centroid.py | 1 - .../post}/test_generate_instance_centroidd.py | 1 - .../post}/test_generate_instance_contour.py | 1 - .../post}/test_generate_instance_contourd.py | 1 - .../post}/test_generate_instance_type.py | 1 - .../post}/test_generate_instance_typed.py | 1 - .../post}/test_generate_succinct_contour.py | 0 .../post}/test_generate_succinct_contourd.py | 0 .../post}/test_generate_watershed_markers.py | 1 - .../post}/test_generate_watershed_markersd.py | 1 - .../post}/test_generate_watershed_mask.py | 1 - .../post}/test_generate_watershed_maskd.py | 1 - ...t_hovernet_instance_map_post_processing.py | 1 - ..._hovernet_instance_map_post_processingd.py | 1 - ...t_hovernet_nuclear_type_post_processing.py | 1 - .../transforms/post}/test_watershed.py | 1 - .../transforms/post}/test_watershedd.py | 1 - .../transforms}/test_pathology_he_stain.py | 0 .../test_pathology_he_stain_dict.py | 0 tests/apps/reconstruction/__init__.py | 10 ++++++ tests/apps/reconstruction/nets/__init__.py | 10 ++++++ .../nets}/test_recon_net_utils.py | 3 +- 
.../reconstruction}/test_complex_utils.py | 1 - .../reconstruction}/test_fastmri_reader.py | 1 - .../reconstruction}/test_mri_utils.py | 1 - .../reconstruction/transforms/__init__.py | 10 ++++++ .../transforms}/test_kspace_mask.py | 0 ...est_reference_based_normalize_intensity.py | 1 - .../test_reference_based_spatial_cropd.py | 1 - tests/{ => apps}/test_auto3dseg_bundlegen.py | 1 - tests/{ => apps}/test_check_hash.py | 0 tests/{ => apps}/test_cross_validation.py | 5 ++- tests/{ => apps}/test_decathlondataset.py | 3 +- tests/{ => apps}/test_download_and_extract.py | 4 +-- tests/{ => apps}/test_download_url_yandex.py | 0 tests/{ => apps}/test_mednistdataset.py | 3 +- tests/{ => apps}/test_mmar_download.py | 1 - tests/{ => apps}/test_tciadataset.py | 4 +-- tests/apps/vista3d/__init__.py | 10 ++++++ .../test_point_based_window_inferer.py | 0 .../vista3d}/test_vista3d_sampler.py | 0 .../vista3d}/test_vista3d_transforms.py | 0 tests/bundle/__init__.py | 10 ++++++ tests/{ => bundle}/test_bundle_ckpt_export.py | 9 +++-- tests/{ => bundle}/test_bundle_download.py | 3 -- tests/{ => bundle}/test_bundle_get_data.py | 1 - .../test_bundle_push_to_hf_hub.py | 1 - tests/{ => bundle}/test_bundle_trt_export.py | 12 ++++--- tests/{ => bundle}/test_bundle_utils.py | 2 -- .../test_bundle_verify_metadata.py | 7 ++-- tests/{ => bundle}/test_bundle_verify_net.py | 8 +++-- tests/{ => bundle}/test_bundle_workflow.py | 34 ++++++++++--------- tests/{ => bundle}/test_component_locator.py | 0 tests/{ => bundle}/test_config_item.py | 0 tests/{ => bundle}/test_config_parser.py | 8 ++--- tests/{ => bundle}/test_reference_resolver.py | 0 tests/clang_format_utils.py | 3 +- tests/config/__init__.py | 10 ++++++ tests/{ => config}/test_cv2_dist.py | 1 - tests/croppers.py | 1 - .../meta_tensor}/test_meta_tensor.py | 1 - .../meta_tensor}/test_to_from_meta_tensord.py | 1 - tests/{ => data}/test_arraydataset.py | 0 tests/{ => data}/test_box_utils.py | 1 - tests/{ => data}/test_cachedataset.py | 0 .../{ => data}/test_cachedataset_parallel.py | 0 .../test_cachedataset_persistent_workers.py | 0 tests/{ => data}/test_cachentransdataset.py | 0 tests/{ => data}/test_check_missing_files.py | 0 .../test_create_cross_validation_datalist.py | 0 tests/{ => data}/test_csv_dataset.py | 0 tests/{ => data}/test_csv_iterable_dataset.py | 1 - tests/{ => data}/test_csv_saver.py | 0 tests/{ => data}/test_dataloader.py | 2 -- tests/{ => data}/test_dataset.py | 4 +-- tests/{ => data}/test_dataset_func.py | 0 tests/{ => data}/test_dataset_summary.py | 0 tests/{ => data}/test_fft_utils.py | 1 - tests/{ => data}/test_folder_layout.py | 0 tests/{ => data}/test_gdsdataset.py | 2 -- tests/{ => data}/test_grid_dataset.py | 1 - tests/{ => data}/test_handler_smartcache.py | 0 tests/{ => data}/test_hashing.py | 0 tests/{ => data}/test_header_correct.py | 0 tests/{ => data}/test_image_dataset.py | 0 tests/{ => data}/test_image_rw.py | 4 --- tests/{ => data}/test_init_reader.py | 1 - tests/{ => data}/test_is_supported_format.py | 0 tests/{ => data}/test_iterable_dataset.py | 0 tests/{ => data}/test_itk_torch_bridge.py | 5 ++- tests/{ => data}/test_itk_writer.py | 0 tests/{ => data}/test_list_data_collate.py | 0 tests/{ => data}/test_lmdbdataset.py | 2 -- tests/{ => data}/test_lmdbdataset_dist.py | 2 -- .../test_load_decathlon_datalist.py | 0 tests/{ => data}/test_make_nifti.py | 1 - tests/{ => data}/test_mapping_file.py | 0 .../test_masked_patch_wsi_dataset.py | 7 ++-- tests/{ => data}/test_nifti_header_revise.py | 0 tests/{ => data}/test_nifti_rw.py | 1 - 
tests/{ => data}/test_npzdictitemdataset.py | 0 tests/{ => data}/test_nrrd_reader.py | 0 tests/{ => data}/test_numpy_reader.py | 1 - tests/{ => data}/test_partition_dataset.py | 0 .../test_partition_dataset_classes.py | 0 tests/{ => data}/test_patch_dataset.py | 0 tests/{ => data}/test_patch_wsi_dataset.py | 7 ++-- tests/{ => data}/test_persistentdataset.py | 0 .../{ => data}/test_persistentdataset_dist.py | 3 -- tests/{ => data}/test_pil_reader.py | 0 tests/{ => data}/test_png_rw.py | 0 tests/{ => data}/test_resample_datalist.py | 0 tests/{ => data}/test_sampler_dist.py | 1 - .../test_select_cross_validation_folds.py | 0 tests/{ => data}/test_shuffle_buffer.py | 1 - .../test_sliding_patch_wsi_dataset.py | 11 +++--- tests/{ => data}/test_smartcachedataset.py | 1 - tests/{ => data}/test_synthetic.py | 0 tests/{ => data}/test_thread_buffer.py | 1 - tests/{ => data}/test_threadcontainer.py | 6 ++-- tests/{ => data}/test_video_datasets.py | 5 +-- .../test_weighted_random_sampler_dist.py | 1 - tests/{ => data}/test_zipdataset.py | 0 tests/{ => data/utils}/test_decollate.py | 2 -- tests/{ => data/utils}/test_dev_collate.py | 0 tests/{ => data/utils}/test_file_basename.py | 0 tests/{ => data/utils}/test_ori_ras_lps.py | 13 +++---- tests/{ => data/utils}/test_zoom_affine.py | 0 tests/engines/__init__.py | 10 ++++++ .../{ => engines}/test_ensemble_evaluator.py | 3 -- .../test_prepare_batch_default.py | 1 - .../test_prepare_batch_default_dist.py | 1 - .../test_prepare_batch_diffusion.py | 0 .../test_prepare_batch_extra_input.py | 1 - tests/fl/__init__.py | 10 ++++++ tests/fl/monai_algo/__init__.py | 10 ++++++ .../{ => fl/monai_algo}/test_fl_monai_algo.py | 4 +-- .../monai_algo}/test_fl_monai_algo_dist.py | 7 ++-- tests/{ => fl}/test_fl_monai_algo_stats.py | 4 +-- tests/fl/utils/__init__.py | 10 ++++++ .../{ => fl/utils}/test_fl_exchange_object.py | 1 - tests/handlers/__init__.py | 10 ++++++ .../test_handler_checkpoint_loader.py | 1 - .../test_handler_checkpoint_saver.py | 0 .../test_handler_classification_saver.py | 0 .../test_handler_classification_saver_dist.py | 1 - .../test_handler_clearml_image.py | 0 .../test_handler_clearml_stats.py | 0 .../test_handler_confusion_matrix.py | 0 .../test_handler_confusion_matrix_dist.py | 1 - .../test_handler_decollate_batch.py | 1 - .../{ => handlers}/test_handler_early_stop.py | 0 .../test_handler_garbage_collector.py | 0 .../test_handler_hausdorff_distance.py | 0 .../test_handler_ignite_metric.py | 1 - .../test_handler_lr_scheduler.py | 0 .../{ => handlers}/test_handler_mean_dice.py | 0 tests/{ => handlers}/test_handler_mean_iou.py | 0 .../test_handler_metrics_reloaded.py | 2 -- .../test_handler_metrics_saver.py | 0 .../test_handler_metrics_saver_dist.py | 3 +- tests/{ => handlers}/test_handler_mlflow.py | 2 -- tests/{ => handlers}/test_handler_nvtx.py | 1 - .../test_handler_panoptic_quality.py | 1 - .../test_handler_parameter_scheduler.py | 3 -- .../test_handler_post_processing.py | 1 - .../test_handler_prob_map_producer.py | 5 +-- .../test_handler_regression_metrics.py | 0 .../test_handler_regression_metrics_dist.py | 4 --- tests/{ => handlers}/test_handler_rocauc.py | 0 .../test_handler_rocauc_dist.py | 1 - tests/{ => handlers}/test_handler_stats.py | 0 .../test_handler_surface_distance.py | 0 tests/{ => handlers}/test_handler_tb_image.py | 1 - tests/{ => handlers}/test_handler_tb_stats.py | 0 .../{ => handlers}/test_handler_validation.py | 0 tests/{ => handlers}/test_trt_compile.py | 1 - .../test_write_metrics_reports.py | 0 
tests/hvd_evenly_divisible_all_gather.py | 1 - tests/inferers/__init__.py | 10 ++++++ tests/{ => inferers}/test_avg_merger.py | 1 - .../test_controlnet_inferers.py | 0 .../{ => inferers}/test_diffusion_inferer.py | 0 .../test_latent_diffusion_inferer.py | 0 tests/{ => inferers}/test_patch_inferer.py | 1 - tests/{ => inferers}/test_saliency_inferer.py | 0 tests/{ => inferers}/test_slice_inferer.py | 0 .../test_sliding_window_inference.py | 2 -- .../test_sliding_window_splitter.py | 1 - .../test_wsi_sliding_window_splitter.py | 6 ++-- tests/{ => inferers}/test_zarr_avg_merger.py | 0 tests/integration/__init__.py | 10 ++++++ .../test_auto3dseg_ensemble.py | 1 - tests/{ => integration}/test_auto3dseg_hpo.py | 2 -- .../test_deepedit_interaction.py | 0 .../test_downsample_block.py | 0 ..._hovernet_nuclear_type_post_processingd.py | 1 - .../test_integration_autorunner.py | 1 - .../test_integration_bundle_run.py | 15 ++++---- .../test_integration_classification_2d.py | 12 +++---- .../test_integration_determinism.py | 3 -- .../test_integration_fast_train.py | 1 - .../test_integration_gpu_customization.py | 1 - .../test_integration_lazy_samples.py | 1 - .../test_integration_nnunetv2_runner.py | 1 - .../test_integration_segmentation_3d.py | 9 +++-- .../test_integration_sliding_window.py | 1 - .../{ => integration}/test_integration_stn.py | 1 - .../test_integration_unet_2d.py | 3 -- .../test_integration_workers.py | 1 - .../test_integration_workflows.py | 3 -- .../test_integration_workflows_adversarial.py | 0 .../test_integration_workflows_gan.py | 1 - .../test_loader_semaphore.py | 0 tests/{ => integration}/test_mapping_filed.py | 0 tests/{ => integration}/test_meta_affine.py | 7 ++-- .../test_metatensor_integration.py | 9 ++--- tests/{ => integration}/test_module_list.py | 0 tests/{ => integration}/test_one_of.py | 0 tests/{ => integration}/test_pad_collation.py | 0 .../test_reg_loss_integration.py | 2 -- .../test_retinanet_predict_utils.py | 0 .../test_seg_loss_integration.py | 0 .../test_spatial_combine_transforms.py | 1 - .../test_testtimeaugmentation.py | 3 +- tests/{ => integration}/test_vis_gradbased.py | 0 tests/{ => integration}/test_vista3d_utils.py | 3 -- tests/lazy_transforms_utils.py | 2 +- tests/losses/__init__.py | 10 ++++++ tests/losses/deform/__init__.py | 10 ++++++ .../deform}/test_bending_energy.py | 0 .../deform}/test_diffusion_loss.py | 0 tests/losses/image_dissimilarity/__init__.py | 10 ++++++ .../test_global_mutual_information_loss.py | 6 ++-- ...local_normalized_cross_correlation_loss.py | 0 tests/{ => losses}/test_adversarial_loss.py | 0 tests/{ => losses}/test_barlow_twins_loss.py | 0 tests/{ => losses}/test_cldice_loss.py | 0 tests/{ => losses}/test_contrastive_loss.py | 0 tests/{ => losses}/test_dice_ce_loss.py | 0 tests/{ => losses}/test_dice_focal_loss.py | 1 - tests/{ => losses}/test_dice_loss.py | 1 - tests/{ => losses}/test_ds_loss.py | 4 --- tests/{ => losses}/test_focal_loss.py | 1 - .../test_generalized_dice_focal_loss.py | 1 - .../test_generalized_dice_loss.py | 1 - .../test_generalized_wasserstein_dice_loss.py | 2 -- tests/{ => losses}/test_giou_loss.py | 0 tests/{ => losses}/test_hausdorff_loss.py | 0 tests/{ => losses}/test_masked_dice_loss.py | 0 tests/{ => losses}/test_masked_loss.py | 1 - tests/{ => losses}/test_multi_scale.py | 1 - tests/{ => losses}/test_nacl_loss.py | 0 tests/{ => losses}/test_perceptual_loss.py | 1 - tests/{ => losses}/test_spectral_loss.py | 1 - tests/{ => losses}/test_ssim_loss.py | 0 tests/{ => losses}/test_sure_loss.py | 0 tests/{ => 
losses}/test_tversky_loss.py | 1 - tests/{ => losses}/test_unified_focal_loss.py | 0 tests/metrics/__init__.py | 10 ++++++ .../test_compute_confusion_matrix.py | 1 - tests/{ => metrics}/test_compute_f_beta.py | 1 - .../{ => metrics}/test_compute_fid_metric.py | 0 tests/{ => metrics}/test_compute_froc.py | 0 .../test_compute_generalized_dice.py | 0 tests/{ => metrics}/test_compute_meandice.py | 0 tests/{ => metrics}/test_compute_meaniou.py | 0 .../{ => metrics}/test_compute_mmd_metric.py | 0 .../test_compute_multiscalessim_metric.py | 0 .../test_compute_panoptic_quality.py | 4 +-- .../test_compute_regression_metrics.py | 0 tests/{ => metrics}/test_compute_roc_auc.py | 0 tests/{ => metrics}/test_compute_variance.py | 0 tests/{ => metrics}/test_cumulative.py | 1 - .../{ => metrics}/test_cumulative_average.py | 0 .../test_cumulative_average_dist.py | 1 - .../{ => metrics}/test_hausdorff_distance.py | 0 .../{ => metrics}/test_label_quality_score.py | 0 tests/{ => metrics}/test_loss_metric.py | 0 tests/{ => metrics}/test_metrics_reloaded.py | 0 tests/{ => metrics}/test_ssim_metric.py | 0 tests/{ => metrics}/test_surface_dice.py | 5 ++- tests/{ => metrics}/test_surface_distance.py | 0 tests/min_tests.py | 21 +++++++----- tests/networks/__init__.py | 10 ++++++ tests/networks/blocks/__init__.py | 10 ++++++ tests/networks/blocks/dints_block/__init__.py | 10 ++++++ .../blocks/dints_block}/test_acn_block.py | 0 .../dints_block}/test_factorized_increase.py | 0 .../dints_block}/test_factorized_reduce.py | 0 .../blocks/dints_block}/test_p3d_block.py | 0 tests/{ => networks/blocks}/test_adn.py | 2 -- .../blocks}/test_convolutions.py | 3 -- tests/{ => networks/blocks}/test_crf_cpu.py | 1 - tests/{ => networks/blocks}/test_crf_cuda.py | 1 - .../blocks}/test_crossattention.py | 1 - .../{ => networks/blocks}/test_denseblock.py | 4 --- .../blocks}/test_dynunet_block.py | 2 -- tests/{ => networks/blocks}/test_fpn_block.py | 2 -- .../blocks}/test_localnet_block.py | 0 tests/{ => networks/blocks}/test_mlp.py | 0 .../blocks}/test_patchembedding.py | 2 -- .../blocks}/test_regunet_block.py | 0 tests/{ => networks/blocks}/test_se_block.py | 1 - tests/{ => networks/blocks}/test_se_blocks.py | 2 -- .../blocks}/test_segresnet_block.py | 0 .../blocks}/test_selfattention.py | 2 -- .../{ => networks/blocks}/test_simple_aspp.py | 0 .../blocks}/test_spatialattention.py | 0 .../blocks}/test_subpixel_upsample.py | 1 - .../blocks}/test_text_encoding.py | 1 - .../blocks}/test_transformerblock.py | 0 .../{ => networks/blocks}/test_unetr_block.py | 3 -- .../blocks}/test_upsample_block.py | 0 tests/networks/blocks/warp/__init__.py | 10 ++++++ .../blocks/warp}/test_dvf2ddf.py | 0 tests/{ => networks/blocks/warp}/test_warp.py | 6 ++-- tests/networks/layers/__init__.py | 10 ++++++ tests/networks/layers/filtering/__init__.py | 10 ++++++ .../filtering}/test_bilateral_approx_cpu.py | 1 - .../filtering}/test_bilateral_approx_cuda.py | 1 - .../filtering}/test_bilateral_precise.py | 2 -- .../layers/filtering}/test_phl_cpu.py | 1 - .../layers/filtering}/test_phl_cuda.py | 1 - .../filtering}/test_trainable_bilateral.py | 2 -- .../test_trainable_joint_bilateral.py | 2 -- .../layers}/test_affine_transform.py | 3 -- .../layers}/test_apply_filter.py | 0 .../{ => networks/layers}/test_channel_pad.py | 0 .../layers}/test_conjugate_gradient.py | 0 tests/{ => networks/layers}/test_drop_path.py | 0 tests/{ => networks/layers}/test_gaussian.py | 0 .../layers}/test_gaussian_filter.py | 2 -- .../{ => networks/layers}/test_get_layers.py | 0 tests/{ => 
networks/layers}/test_gmm.py | 1 - tests/{ => networks/layers}/test_grid_pull.py | 1 - .../layers}/test_hilbert_transform.py | 2 -- tests/{ => networks/layers}/test_lltm.py | 1 - .../layers}/test_median_filter.py | 0 tests/{ => networks/layers}/test_polyval.py | 0 .../layers}/test_preset_filters.py | 0 .../layers}/test_savitzky_golay_filter.py | 4 --- .../layers}/test_separable_filter.py | 0 .../layers}/test_skip_connection.py | 0 .../layers}/test_vector_quantizer.py | 0 .../{ => networks/layers}/test_weight_init.py | 0 tests/networks/nets/__init__.py | 10 ++++++ tests/networks/nets/dints/__init__.py | 10 ++++++ .../nets/dints}/test_dints_cell.py | 0 .../nets/dints}/test_dints_mixop.py | 1 - tests/networks/nets/regunet/__init__.py | 10 ++++++ .../nets/regunet}/test_localnet.py | 1 - .../nets/regunet}/test_regunet.py | 1 - tests/{ => networks/nets}/test_ahnet.py | 6 ---- .../{ => networks/nets}/test_attentionunet.py | 1 - tests/{ => networks/nets}/test_autoencoder.py | 1 - .../{ => networks/nets}/test_autoencoderkl.py | 0 tests/{ => networks/nets}/test_basic_unet.py | 1 - .../nets}/test_basic_unetplusplus.py | 1 - .../nets}/test_bundle_init_bundle.py | 1 - .../nets}/test_cell_sam_wrapper.py | 0 tests/{ => networks/nets}/test_controlnet.py | 0 tests/{ => networks/nets}/test_daf3d.py | 1 - tests/{ => networks/nets}/test_densenet.py | 2 -- .../nets}/test_diffusion_model_unet.py | 0 .../{ => networks/nets}/test_dints_network.py | 2 -- .../{ => networks/nets}/test_discriminator.py | 1 - tests/{ => networks/nets}/test_dynunet.py | 3 -- .../{ => networks/nets}/test_efficientnet.py | 11 +++--- .../{ => networks/nets}/test_flexible_unet.py | 3 -- .../nets}/test_fullyconnectednet.py | 0 tests/{ => networks/nets}/test_generator.py | 1 - tests/{ => networks/nets}/test_globalnet.py | 2 -- tests/{ => networks/nets}/test_highresnet.py | 1 - tests/{ => networks/nets}/test_hovernet.py | 1 - tests/{ => networks/nets}/test_mednext.py | 0 tests/{ => networks/nets}/test_milmodel.py | 1 - tests/{ => networks/nets}/test_net_adapter.py | 1 - .../nets}/test_network_consistency.py | 1 - .../nets}/test_patch_gan_dicriminator.py | 0 tests/{ => networks/nets}/test_quicknat.py | 1 - tests/{ => networks/nets}/test_resnet.py | 2 -- tests/{ => networks/nets}/test_segresnet.py | 2 -- .../{ => networks/nets}/test_segresnet_ds.py | 1 - tests/{ => networks/nets}/test_senet.py | 5 ++- .../nets}/test_spade_autoencoderkl.py | 0 .../nets}/test_spade_diffusion_model_unet.py | 0 .../{ => networks/nets}/test_spade_vaegan.py | 0 tests/{ => networks/nets}/test_swin_unetr.py | 1 - .../nets}/test_torchvision_fc_model.py | 2 -- tests/{ => networks/nets}/test_transchex.py | 1 - tests/{ => networks/nets}/test_transformer.py | 1 - tests/{ => networks/nets}/test_unet.py | 1 - tests/{ => networks/nets}/test_unetr.py | 1 - .../nets}/test_varautoencoder.py | 1 - tests/{ => networks/nets}/test_vista3d.py | 1 - tests/{ => networks/nets}/test_vit.py | 1 - tests/{ => networks/nets}/test_vitautoenc.py | 1 - tests/{ => networks/nets}/test_vnet.py | 1 - tests/{ => networks/nets}/test_voxelmorph.py | 1 - tests/{ => networks/nets}/test_vqvae.py | 0 .../nets}/test_vqvaetransformer_inferer.py | 0 tests/networks/schedulers/__init__.py | 10 ++++++ .../schedulers}/test_scheduler_ddim.py | 0 .../schedulers}/test_scheduler_ddpm.py | 0 .../schedulers}/test_scheduler_pndm.py | 0 .../{ => networks}/test_bundle_onnx_export.py | 7 ++-- tests/{ => networks}/test_convert_to_onnx.py | 1 - .../test_convert_to_torchscript.py | 0 tests/{ => 
networks}/test_convert_to_trt.py | 1 - tests/{ => networks}/test_save_state.py | 0 tests/{ => networks}/test_to_onehot.py | 0 tests/{ => networks}/test_varnet.py | 1 - tests/networks/utils/__init__.py | 10 ++++++ .../utils}/test_copy_model_state.py | 0 tests/{ => networks/utils}/test_eval_mode.py | 0 .../utils}/test_freeze_layers.py | 3 +- .../utils}/test_replace_module.py | 1 - tests/{ => networks/utils}/test_train_mode.py | 0 tests/ngc_bundle_download.py | 2 -- tests/optimizers/__init__.py | 10 ++++++ .../test_generate_param_groups.py | 1 - tests/{ => optimizers}/test_lr_finder.py | 5 ++- tests/{ => optimizers}/test_lr_scheduler.py | 0 tests/{ => optimizers}/test_optim_novograd.py | 0 tests/padders.py | 1 - tests/profile_subclass/__init__.py | 10 ++++++ tests/runner.py | 24 ++++++++----- tests/test_auto3dseg.py | 1 - tests/test_call_dist.py | 1 - tests/test_masked_autoencoder_vit.py | 1 - tests/test_query_memory.py | 1 - tests/test_rand_torchiod.py | 1 - tests/test_set_visible_devices.py | 1 - tests/test_timedcall_dist.py | 1 - tests/test_torchiod.py | 1 - tests/transforms/__init__.py | 10 ++++++ tests/transforms/compose/__init__.py | 10 ++++++ .../{ => transforms/compose}/test_compose.py | 0 .../{ => transforms/compose}/test_some_of.py | 4 +-- tests/transforms/croppad/__init__.py | 10 ++++++ .../croppad}/test_rand_weighted_crop.py | 0 .../croppad}/test_rand_weighted_cropd.py | 1 - tests/transforms/functional/__init__.py | 10 ++++++ .../{ => transforms/functional}/test_apply.py | 1 - .../functional}/test_resample.py | 1 - tests/transforms/intensity/__init__.py | 10 ++++++ .../intensity}/test_compute_ho_ver_maps.py | 1 - .../intensity}/test_compute_ho_ver_maps_d.py | 1 - .../intensity}/test_foreground_mask.py | 1 - .../intensity}/test_foreground_maskd.py | 1 - .../intensity}/test_rand_histogram_shiftd.py | 1 - .../test_scale_intensity_range_percentiles.py | 1 - ...test_scale_intensity_range_percentilesd.py | 1 - tests/transforms/inverse/__init__.py | 10 ++++++ .../inverse}/test_inverse_array.py | 1 - .../inverse}/test_traceable_transform.py | 0 tests/transforms/post/__init__.py | 10 ++++++ .../post}/test_label_filterd.py | 1 - tests/{ => transforms/post}/test_probnms.py | 1 - tests/{ => transforms/post}/test_probnmsd.py | 1 - .../post}/test_remove_small_objects.py | 1 - tests/transforms/spatial/__init__.py | 10 ++++++ .../spatial}/test_convert_box_points.py | 2 -- .../spatial}/test_grid_patch.py | 1 - .../spatial}/test_grid_patchd.py | 1 - .../spatial}/test_rand_grid_patch.py | 1 - .../spatial}/test_rand_grid_patchd.py | 1 - .../spatial}/test_spatial_resampled.py | 1 - tests/{ => transforms}/test_activations.py | 1 - tests/{ => transforms}/test_activationsd.py | 1 - tests/{ => transforms}/test_adaptors.py | 0 .../test_add_coordinate_channels.py | 1 - .../test_add_coordinate_channelsd.py | 1 - .../test_add_extreme_points_channel.py | 1 - .../test_add_extreme_points_channeld.py | 1 - .../{ => transforms}/test_adjust_contrast.py | 1 - .../{ => transforms}/test_adjust_contrastd.py | 1 - tests/{ => transforms}/test_affine.py | 2 -- tests/{ => transforms}/test_affine_grid.py | 1 - tests/{ => transforms}/test_affined.py | 1 - .../{ => transforms}/test_as_channel_last.py | 1 - .../{ => transforms}/test_as_channel_lastd.py | 1 - tests/{ => transforms}/test_as_discrete.py | 1 - tests/{ => transforms}/test_as_discreted.py | 1 - tests/{ => transforms}/test_border_pad.py | 0 tests/{ => transforms}/test_border_padd.py | 0 tests/{ => transforms}/test_bounding_rect.py | 1 - tests/{ => 
transforms}/test_bounding_rectd.py | 1 - tests/{ => transforms}/test_cast_to_type.py | 1 - tests/{ => transforms}/test_cast_to_typed.py | 1 - .../test_center_scale_crop.py | 0 .../test_center_scale_cropd.py | 0 .../test_center_spatial_crop.py | 0 .../test_center_spatial_cropd.py | 0 .../test_classes_to_indices.py | 1 - .../test_classes_to_indicesd.py | 1 - .../test_clip_intensity_percentiles.py | 2 -- .../test_clip_intensity_percentilesd.py | 5 +-- .../test_compose_get_number_conversions.py | 0 tests/{ => transforms}/test_concat_itemsd.py | 1 - .../test_convert_to_multi_channel.py | 1 - .../test_convert_to_multi_channeld.py | 0 tests/{ => transforms}/test_copy_itemsd.py | 1 - .../test_create_grid_and_affine.py | 2 -- .../{ => transforms}/test_crop_foreground.py | 1 - .../{ => transforms}/test_crop_foregroundd.py | 1 - .../test_cucim_dict_transform.py | 1 - .../{ => transforms}/test_cucim_transform.py | 1 - tests/{ => transforms}/test_data_stats.py | 0 tests/{ => transforms}/test_data_statsd.py | 0 tests/{ => transforms}/test_delete_itemsd.py | 0 .../{ => transforms}/test_detect_envelope.py | 2 -- .../test_distance_transform_edt.py | 1 - tests/{ => transforms}/test_divisible_pad.py | 0 tests/{ => transforms}/test_divisible_padd.py | 0 .../test_ensure_channel_first.py | 0 .../test_ensure_channel_firstd.py | 0 tests/{ => transforms}/test_ensure_type.py | 1 - tests/{ => transforms}/test_ensure_typed.py | 1 - .../{ => transforms}/test_fg_bg_to_indices.py | 1 - .../test_fg_bg_to_indicesd.py | 1 - tests/{ => transforms}/test_fill_holes.py | 1 - tests/{ => transforms}/test_fill_holesd.py | 1 - .../test_flatten_sub_keysd.py | 0 tests/{ => transforms}/test_flip.py | 1 - tests/{ => transforms}/test_flipd.py | 1 - tests/{ => transforms}/test_fourier.py | 1 - .../{ => transforms}/test_gaussian_sharpen.py | 1 - .../test_gaussian_sharpend.py | 1 - .../{ => transforms}/test_gaussian_smooth.py | 1 - .../{ => transforms}/test_gaussian_smoothd.py | 1 - ...est_generate_label_classes_crop_centers.py | 1 - ...est_generate_pos_neg_label_crop_centers.py | 1 - .../test_generate_spatial_bounding_box.py | 1 - .../test_get_extreme_points.py | 1 - tests/{ => transforms}/test_gibbs_noise.py | 1 - tests/{ => transforms}/test_gibbs_noised.py | 1 - .../{ => transforms}/test_grid_distortion.py | 1 - .../{ => transforms}/test_grid_distortiond.py | 1 - tests/{ => transforms}/test_grid_split.py | 1 - tests/{ => transforms}/test_grid_splitd.py | 1 - .../test_histogram_normalize.py | 1 - .../test_histogram_normalized.py | 1 - tests/{ => transforms}/test_image_filter.py | 0 .../{ => transforms}/test_intensity_stats.py | 1 - .../{ => transforms}/test_intensity_statsd.py | 0 tests/{ => transforms}/test_inverse.py | 0 .../test_inverse_collation.py | 0 tests/{ => transforms}/test_invert.py | 1 - tests/{ => transforms}/test_invertd.py | 1 - .../test_k_space_spike_noise.py | 1 - .../test_k_space_spike_noised.py | 1 - .../test_keep_largest_connected_component.py | 1 - .../test_keep_largest_connected_componentd.py | 1 - tests/{ => transforms}/test_label_filter.py | 1 - .../{ => transforms}/test_label_to_contour.py | 1 - .../test_label_to_contourd.py | 1 - tests/{ => transforms}/test_label_to_mask.py | 1 - tests/{ => transforms}/test_label_to_maskd.py | 1 - tests/{ => transforms}/test_load_image.py | 2 -- tests/{ => transforms}/test_load_imaged.py | 3 -- .../test_load_spacing_orientation.py | 6 ++-- .../test_map_and_generate_sampling_centers.py | 1 - .../test_map_binary_to_indices.py | 1 - .../test_map_classes_to_indices.py | 1 - .../{ 
=> transforms}/test_map_label_value.py | 1 - .../{ => transforms}/test_map_label_valued.py | 1 - tests/{ => transforms}/test_map_transform.py | 0 tests/{ => transforms}/test_mask_intensity.py | 1 - .../{ => transforms}/test_mask_intensityd.py | 0 tests/{ => transforms}/test_mean_ensemble.py | 1 - tests/{ => transforms}/test_mean_ensembled.py | 1 - tests/{ => transforms}/test_median_smooth.py | 1 - tests/{ => transforms}/test_median_smoothd.py | 1 - .../test_morphological_ops.py | 1 - .../{ => transforms}/test_nifti_endianness.py | 0 .../test_normalize_intensity.py | 1 - .../test_normalize_intensityd.py | 1 - tests/{ => transforms}/test_nvtx_decorator.py | 1 - tests/{ => transforms}/test_nvtx_transform.py | 0 tests/{ => transforms}/test_orientation.py | 1 - tests/{ => transforms}/test_orientationd.py | 1 - .../test_rand_adjust_contrast.py | 1 - .../test_rand_adjust_contrastd.py | 1 - tests/{ => transforms}/test_rand_affine.py | 1 - .../{ => transforms}/test_rand_affine_grid.py | 1 - tests/{ => transforms}/test_rand_affined.py | 1 - tests/{ => transforms}/test_rand_axis_flip.py | 1 - .../{ => transforms}/test_rand_axis_flipd.py | 1 - .../{ => transforms}/test_rand_bias_field.py | 1 - .../{ => transforms}/test_rand_bias_fieldd.py | 0 .../test_rand_coarse_dropout.py | 1 - .../test_rand_coarse_dropoutd.py | 0 .../test_rand_coarse_shuffle.py | 0 .../test_rand_coarse_shuffled.py | 0 .../test_rand_crop_by_label_classes.py | 1 - .../test_rand_crop_by_label_classesd.py | 1 - .../test_rand_crop_by_pos_neg_label.py | 1 - .../test_rand_crop_by_pos_neg_labeld.py | 1 - .../test_rand_cucim_dict_transform.py | 1 - .../test_rand_cucim_transform.py | 1 - .../{ => transforms}/test_rand_deform_grid.py | 1 - .../{ => transforms}/test_rand_elastic_2d.py | 1 - .../{ => transforms}/test_rand_elastic_3d.py | 1 - .../{ => transforms}/test_rand_elasticd_2d.py | 1 - .../{ => transforms}/test_rand_elasticd_3d.py | 1 - tests/{ => transforms}/test_rand_flip.py | 1 - tests/{ => transforms}/test_rand_flipd.py | 1 - .../test_rand_gaussian_noise.py | 1 - .../test_rand_gaussian_noised.py | 1 - .../test_rand_gaussian_sharpen.py | 1 - .../test_rand_gaussian_sharpend.py | 1 - .../test_rand_gaussian_smooth.py | 1 - .../test_rand_gaussian_smoothd.py | 1 - .../{ => transforms}/test_rand_gibbs_noise.py | 1 - .../test_rand_gibbs_noised.py | 1 - .../test_rand_grid_distortion.py | 1 - .../test_rand_grid_distortiond.py | 1 - .../test_rand_histogram_shift.py | 1 - .../test_rand_k_space_spike_noise.py | 1 - .../test_rand_k_space_spike_noised.py | 1 - .../test_rand_rician_noise.py | 1 - .../test_rand_rician_noised.py | 1 - tests/{ => transforms}/test_rand_rotate.py | 3 -- tests/{ => transforms}/test_rand_rotate90.py | 1 - tests/{ => transforms}/test_rand_rotate90d.py | 1 - tests/{ => transforms}/test_rand_rotated.py | 2 -- .../{ => transforms}/test_rand_scale_crop.py | 0 .../{ => transforms}/test_rand_scale_cropd.py | 0 .../test_rand_scale_intensity.py | 1 - .../test_rand_scale_intensity_fixed_mean.py | 1 - .../test_rand_scale_intensity_fixed_meand.py | 1 - .../test_rand_scale_intensityd.py | 1 - .../test_rand_shift_intensity.py | 1 - .../test_rand_shift_intensityd.py | 1 - .../test_rand_simulate_low_resolution.py | 1 - .../test_rand_simulate_low_resolutiond.py | 1 - .../test_rand_spatial_crop.py | 0 .../test_rand_spatial_crop_samples.py | 0 .../test_rand_spatial_crop_samplesd.py | 1 - .../test_rand_spatial_cropd.py | 0 .../test_rand_std_shift_intensity.py | 1 - .../test_rand_std_shift_intensityd.py | 1 - tests/{ => 
transforms}/test_rand_zoom.py | 1 - tests/{ => transforms}/test_rand_zoomd.py | 1 - tests/{ => transforms}/test_randidentity.py | 2 -- tests/{ => transforms}/test_random_order.py | 6 +--- .../{ => transforms}/test_randtorchvisiond.py | 1 - tests/{ => transforms}/test_regularization.py | 3 -- .../test_remove_repeated_channel.py | 1 - .../test_remove_repeated_channeld.py | 1 - tests/{ => transforms}/test_repeat_channel.py | 1 - .../{ => transforms}/test_repeat_channeld.py | 1 - .../test_resample_backends.py | 1 - .../test_resample_to_match.py | 1 - .../test_resample_to_matchd.py | 1 - tests/{ => transforms}/test_resampler.py | 1 - tests/{ => transforms}/test_resize.py | 1 - .../test_resize_with_pad_or_crop.py | 1 - .../test_resize_with_pad_or_cropd.py | 3 +- tests/{ => transforms}/test_resized.py | 1 - tests/{ => transforms}/test_rotate.py | 2 -- tests/{ => transforms}/test_rotate90.py | 3 -- tests/{ => transforms}/test_rotate90d.py | 1 - tests/{ => transforms}/test_rotated.py | 3 -- .../test_save_classificationd.py | 0 tests/{ => transforms}/test_save_image.py | 0 tests/{ => transforms}/test_save_imaged.py | 0 .../test_savitzky_golay_smooth.py | 1 - .../test_savitzky_golay_smoothd.py | 1 - .../{ => transforms}/test_scale_intensity.py | 1 - .../test_scale_intensity_fixed_mean.py | 1 - .../test_scale_intensity_range.py | 1 - .../test_scale_intensity_ranged.py | 1 - .../{ => transforms}/test_scale_intensityd.py | 1 - tests/{ => transforms}/test_select_itemsd.py | 0 .../{ => transforms}/test_shift_intensity.py | 1 - .../{ => transforms}/test_shift_intensityd.py | 1 - .../test_signal_continuouswavelet.py | 5 +-- .../{ => transforms}/test_signal_fillempty.py | 6 ++-- .../test_signal_fillemptyd.py | 6 ++-- .../test_signal_rand_add_gaussiannoise.py | 6 ++-- .../test_signal_rand_add_sine.py | 4 ++- .../test_signal_rand_add_sine_partial.py | 6 ++-- .../test_signal_rand_add_squarepulse.py | 6 ++-- ...est_signal_rand_add_squarepulse_partial.py | 6 ++-- .../{ => transforms}/test_signal_rand_drop.py | 6 ++-- .../test_signal_rand_scale.py | 4 ++- .../test_signal_rand_shift.py | 6 ++-- .../test_signal_remove_frequency.py | 6 ++-- tests/{ => transforms}/test_smooth_field.py | 1 - tests/{ => transforms}/test_sobel_gradient.py | 0 .../{ => transforms}/test_sobel_gradientd.py | 0 tests/{ => transforms}/test_spacing.py | 1 - tests/{ => transforms}/test_spacingd.py | 1 - tests/{ => transforms}/test_spatial_crop.py | 0 tests/{ => transforms}/test_spatial_cropd.py | 0 tests/{ => transforms}/test_spatial_pad.py | 0 tests/{ => transforms}/test_spatial_padd.py | 0 .../{ => transforms}/test_spatial_resample.py | 1 - tests/{ => transforms}/test_squeezedim.py | 1 - tests/{ => transforms}/test_squeezedimd.py | 1 - .../test_std_shift_intensity.py | 1 - .../test_std_shift_intensityd.py | 1 - .../test_threshold_intensity.py | 1 - .../test_threshold_intensityd.py | 1 - tests/{ => transforms}/test_to_contiguous.py | 1 - tests/{ => transforms}/test_to_cupy.py | 1 - tests/{ => transforms}/test_to_cupyd.py | 1 - tests/{ => transforms}/test_to_device.py | 1 - tests/{ => transforms}/test_to_deviced.py | 1 - tests/{ => transforms}/test_to_numpy.py | 1 - tests/{ => transforms}/test_to_numpyd.py | 1 - tests/{ => transforms}/test_to_pil.py | 1 - tests/{ => transforms}/test_to_pild.py | 1 - tests/{ => transforms}/test_to_tensor.py | 1 - tests/{ => transforms}/test_to_tensord.py | 1 - tests/{ => transforms}/test_torchvision.py | 1 - tests/{ => transforms}/test_torchvisiond.py | 1 - tests/{ => transforms}/test_transform.py | 0 tests/{ => 
transforms}/test_transpose.py | 1 - tests/{ => transforms}/test_transposed.py | 1 - ...est_ultrasound_confidence_map_transform.py | 12 ++++--- .../test_utils_pytorch_numpy_unification.py | 1 - tests/{ => transforms}/test_vote_ensemble.py | 1 - tests/{ => transforms}/test_vote_ensembled.py | 1 - .../test_with_allow_missing_keys.py | 0 tests/{ => transforms}/test_zoom.py | 1 - tests/{ => transforms}/test_zoomd.py | 1 - tests/transforms/transform/__init__.py | 10 ++++++ .../transform}/test_randomizable.py | 0 .../test_randomizable_transform_type.py | 0 tests/transforms/utility/__init__.py | 10 ++++++ .../test_apply_transform_to_points.py | 0 .../test_apply_transform_to_pointsd.py | 0 .../{ => transforms/utility}/test_identity.py | 1 - .../utility}/test_identityd.py | 1 - tests/{ => transforms/utility}/test_lambda.py | 1 - .../{ => transforms/utility}/test_lambdad.py | 1 - .../utility}/test_rand_lambda.py | 1 - .../utility}/test_rand_lambdad.py | 1 - .../utility}/test_simulatedelay.py | 1 - .../utility}/test_simulatedelayd.py | 1 - .../{ => transforms/utility}/test_splitdim.py | 1 - .../utility}/test_splitdimd.py | 0 tests/transforms/utils/__init__.py | 10 ++++++ .../utils}/test_correct_crop_centers.py | 1 - .../utils}/test_get_unique_labels.py | 1 - .../utils}/test_print_transform_backends.py | 0 .../{ => transforms/utils}/test_soft_clip.py | 0 tests/utils/__init__.py | 10 ++++++ tests/utils/enums/__init__.py | 10 ++++++ tests/{ => utils/enums}/test_hovernet_loss.py | 0 tests/{ => utils/enums}/test_ordering.py | 0 tests/{ => utils/enums}/test_wsireader.py | 16 ++++----- tests/utils/misc/__init__.py | 10 ++++++ tests/{ => utils/misc}/test_ensure_tuple.py | 1 - tests/{ => utils/misc}/test_monai_env_vars.py | 0 .../{ => utils/misc}/test_monai_utils_misc.py | 0 tests/{ => utils/misc}/test_str2bool.py | 0 tests/{ => utils/misc}/test_str2list.py | 0 tests/{ => utils}/test_alias.py | 5 ++- tests/{ => utils}/test_component_store.py | 0 tests/{ => utils}/test_deprecated.py | 0 tests/{ => utils}/test_enum_bound_interp.py | 1 - .../test_evenly_divisible_all_gather_dist.py | 1 - tests/{ => utils}/test_get_package_version.py | 0 tests/{ => utils}/test_handler_logfile.py | 1 - .../{ => utils}/test_handler_metric_logger.py | 1 - tests/{ => utils}/test_list_to_dict.py | 0 tests/{ => utils}/test_look_up_option.py | 0 tests/{ => utils}/test_optional_import.py | 0 tests/{ => utils}/test_pad_mode.py | 1 - tests/{ => utils}/test_profiling.py | 1 - tests/{ => utils}/test_rankfilter_dist.py | 2 -- tests/{ => utils}/test_require_pkg.py | 0 tests/{ => utils}/test_sample_slices.py | 1 - tests/{ => utils}/test_set_determinism.py | 2 -- tests/{ => utils}/test_squeeze_unsqueeze.py | 0 tests/{ => utils}/test_state_cacher.py | 0 tests/{ => utils}/test_torchscript_utils.py | 0 tests/{ => utils}/test_version.py | 0 tests/{ => utils}/test_version_after.py | 0 tests/utils/type_conversion/__init__.py | 10 ++++++ .../test_convert_data_type.py | 1 - .../test_get_equivalent_dtype.py | 1 - .../type_conversion}/test_safe_dtype_range.py | 1 - tests/visualize/__init__.py | 10 ++++++ tests/{ => visualize}/test_img2tensorboard.py | 0 .../test_occlusion_sensitivity.py | 0 .../test_plot_2d_or_3d_image.py | 1 - tests/{ => visualize}/test_vis_cam.py | 0 tests/{ => visualize}/test_vis_gradcam.py | 2 -- tests/visualize/utils/__init__.py | 10 ++++++ .../utils}/test_blend_images.py | 1 - tests/{ => visualize/utils}/test_matshow3d.py | 12 ++++--- 840 files changed, 856 insertions(+), 795 deletions(-) create mode 100644 tests/apps/__init__.py 
create mode 100644 tests/apps/deepedit/__init__.py rename tests/{ => apps/deepedit}/test_deepedit_transforms.py (100%) create mode 100644 tests/apps/deepgrow/__init__.py rename tests/{ => apps/deepgrow}/test_deepgrow_dataset.py (100%) create mode 100644 tests/apps/deepgrow/transforms/__init__.py rename tests/{ => apps/deepgrow/transforms}/test_deepgrow_interaction.py (100%) rename tests/{ => apps/deepgrow/transforms}/test_deepgrow_transforms.py (100%) create mode 100644 tests/apps/detection/__init__.py create mode 100644 tests/apps/detection/metrics/__init__.py rename tests/{ => apps/detection/metrics}/test_detection_coco_metrics.py (100%) create mode 100644 tests/apps/detection/networks/__init__.py rename tests/{ => apps/detection/networks}/test_retinanet.py (99%) rename tests/{ => apps/detection/networks}/test_retinanet_detector.py (99%) rename tests/{ => apps/detection}/test_box_transform.py (99%) create mode 100644 tests/apps/detection/utils/__init__.py rename tests/{ => apps/detection/utils}/test_anchor_box.py (99%) rename tests/{ => apps/detection/utils}/test_atss_box_matcher.py (99%) rename tests/{ => apps/detection/utils}/test_box_coder.py (99%) rename tests/{ => apps/detection/utils}/test_detector_boxselector.py (99%) rename tests/{ => apps/detection/utils}/test_detector_utils.py (99%) rename tests/{ => apps/detection/utils}/test_hardnegsampler.py (99%) create mode 100644 tests/apps/maisi/networks/__init__.py rename tests/{ => apps/maisi/networks}/test_autoencoderkl_maisi.py (99%) rename tests/{ => apps/maisi/networks}/test_controlnet_maisi.py (99%) rename tests/{ => apps/maisi/networks}/test_diffusion_model_unet_maisi.py (100%) create mode 100644 tests/apps/nuclick/__init__.py rename tests/{ => apps/nuclick}/test_nuclick_transforms.py (100%) create mode 100644 tests/apps/pathology/__init__.py create mode 100644 tests/apps/pathology/handlers/__init__.py rename tests/{ => apps/pathology/handlers}/test_from_engine_hovernet.py (99%) rename tests/{ => apps/pathology}/test_lesion_froc.py (100%) rename tests/{ => apps/pathology}/test_pathology_prob_nms.py (100%) rename tests/{ => apps/pathology}/test_prepare_batch_hovernet.py (99%) rename tests/{ => apps/pathology}/test_sliding_window_hovernet_inference.py (99%) create mode 100644 tests/apps/pathology/transforms/__init__.py create mode 100644 tests/apps/pathology/transforms/post/__init__.py rename tests/{ => apps/pathology/transforms/post}/test_generate_distance_map.py (99%) rename tests/{ => apps/pathology/transforms/post}/test_generate_distance_mapd.py (99%) rename tests/{ => apps/pathology/transforms/post}/test_generate_instance_border.py (99%) rename tests/{ => apps/pathology/transforms/post}/test_generate_instance_borderd.py (99%) rename tests/{ => apps/pathology/transforms/post}/test_generate_instance_centroid.py (99%) rename tests/{ => apps/pathology/transforms/post}/test_generate_instance_centroidd.py (99%) rename tests/{ => apps/pathology/transforms/post}/test_generate_instance_contour.py (99%) rename tests/{ => apps/pathology/transforms/post}/test_generate_instance_contourd.py (99%) rename tests/{ => apps/pathology/transforms/post}/test_generate_instance_type.py (99%) rename tests/{ => apps/pathology/transforms/post}/test_generate_instance_typed.py (99%) rename tests/{ => apps/pathology/transforms/post}/test_generate_succinct_contour.py (100%) rename tests/{ => apps/pathology/transforms/post}/test_generate_succinct_contourd.py (100%) rename tests/{ => apps/pathology/transforms/post}/test_generate_watershed_markers.py (99%) 
rename tests/{ => apps/pathology/transforms/post}/test_generate_watershed_markersd.py (99%) rename tests/{ => apps/pathology/transforms/post}/test_generate_watershed_mask.py (99%) rename tests/{ => apps/pathology/transforms/post}/test_generate_watershed_maskd.py (99%) rename tests/{ => apps/pathology/transforms/post}/test_hovernet_instance_map_post_processing.py (99%) rename tests/{ => apps/pathology/transforms/post}/test_hovernet_instance_map_post_processingd.py (99%) rename tests/{ => apps/pathology/transforms/post}/test_hovernet_nuclear_type_post_processing.py (99%) rename tests/{ => apps/pathology/transforms/post}/test_watershed.py (99%) rename tests/{ => apps/pathology/transforms/post}/test_watershedd.py (99%) rename tests/{ => apps/pathology/transforms}/test_pathology_he_stain.py (100%) rename tests/{ => apps/pathology/transforms}/test_pathology_he_stain_dict.py (100%) create mode 100644 tests/apps/reconstruction/__init__.py create mode 100644 tests/apps/reconstruction/nets/__init__.py rename tests/{ => apps/reconstruction/nets}/test_recon_net_utils.py (96%) rename tests/{ => apps/reconstruction}/test_complex_utils.py (99%) rename tests/{ => apps/reconstruction}/test_fastmri_reader.py (99%) rename tests/{ => apps/reconstruction}/test_mri_utils.py (99%) create mode 100644 tests/apps/reconstruction/transforms/__init__.py rename tests/{ => apps/reconstruction/transforms}/test_kspace_mask.py (100%) rename tests/{ => apps/reconstruction/transforms}/test_reference_based_normalize_intensity.py (99%) rename tests/{ => apps/reconstruction/transforms}/test_reference_based_spatial_cropd.py (99%) rename tests/{ => apps}/test_auto3dseg_bundlegen.py (99%) rename tests/{ => apps}/test_check_hash.py (100%) rename tests/{ => apps}/test_cross_validation.py (96%) rename tests/{ => apps}/test_decathlondataset.py (97%) rename tests/{ => apps}/test_download_and_extract.py (96%) rename tests/{ => apps}/test_download_url_yandex.py (100%) rename tests/{ => apps}/test_mednistdataset.py (96%) rename tests/{ => apps}/test_mmar_download.py (99%) rename tests/{ => apps}/test_tciadataset.py (97%) create mode 100644 tests/apps/vista3d/__init__.py rename tests/{ => apps/vista3d}/test_point_based_window_inferer.py (100%) rename tests/{ => apps/vista3d}/test_vista3d_sampler.py (100%) rename tests/{ => apps/vista3d}/test_vista3d_transforms.py (100%) create mode 100644 tests/bundle/__init__.py rename tests/{ => bundle}/test_bundle_ckpt_export.py (93%) rename tests/{ => bundle}/test_bundle_download.py (99%) rename tests/{ => bundle}/test_bundle_get_data.py (99%) rename tests/{ => bundle}/test_bundle_push_to_hf_hub.py (99%) rename tests/{ => bundle}/test_bundle_trt_export.py (93%) rename tests/{ => bundle}/test_bundle_utils.py (99%) rename tests/{ => bundle}/test_bundle_verify_metadata.py (91%) rename tests/{ => bundle}/test_bundle_verify_net.py (94%) rename tests/{ => bundle}/test_bundle_workflow.py (88%) rename tests/{ => bundle}/test_component_locator.py (100%) rename tests/{ => bundle}/test_config_item.py (100%) rename tests/{ => bundle}/test_config_parser.py (98%) rename tests/{ => bundle}/test_reference_resolver.py (100%) create mode 100644 tests/config/__init__.py rename tests/{ => config}/test_cv2_dist.py (99%) rename tests/{ => data/meta_tensor}/test_meta_tensor.py (99%) rename tests/{ => data/meta_tensor}/test_to_from_meta_tensord.py (99%) rename tests/{ => data}/test_arraydataset.py (100%) rename tests/{ => data}/test_box_utils.py (99%) rename tests/{ => data}/test_cachedataset.py (100%) rename tests/{ => 
data}/test_cachedataset_parallel.py (100%) rename tests/{ => data}/test_cachedataset_persistent_workers.py (100%) rename tests/{ => data}/test_cachentransdataset.py (100%) rename tests/{ => data}/test_check_missing_files.py (100%) rename tests/{ => data}/test_create_cross_validation_datalist.py (100%) rename tests/{ => data}/test_csv_dataset.py (100%) rename tests/{ => data}/test_csv_iterable_dataset.py (99%) rename tests/{ => data}/test_csv_saver.py (100%) rename tests/{ => data}/test_dataloader.py (99%) rename tests/{ => data}/test_dataset.py (98%) rename tests/{ => data}/test_dataset_func.py (100%) rename tests/{ => data}/test_dataset_summary.py (100%) rename tests/{ => data}/test_fft_utils.py (99%) rename tests/{ => data}/test_folder_layout.py (100%) rename tests/{ => data}/test_gdsdataset.py (99%) rename tests/{ => data}/test_grid_dataset.py (99%) rename tests/{ => data}/test_handler_smartcache.py (100%) rename tests/{ => data}/test_hashing.py (100%) rename tests/{ => data}/test_header_correct.py (100%) rename tests/{ => data}/test_image_dataset.py (100%) rename tests/{ => data}/test_image_rw.py (99%) rename tests/{ => data}/test_init_reader.py (99%) rename tests/{ => data}/test_is_supported_format.py (100%) rename tests/{ => data}/test_iterable_dataset.py (100%) rename tests/{ => data}/test_itk_torch_bridge.py (99%) rename tests/{ => data}/test_itk_writer.py (100%) rename tests/{ => data}/test_list_data_collate.py (100%) rename tests/{ => data}/test_lmdbdataset.py (99%) rename tests/{ => data}/test_lmdbdataset_dist.py (99%) rename tests/{ => data}/test_load_decathlon_datalist.py (100%) rename tests/{ => data}/test_make_nifti.py (99%) rename tests/{ => data}/test_mapping_file.py (100%) rename tests/{ => data}/test_masked_patch_wsi_dataset.py (96%) rename tests/{ => data}/test_nifti_header_revise.py (100%) rename tests/{ => data}/test_nifti_rw.py (99%) rename tests/{ => data}/test_npzdictitemdataset.py (100%) rename tests/{ => data}/test_nrrd_reader.py (100%) rename tests/{ => data}/test_numpy_reader.py (99%) rename tests/{ => data}/test_partition_dataset.py (100%) rename tests/{ => data}/test_partition_dataset_classes.py (100%) rename tests/{ => data}/test_patch_dataset.py (100%) rename tests/{ => data}/test_patch_wsi_dataset.py (98%) rename tests/{ => data}/test_persistentdataset.py (100%) rename tests/{ => data}/test_persistentdataset_dist.py (99%) rename tests/{ => data}/test_pil_reader.py (100%) rename tests/{ => data}/test_png_rw.py (100%) rename tests/{ => data}/test_resample_datalist.py (100%) rename tests/{ => data}/test_sampler_dist.py (99%) rename tests/{ => data}/test_select_cross_validation_folds.py (100%) rename tests/{ => data}/test_shuffle_buffer.py (99%) rename tests/{ => data}/test_sliding_patch_wsi_dataset.py (96%) rename tests/{ => data}/test_smartcachedataset.py (99%) rename tests/{ => data}/test_synthetic.py (100%) rename tests/{ => data}/test_thread_buffer.py (99%) rename tests/{ => data}/test_threadcontainer.py (96%) rename tests/{ => data}/test_video_datasets.py (97%) rename tests/{ => data}/test_weighted_random_sampler_dist.py (99%) rename tests/{ => data}/test_zipdataset.py (100%) rename tests/{ => data/utils}/test_decollate.py (99%) rename tests/{ => data/utils}/test_dev_collate.py (100%) rename tests/{ => data/utils}/test_file_basename.py (100%) rename tests/{ => data/utils}/test_ori_ras_lps.py (77%) rename tests/{ => data/utils}/test_zoom_affine.py (100%) create mode 100644 tests/engines/__init__.py rename tests/{ => engines}/test_ensemble_evaluator.py 
(99%) rename tests/{ => engines}/test_prepare_batch_default.py (99%) rename tests/{ => engines}/test_prepare_batch_default_dist.py (99%) rename tests/{ => engines}/test_prepare_batch_diffusion.py (100%) rename tests/{ => engines}/test_prepare_batch_extra_input.py (99%) create mode 100644 tests/fl/__init__.py create mode 100644 tests/fl/monai_algo/__init__.py rename tests/{ => fl/monai_algo}/test_fl_monai_algo.py (99%) rename tests/{ => fl/monai_algo}/test_fl_monai_algo_dist.py (96%) rename tests/{ => fl}/test_fl_monai_algo_stats.py (97%) create mode 100644 tests/fl/utils/__init__.py rename tests/{ => fl/utils}/test_fl_exchange_object.py (99%) create mode 100644 tests/handlers/__init__.py rename tests/{ => handlers}/test_handler_checkpoint_loader.py (99%) rename tests/{ => handlers}/test_handler_checkpoint_saver.py (100%) rename tests/{ => handlers}/test_handler_classification_saver.py (100%) rename tests/{ => handlers}/test_handler_classification_saver_dist.py (99%) rename tests/{ => handlers}/test_handler_clearml_image.py (100%) rename tests/{ => handlers}/test_handler_clearml_stats.py (100%) rename tests/{ => handlers}/test_handler_confusion_matrix.py (100%) rename tests/{ => handlers}/test_handler_confusion_matrix_dist.py (99%) rename tests/{ => handlers}/test_handler_decollate_batch.py (99%) rename tests/{ => handlers}/test_handler_early_stop.py (100%) rename tests/{ => handlers}/test_handler_garbage_collector.py (100%) rename tests/{ => handlers}/test_handler_hausdorff_distance.py (100%) rename tests/{ => handlers}/test_handler_ignite_metric.py (99%) rename tests/{ => handlers}/test_handler_lr_scheduler.py (100%) rename tests/{ => handlers}/test_handler_mean_dice.py (100%) rename tests/{ => handlers}/test_handler_mean_iou.py (100%) rename tests/{ => handlers}/test_handler_metrics_reloaded.py (99%) rename tests/{ => handlers}/test_handler_metrics_saver.py (100%) rename tests/{ => handlers}/test_handler_metrics_saver_dist.py (97%) rename tests/{ => handlers}/test_handler_mlflow.py (99%) rename tests/{ => handlers}/test_handler_nvtx.py (99%) rename tests/{ => handlers}/test_handler_panoptic_quality.py (99%) rename tests/{ => handlers}/test_handler_parameter_scheduler.py (99%) rename tests/{ => handlers}/test_handler_post_processing.py (99%) rename tests/{ => handlers}/test_handler_prob_map_producer.py (96%) rename tests/{ => handlers}/test_handler_regression_metrics.py (100%) rename tests/{ => handlers}/test_handler_regression_metrics_dist.py (99%) rename tests/{ => handlers}/test_handler_rocauc.py (100%) rename tests/{ => handlers}/test_handler_rocauc_dist.py (99%) rename tests/{ => handlers}/test_handler_stats.py (100%) rename tests/{ => handlers}/test_handler_surface_distance.py (100%) rename tests/{ => handlers}/test_handler_tb_image.py (99%) rename tests/{ => handlers}/test_handler_tb_stats.py (100%) rename tests/{ => handlers}/test_handler_validation.py (100%) rename tests/{ => handlers}/test_trt_compile.py (99%) rename tests/{ => handlers}/test_write_metrics_reports.py (100%) create mode 100644 tests/inferers/__init__.py rename tests/{ => inferers}/test_avg_merger.py (99%) rename tests/{ => inferers}/test_controlnet_inferers.py (100%) rename tests/{ => inferers}/test_diffusion_inferer.py (100%) rename tests/{ => inferers}/test_latent_diffusion_inferer.py (100%) rename tests/{ => inferers}/test_patch_inferer.py (99%) rename tests/{ => inferers}/test_saliency_inferer.py (100%) rename tests/{ => inferers}/test_slice_inferer.py (100%) rename tests/{ => 
inferers}/test_sliding_window_inference.py (99%) rename tests/{ => inferers}/test_sliding_window_splitter.py (99%) rename tests/{ => inferers}/test_wsi_sliding_window_splitter.py (98%) rename tests/{ => inferers}/test_zarr_avg_merger.py (100%) create mode 100644 tests/integration/__init__.py rename tests/{ => integration}/test_auto3dseg_ensemble.py (99%) rename tests/{ => integration}/test_auto3dseg_hpo.py (99%) rename tests/{ => integration}/test_deepedit_interaction.py (100%) rename tests/{ => integration}/test_downsample_block.py (100%) rename tests/{ => integration}/test_hovernet_nuclear_type_post_processingd.py (99%) rename tests/{ => integration}/test_integration_autorunner.py (99%) rename tests/{ => integration}/test_integration_bundle_run.py (94%) rename tests/{ => integration}/test_integration_classification_2d.py (97%) rename tests/{ => integration}/test_integration_determinism.py (99%) rename tests/{ => integration}/test_integration_fast_train.py (99%) rename tests/{ => integration}/test_integration_gpu_customization.py (99%) rename tests/{ => integration}/test_integration_lazy_samples.py (99%) rename tests/{ => integration}/test_integration_nnunetv2_runner.py (99%) rename tests/{ => integration}/test_integration_segmentation_3d.py (97%) rename tests/{ => integration}/test_integration_sliding_window.py (99%) rename tests/{ => integration}/test_integration_stn.py (99%) rename tests/{ => integration}/test_integration_unet_2d.py (99%) rename tests/{ => integration}/test_integration_workers.py (99%) rename tests/{ => integration}/test_integration_workflows.py (99%) rename tests/{ => integration}/test_integration_workflows_adversarial.py (100%) rename tests/{ => integration}/test_integration_workflows_gan.py (99%) rename tests/{ => integration}/test_loader_semaphore.py (100%) rename tests/{ => integration}/test_mapping_filed.py (100%) rename tests/{ => integration}/test_meta_affine.py (97%) rename tests/{ => integration}/test_metatensor_integration.py (94%) rename tests/{ => integration}/test_module_list.py (100%) rename tests/{ => integration}/test_one_of.py (100%) rename tests/{ => integration}/test_pad_collation.py (100%) rename tests/{ => integration}/test_reg_loss_integration.py (99%) rename tests/{ => integration}/test_retinanet_predict_utils.py (100%) rename tests/{ => integration}/test_seg_loss_integration.py (100%) rename tests/{ => integration}/test_spatial_combine_transforms.py (99%) rename tests/{ => integration}/test_testtimeaugmentation.py (98%) rename tests/{ => integration}/test_vis_gradbased.py (100%) rename tests/{ => integration}/test_vista3d_utils.py (99%) create mode 100644 tests/losses/__init__.py create mode 100644 tests/losses/deform/__init__.py rename tests/{ => losses/deform}/test_bending_energy.py (100%) rename tests/{ => losses/deform}/test_diffusion_loss.py (100%) create mode 100644 tests/losses/image_dissimilarity/__init__.py rename tests/{ => losses/image_dissimilarity}/test_global_mutual_information_loss.py (97%) rename tests/{ => losses/image_dissimilarity}/test_local_normalized_cross_correlation_loss.py (100%) rename tests/{ => losses}/test_adversarial_loss.py (100%) rename tests/{ => losses}/test_barlow_twins_loss.py (100%) rename tests/{ => losses}/test_cldice_loss.py (100%) rename tests/{ => losses}/test_contrastive_loss.py (100%) rename tests/{ => losses}/test_dice_ce_loss.py (100%) rename tests/{ => losses}/test_dice_focal_loss.py (99%) rename tests/{ => losses}/test_dice_loss.py (99%) rename tests/{ => losses}/test_ds_loss.py (99%) rename 
tests/{ => losses}/test_focal_loss.py (99%) rename tests/{ => losses}/test_generalized_dice_focal_loss.py (99%) rename tests/{ => losses}/test_generalized_dice_loss.py (99%) rename tests/{ => losses}/test_generalized_wasserstein_dice_loss.py (99%) rename tests/{ => losses}/test_giou_loss.py (100%) rename tests/{ => losses}/test_hausdorff_loss.py (100%) rename tests/{ => losses}/test_masked_dice_loss.py (100%) rename tests/{ => losses}/test_masked_loss.py (99%) rename tests/{ => losses}/test_multi_scale.py (99%) rename tests/{ => losses}/test_nacl_loss.py (100%) rename tests/{ => losses}/test_perceptual_loss.py (99%) rename tests/{ => losses}/test_spectral_loss.py (99%) rename tests/{ => losses}/test_ssim_loss.py (100%) rename tests/{ => losses}/test_sure_loss.py (100%) rename tests/{ => losses}/test_tversky_loss.py (99%) rename tests/{ => losses}/test_unified_focal_loss.py (100%) create mode 100644 tests/metrics/__init__.py rename tests/{ => metrics}/test_compute_confusion_matrix.py (99%) rename tests/{ => metrics}/test_compute_f_beta.py (99%) rename tests/{ => metrics}/test_compute_fid_metric.py (100%) rename tests/{ => metrics}/test_compute_froc.py (100%) rename tests/{ => metrics}/test_compute_generalized_dice.py (100%) rename tests/{ => metrics}/test_compute_meandice.py (100%) rename tests/{ => metrics}/test_compute_meaniou.py (100%) rename tests/{ => metrics}/test_compute_mmd_metric.py (100%) rename tests/{ => metrics}/test_compute_multiscalessim_metric.py (100%) rename tests/{ => metrics}/test_compute_panoptic_quality.py (98%) rename tests/{ => metrics}/test_compute_regression_metrics.py (100%) rename tests/{ => metrics}/test_compute_roc_auc.py (100%) rename tests/{ => metrics}/test_compute_variance.py (100%) rename tests/{ => metrics}/test_cumulative.py (99%) rename tests/{ => metrics}/test_cumulative_average.py (100%) rename tests/{ => metrics}/test_cumulative_average_dist.py (99%) rename tests/{ => metrics}/test_hausdorff_distance.py (100%) rename tests/{ => metrics}/test_label_quality_score.py (100%) rename tests/{ => metrics}/test_loss_metric.py (100%) rename tests/{ => metrics}/test_metrics_reloaded.py (100%) rename tests/{ => metrics}/test_ssim_metric.py (100%) rename tests/{ => metrics}/test_surface_dice.py (99%) rename tests/{ => metrics}/test_surface_distance.py (100%) create mode 100644 tests/networks/__init__.py create mode 100644 tests/networks/blocks/__init__.py create mode 100644 tests/networks/blocks/dints_block/__init__.py rename tests/{ => networks/blocks/dints_block}/test_acn_block.py (100%) rename tests/{ => networks/blocks/dints_block}/test_factorized_increase.py (100%) rename tests/{ => networks/blocks/dints_block}/test_factorized_reduce.py (100%) rename tests/{ => networks/blocks/dints_block}/test_p3d_block.py (100%) rename tests/{ => networks/blocks}/test_adn.py (99%) rename tests/{ => networks/blocks}/test_convolutions.py (99%) rename tests/{ => networks/blocks}/test_crf_cpu.py (99%) rename tests/{ => networks/blocks}/test_crf_cuda.py (99%) rename tests/{ => networks/blocks}/test_crossattention.py (99%) rename tests/{ => networks/blocks}/test_denseblock.py (99%) rename tests/{ => networks/blocks}/test_dynunet_block.py (99%) rename tests/{ => networks/blocks}/test_fpn_block.py (99%) rename tests/{ => networks/blocks}/test_localnet_block.py (100%) rename tests/{ => networks/blocks}/test_mlp.py (100%) rename tests/{ => networks/blocks}/test_patchembedding.py (99%) rename tests/{ => networks/blocks}/test_regunet_block.py (100%) rename tests/{ => 
networks/blocks}/test_se_block.py (99%) rename tests/{ => networks/blocks}/test_se_blocks.py (99%) rename tests/{ => networks/blocks}/test_segresnet_block.py (100%) rename tests/{ => networks/blocks}/test_selfattention.py (99%) rename tests/{ => networks/blocks}/test_simple_aspp.py (100%) rename tests/{ => networks/blocks}/test_spatialattention.py (100%) rename tests/{ => networks/blocks}/test_subpixel_upsample.py (99%) rename tests/{ => networks/blocks}/test_text_encoding.py (99%) rename tests/{ => networks/blocks}/test_transformerblock.py (100%) rename tests/{ => networks/blocks}/test_unetr_block.py (99%) rename tests/{ => networks/blocks}/test_upsample_block.py (100%) create mode 100644 tests/networks/blocks/warp/__init__.py rename tests/{ => networks/blocks/warp}/test_dvf2ddf.py (100%) rename tests/{ => networks/blocks/warp}/test_warp.py (98%) create mode 100644 tests/networks/layers/__init__.py create mode 100644 tests/networks/layers/filtering/__init__.py rename tests/{ => networks/layers/filtering}/test_bilateral_approx_cpu.py (99%) rename tests/{ => networks/layers/filtering}/test_bilateral_approx_cuda.py (99%) rename tests/{ => networks/layers/filtering}/test_bilateral_precise.py (99%) rename tests/{ => networks/layers/filtering}/test_phl_cpu.py (99%) rename tests/{ => networks/layers/filtering}/test_phl_cuda.py (99%) rename tests/{ => networks/layers/filtering}/test_trainable_bilateral.py (99%) rename tests/{ => networks/layers/filtering}/test_trainable_joint_bilateral.py (99%) rename tests/{ => networks/layers}/test_affine_transform.py (99%) rename tests/{ => networks/layers}/test_apply_filter.py (100%) rename tests/{ => networks/layers}/test_channel_pad.py (100%) rename tests/{ => networks/layers}/test_conjugate_gradient.py (100%) rename tests/{ => networks/layers}/test_drop_path.py (100%) rename tests/{ => networks/layers}/test_gaussian.py (100%) rename tests/{ => networks/layers}/test_gaussian_filter.py (99%) rename tests/{ => networks/layers}/test_get_layers.py (100%) rename tests/{ => networks/layers}/test_gmm.py (99%) rename tests/{ => networks/layers}/test_grid_pull.py (99%) rename tests/{ => networks/layers}/test_hilbert_transform.py (99%) rename tests/{ => networks/layers}/test_lltm.py (99%) rename tests/{ => networks/layers}/test_median_filter.py (100%) rename tests/{ => networks/layers}/test_polyval.py (100%) rename tests/{ => networks/layers}/test_preset_filters.py (100%) rename tests/{ => networks/layers}/test_savitzky_golay_filter.py (99%) rename tests/{ => networks/layers}/test_separable_filter.py (100%) rename tests/{ => networks/layers}/test_skip_connection.py (100%) rename tests/{ => networks/layers}/test_vector_quantizer.py (100%) rename tests/{ => networks/layers}/test_weight_init.py (100%) create mode 100644 tests/networks/nets/__init__.py create mode 100644 tests/networks/nets/dints/__init__.py rename tests/{ => networks/nets/dints}/test_dints_cell.py (100%) rename tests/{ => networks/nets/dints}/test_dints_mixop.py (99%) create mode 100644 tests/networks/nets/regunet/__init__.py rename tests/{ => networks/nets/regunet}/test_localnet.py (99%) rename tests/{ => networks/nets/regunet}/test_regunet.py (99%) rename tests/{ => networks/nets}/test_ahnet.py (99%) rename tests/{ => networks/nets}/test_attentionunet.py (99%) rename tests/{ => networks/nets}/test_autoencoder.py (99%) rename tests/{ => networks/nets}/test_autoencoderkl.py (100%) rename tests/{ => networks/nets}/test_basic_unet.py (99%) rename tests/{ => networks/nets}/test_basic_unetplusplus.py (99%) 
rename tests/{ => networks/nets}/test_bundle_init_bundle.py (99%) rename tests/{ => networks/nets}/test_cell_sam_wrapper.py (100%) rename tests/{ => networks/nets}/test_controlnet.py (100%) rename tests/{ => networks/nets}/test_daf3d.py (99%) rename tests/{ => networks/nets}/test_densenet.py (99%) rename tests/{ => networks/nets}/test_diffusion_model_unet.py (100%) rename tests/{ => networks/nets}/test_dints_network.py (99%) rename tests/{ => networks/nets}/test_discriminator.py (99%) rename tests/{ => networks/nets}/test_dynunet.py (99%) rename tests/{ => networks/nets}/test_efficientnet.py (97%) rename tests/{ => networks/nets}/test_flexible_unet.py (99%) rename tests/{ => networks/nets}/test_fullyconnectednet.py (100%) rename tests/{ => networks/nets}/test_generator.py (99%) rename tests/{ => networks/nets}/test_globalnet.py (99%) rename tests/{ => networks/nets}/test_highresnet.py (99%) rename tests/{ => networks/nets}/test_hovernet.py (99%) rename tests/{ => networks/nets}/test_mednext.py (100%) rename tests/{ => networks/nets}/test_milmodel.py (99%) rename tests/{ => networks/nets}/test_net_adapter.py (99%) rename tests/{ => networks/nets}/test_network_consistency.py (99%) rename tests/{ => networks/nets}/test_patch_gan_dicriminator.py (100%) rename tests/{ => networks/nets}/test_quicknat.py (99%) rename tests/{ => networks/nets}/test_resnet.py (99%) rename tests/{ => networks/nets}/test_segresnet.py (99%) rename tests/{ => networks/nets}/test_segresnet_ds.py (99%) rename tests/{ => networks/nets}/test_senet.py (98%) rename tests/{ => networks/nets}/test_spade_autoencoderkl.py (100%) rename tests/{ => networks/nets}/test_spade_diffusion_model_unet.py (100%) rename tests/{ => networks/nets}/test_spade_vaegan.py (100%) rename tests/{ => networks/nets}/test_swin_unetr.py (99%) rename tests/{ => networks/nets}/test_torchvision_fc_model.py (99%) rename tests/{ => networks/nets}/test_transchex.py (99%) rename tests/{ => networks/nets}/test_transformer.py (99%) rename tests/{ => networks/nets}/test_unet.py (99%) rename tests/{ => networks/nets}/test_unetr.py (99%) rename tests/{ => networks/nets}/test_varautoencoder.py (99%) rename tests/{ => networks/nets}/test_vista3d.py (99%) rename tests/{ => networks/nets}/test_vit.py (99%) rename tests/{ => networks/nets}/test_vitautoenc.py (99%) rename tests/{ => networks/nets}/test_vnet.py (99%) rename tests/{ => networks/nets}/test_voxelmorph.py (99%) rename tests/{ => networks/nets}/test_vqvae.py (100%) rename tests/{ => networks/nets}/test_vqvaetransformer_inferer.py (100%) create mode 100644 tests/networks/schedulers/__init__.py rename tests/{ => networks/schedulers}/test_scheduler_ddim.py (100%) rename tests/{ => networks/schedulers}/test_scheduler_ddpm.py (100%) rename tests/{ => networks/schedulers}/test_scheduler_pndm.py (100%) rename tests/{ => networks}/test_bundle_onnx_export.py (91%) rename tests/{ => networks}/test_convert_to_onnx.py (99%) rename tests/{ => networks}/test_convert_to_torchscript.py (100%) rename tests/{ => networks}/test_convert_to_trt.py (99%) rename tests/{ => networks}/test_save_state.py (100%) rename tests/{ => networks}/test_to_onehot.py (100%) rename tests/{ => networks}/test_varnet.py (99%) create mode 100644 tests/networks/utils/__init__.py rename tests/{ => networks/utils}/test_copy_model_state.py (100%) rename tests/{ => networks/utils}/test_eval_mode.py (100%) rename tests/{ => networks/utils}/test_freeze_layers.py (95%) rename tests/{ => networks/utils}/test_replace_module.py (99%) rename tests/{ => 
networks/utils}/test_train_mode.py (100%) create mode 100644 tests/optimizers/__init__.py rename tests/{ => optimizers}/test_generate_param_groups.py (99%) rename tests/{ => optimizers}/test_lr_finder.py (96%) rename tests/{ => optimizers}/test_lr_scheduler.py (100%) rename tests/{ => optimizers}/test_optim_novograd.py (100%) create mode 100644 tests/profile_subclass/__init__.py create mode 100644 tests/transforms/__init__.py create mode 100644 tests/transforms/compose/__init__.py rename tests/{ => transforms/compose}/test_compose.py (100%) rename tests/{ => transforms/compose}/test_some_of.py (98%) create mode 100644 tests/transforms/croppad/__init__.py rename tests/{ => transforms/croppad}/test_rand_weighted_crop.py (100%) rename tests/{ => transforms/croppad}/test_rand_weighted_cropd.py (99%) create mode 100644 tests/transforms/functional/__init__.py rename tests/{ => transforms/functional}/test_apply.py (99%) rename tests/{ => transforms/functional}/test_resample.py (99%) create mode 100644 tests/transforms/intensity/__init__.py rename tests/{ => transforms/intensity}/test_compute_ho_ver_maps.py (99%) rename tests/{ => transforms/intensity}/test_compute_ho_ver_maps_d.py (99%) rename tests/{ => transforms/intensity}/test_foreground_mask.py (99%) rename tests/{ => transforms/intensity}/test_foreground_maskd.py (99%) rename tests/{ => transforms/intensity}/test_rand_histogram_shiftd.py (99%) rename tests/{ => transforms/intensity}/test_scale_intensity_range_percentiles.py (99%) rename tests/{ => transforms/intensity}/test_scale_intensity_range_percentilesd.py (99%) create mode 100644 tests/transforms/inverse/__init__.py rename tests/{ => transforms/inverse}/test_inverse_array.py (99%) rename tests/{ => transforms/inverse}/test_traceable_transform.py (100%) create mode 100644 tests/transforms/post/__init__.py rename tests/{ => transforms/post}/test_label_filterd.py (99%) rename tests/{ => transforms/post}/test_probnms.py (99%) rename tests/{ => transforms/post}/test_probnmsd.py (99%) rename tests/{ => transforms/post}/test_remove_small_objects.py (99%) create mode 100644 tests/transforms/spatial/__init__.py rename tests/{ => transforms/spatial}/test_convert_box_points.py (99%) rename tests/{ => transforms/spatial}/test_grid_patch.py (99%) rename tests/{ => transforms/spatial}/test_grid_patchd.py (99%) rename tests/{ => transforms/spatial}/test_rand_grid_patch.py (99%) rename tests/{ => transforms/spatial}/test_rand_grid_patchd.py (99%) rename tests/{ => transforms/spatial}/test_spatial_resampled.py (99%) rename tests/{ => transforms}/test_activations.py (99%) rename tests/{ => transforms}/test_activationsd.py (99%) rename tests/{ => transforms}/test_adaptors.py (100%) rename tests/{ => transforms}/test_add_coordinate_channels.py (99%) rename tests/{ => transforms}/test_add_coordinate_channelsd.py (99%) rename tests/{ => transforms}/test_add_extreme_points_channel.py (99%) rename tests/{ => transforms}/test_add_extreme_points_channeld.py (99%) rename tests/{ => transforms}/test_adjust_contrast.py (99%) rename tests/{ => transforms}/test_adjust_contrastd.py (99%) rename tests/{ => transforms}/test_affine.py (99%) rename tests/{ => transforms}/test_affine_grid.py (99%) rename tests/{ => transforms}/test_affined.py (99%) rename tests/{ => transforms}/test_as_channel_last.py (99%) rename tests/{ => transforms}/test_as_channel_lastd.py (99%) rename tests/{ => transforms}/test_as_discrete.py (99%) rename tests/{ => transforms}/test_as_discreted.py (99%) rename tests/{ => 
transforms}/test_border_pad.py (100%) rename tests/{ => transforms}/test_border_padd.py (100%) rename tests/{ => transforms}/test_bounding_rect.py (99%) rename tests/{ => transforms}/test_bounding_rectd.py (99%) rename tests/{ => transforms}/test_cast_to_type.py (99%) rename tests/{ => transforms}/test_cast_to_typed.py (99%) rename tests/{ => transforms}/test_center_scale_crop.py (100%) rename tests/{ => transforms}/test_center_scale_cropd.py (100%) rename tests/{ => transforms}/test_center_spatial_crop.py (100%) rename tests/{ => transforms}/test_center_spatial_cropd.py (100%) rename tests/{ => transforms}/test_classes_to_indices.py (99%) rename tests/{ => transforms}/test_classes_to_indicesd.py (99%) rename tests/{ => transforms}/test_clip_intensity_percentiles.py (99%) rename tests/{ => transforms}/test_clip_intensity_percentilesd.py (98%) rename tests/{ => transforms}/test_compose_get_number_conversions.py (100%) rename tests/{ => transforms}/test_concat_itemsd.py (99%) rename tests/{ => transforms}/test_convert_to_multi_channel.py (99%) rename tests/{ => transforms}/test_convert_to_multi_channeld.py (100%) rename tests/{ => transforms}/test_copy_itemsd.py (99%) rename tests/{ => transforms}/test_create_grid_and_affine.py (99%) rename tests/{ => transforms}/test_crop_foreground.py (99%) rename tests/{ => transforms}/test_crop_foregroundd.py (99%) rename tests/{ => transforms}/test_cucim_dict_transform.py (99%) rename tests/{ => transforms}/test_cucim_transform.py (99%) rename tests/{ => transforms}/test_data_stats.py (100%) rename tests/{ => transforms}/test_data_statsd.py (100%) rename tests/{ => transforms}/test_delete_itemsd.py (100%) rename tests/{ => transforms}/test_detect_envelope.py (99%) rename tests/{ => transforms}/test_distance_transform_edt.py (99%) rename tests/{ => transforms}/test_divisible_pad.py (100%) rename tests/{ => transforms}/test_divisible_padd.py (100%) rename tests/{ => transforms}/test_ensure_channel_first.py (100%) rename tests/{ => transforms}/test_ensure_channel_firstd.py (100%) rename tests/{ => transforms}/test_ensure_type.py (99%) rename tests/{ => transforms}/test_ensure_typed.py (99%) rename tests/{ => transforms}/test_fg_bg_to_indices.py (99%) rename tests/{ => transforms}/test_fg_bg_to_indicesd.py (99%) rename tests/{ => transforms}/test_fill_holes.py (99%) rename tests/{ => transforms}/test_fill_holesd.py (99%) rename tests/{ => transforms}/test_flatten_sub_keysd.py (100%) rename tests/{ => transforms}/test_flip.py (99%) rename tests/{ => transforms}/test_flipd.py (99%) rename tests/{ => transforms}/test_fourier.py (99%) rename tests/{ => transforms}/test_gaussian_sharpen.py (99%) rename tests/{ => transforms}/test_gaussian_sharpend.py (99%) rename tests/{ => transforms}/test_gaussian_smooth.py (99%) rename tests/{ => transforms}/test_gaussian_smoothd.py (99%) rename tests/{ => transforms}/test_generate_label_classes_crop_centers.py (99%) rename tests/{ => transforms}/test_generate_pos_neg_label_crop_centers.py (99%) rename tests/{ => transforms}/test_generate_spatial_bounding_box.py (99%) rename tests/{ => transforms}/test_get_extreme_points.py (99%) rename tests/{ => transforms}/test_gibbs_noise.py (99%) rename tests/{ => transforms}/test_gibbs_noised.py (99%) rename tests/{ => transforms}/test_grid_distortion.py (99%) rename tests/{ => transforms}/test_grid_distortiond.py (99%) rename tests/{ => transforms}/test_grid_split.py (99%) rename tests/{ => transforms}/test_grid_splitd.py (99%) rename tests/{ => transforms}/test_histogram_normalize.py 
(99%) rename tests/{ => transforms}/test_histogram_normalized.py (99%) rename tests/{ => transforms}/test_image_filter.py (100%) rename tests/{ => transforms}/test_intensity_stats.py (99%) rename tests/{ => transforms}/test_intensity_statsd.py (100%) rename tests/{ => transforms}/test_inverse.py (100%) rename tests/{ => transforms}/test_inverse_collation.py (100%) rename tests/{ => transforms}/test_invert.py (99%) rename tests/{ => transforms}/test_invertd.py (99%) rename tests/{ => transforms}/test_k_space_spike_noise.py (99%) rename tests/{ => transforms}/test_k_space_spike_noised.py (99%) rename tests/{ => transforms}/test_keep_largest_connected_component.py (99%) rename tests/{ => transforms}/test_keep_largest_connected_componentd.py (99%) rename tests/{ => transforms}/test_label_filter.py (99%) rename tests/{ => transforms}/test_label_to_contour.py (99%) rename tests/{ => transforms}/test_label_to_contourd.py (99%) rename tests/{ => transforms}/test_label_to_mask.py (99%) rename tests/{ => transforms}/test_label_to_maskd.py (99%) rename tests/{ => transforms}/test_load_image.py (99%) rename tests/{ => transforms}/test_load_imaged.py (99%) rename tests/{ => transforms}/test_load_spacing_orientation.py (97%) rename tests/{ => transforms}/test_map_and_generate_sampling_centers.py (99%) rename tests/{ => transforms}/test_map_binary_to_indices.py (99%) rename tests/{ => transforms}/test_map_classes_to_indices.py (99%) rename tests/{ => transforms}/test_map_label_value.py (99%) rename tests/{ => transforms}/test_map_label_valued.py (99%) rename tests/{ => transforms}/test_map_transform.py (100%) rename tests/{ => transforms}/test_mask_intensity.py (99%) rename tests/{ => transforms}/test_mask_intensityd.py (100%) rename tests/{ => transforms}/test_mean_ensemble.py (99%) rename tests/{ => transforms}/test_mean_ensembled.py (99%) rename tests/{ => transforms}/test_median_smooth.py (99%) rename tests/{ => transforms}/test_median_smoothd.py (99%) rename tests/{ => transforms}/test_morphological_ops.py (99%) rename tests/{ => transforms}/test_nifti_endianness.py (100%) rename tests/{ => transforms}/test_normalize_intensity.py (99%) rename tests/{ => transforms}/test_normalize_intensityd.py (99%) rename tests/{ => transforms}/test_nvtx_decorator.py (99%) rename tests/{ => transforms}/test_nvtx_transform.py (100%) rename tests/{ => transforms}/test_orientation.py (99%) rename tests/{ => transforms}/test_orientationd.py (99%) rename tests/{ => transforms}/test_rand_adjust_contrast.py (99%) rename tests/{ => transforms}/test_rand_adjust_contrastd.py (99%) rename tests/{ => transforms}/test_rand_affine.py (99%) rename tests/{ => transforms}/test_rand_affine_grid.py (99%) rename tests/{ => transforms}/test_rand_affined.py (99%) rename tests/{ => transforms}/test_rand_axis_flip.py (99%) rename tests/{ => transforms}/test_rand_axis_flipd.py (99%) rename tests/{ => transforms}/test_rand_bias_field.py (99%) rename tests/{ => transforms}/test_rand_bias_fieldd.py (100%) rename tests/{ => transforms}/test_rand_coarse_dropout.py (99%) rename tests/{ => transforms}/test_rand_coarse_dropoutd.py (100%) rename tests/{ => transforms}/test_rand_coarse_shuffle.py (100%) rename tests/{ => transforms}/test_rand_coarse_shuffled.py (100%) rename tests/{ => transforms}/test_rand_crop_by_label_classes.py (99%) rename tests/{ => transforms}/test_rand_crop_by_label_classesd.py (99%) rename tests/{ => transforms}/test_rand_crop_by_pos_neg_label.py (99%) rename tests/{ => transforms}/test_rand_crop_by_pos_neg_labeld.py (99%) 
rename tests/{ => transforms}/test_rand_cucim_dict_transform.py (99%) rename tests/{ => transforms}/test_rand_cucim_transform.py (99%) rename tests/{ => transforms}/test_rand_deform_grid.py (99%) rename tests/{ => transforms}/test_rand_elastic_2d.py (99%) rename tests/{ => transforms}/test_rand_elastic_3d.py (99%) rename tests/{ => transforms}/test_rand_elasticd_2d.py (99%) rename tests/{ => transforms}/test_rand_elasticd_3d.py (99%) rename tests/{ => transforms}/test_rand_flip.py (99%) rename tests/{ => transforms}/test_rand_flipd.py (99%) rename tests/{ => transforms}/test_rand_gaussian_noise.py (99%) rename tests/{ => transforms}/test_rand_gaussian_noised.py (99%) rename tests/{ => transforms}/test_rand_gaussian_sharpen.py (99%) rename tests/{ => transforms}/test_rand_gaussian_sharpend.py (99%) rename tests/{ => transforms}/test_rand_gaussian_smooth.py (99%) rename tests/{ => transforms}/test_rand_gaussian_smoothd.py (99%) rename tests/{ => transforms}/test_rand_gibbs_noise.py (99%) rename tests/{ => transforms}/test_rand_gibbs_noised.py (99%) rename tests/{ => transforms}/test_rand_grid_distortion.py (99%) rename tests/{ => transforms}/test_rand_grid_distortiond.py (99%) rename tests/{ => transforms}/test_rand_histogram_shift.py (99%) rename tests/{ => transforms}/test_rand_k_space_spike_noise.py (99%) rename tests/{ => transforms}/test_rand_k_space_spike_noised.py (99%) rename tests/{ => transforms}/test_rand_rician_noise.py (99%) rename tests/{ => transforms}/test_rand_rician_noised.py (99%) rename tests/{ => transforms}/test_rand_rotate.py (99%) rename tests/{ => transforms}/test_rand_rotate90.py (99%) rename tests/{ => transforms}/test_rand_rotate90d.py (99%) rename tests/{ => transforms}/test_rand_rotated.py (99%) rename tests/{ => transforms}/test_rand_scale_crop.py (100%) rename tests/{ => transforms}/test_rand_scale_cropd.py (100%) rename tests/{ => transforms}/test_rand_scale_intensity.py (99%) rename tests/{ => transforms}/test_rand_scale_intensity_fixed_mean.py (99%) rename tests/{ => transforms}/test_rand_scale_intensity_fixed_meand.py (99%) rename tests/{ => transforms}/test_rand_scale_intensityd.py (99%) rename tests/{ => transforms}/test_rand_shift_intensity.py (99%) rename tests/{ => transforms}/test_rand_shift_intensityd.py (99%) rename tests/{ => transforms}/test_rand_simulate_low_resolution.py (99%) rename tests/{ => transforms}/test_rand_simulate_low_resolutiond.py (99%) rename tests/{ => transforms}/test_rand_spatial_crop.py (100%) rename tests/{ => transforms}/test_rand_spatial_crop_samples.py (100%) rename tests/{ => transforms}/test_rand_spatial_crop_samplesd.py (99%) rename tests/{ => transforms}/test_rand_spatial_cropd.py (100%) rename tests/{ => transforms}/test_rand_std_shift_intensity.py (99%) rename tests/{ => transforms}/test_rand_std_shift_intensityd.py (99%) rename tests/{ => transforms}/test_rand_zoom.py (99%) rename tests/{ => transforms}/test_rand_zoomd.py (99%) rename tests/{ => transforms}/test_randidentity.py (99%) rename tests/{ => transforms}/test_random_order.py (98%) rename tests/{ => transforms}/test_randtorchvisiond.py (99%) rename tests/{ => transforms}/test_regularization.py (99%) rename tests/{ => transforms}/test_remove_repeated_channel.py (99%) rename tests/{ => transforms}/test_remove_repeated_channeld.py (99%) rename tests/{ => transforms}/test_repeat_channel.py (99%) rename tests/{ => transforms}/test_repeat_channeld.py (99%) rename tests/{ => transforms}/test_resample_backends.py (99%) rename tests/{ => 
transforms}/test_resample_to_match.py (99%) rename tests/{ => transforms}/test_resample_to_matchd.py (99%) rename tests/{ => transforms}/test_resampler.py (99%) rename tests/{ => transforms}/test_resize.py (99%) rename tests/{ => transforms}/test_resize_with_pad_or_crop.py (99%) rename tests/{ => transforms}/test_resize_with_pad_or_cropd.py (98%) rename tests/{ => transforms}/test_resized.py (99%) rename tests/{ => transforms}/test_rotate.py (99%) rename tests/{ => transforms}/test_rotate90.py (99%) rename tests/{ => transforms}/test_rotate90d.py (99%) rename tests/{ => transforms}/test_rotated.py (99%) rename tests/{ => transforms}/test_save_classificationd.py (100%) rename tests/{ => transforms}/test_save_image.py (100%) rename tests/{ => transforms}/test_save_imaged.py (100%) rename tests/{ => transforms}/test_savitzky_golay_smooth.py (99%) rename tests/{ => transforms}/test_savitzky_golay_smoothd.py (99%) rename tests/{ => transforms}/test_scale_intensity.py (99%) rename tests/{ => transforms}/test_scale_intensity_fixed_mean.py (99%) rename tests/{ => transforms}/test_scale_intensity_range.py (99%) rename tests/{ => transforms}/test_scale_intensity_ranged.py (99%) rename tests/{ => transforms}/test_scale_intensityd.py (99%) rename tests/{ => transforms}/test_select_itemsd.py (100%) rename tests/{ => transforms}/test_shift_intensity.py (99%) rename tests/{ => transforms}/test_shift_intensityd.py (99%) rename tests/{ => transforms}/test_signal_continuouswavelet.py (91%) rename tests/{ => transforms}/test_signal_fillempty.py (93%) rename tests/{ => transforms}/test_signal_fillemptyd.py (93%) rename tests/{ => transforms}/test_signal_rand_add_gaussiannoise.py (93%) rename tests/{ => transforms}/test_signal_rand_add_sine.py (93%) rename tests/{ => transforms}/test_signal_rand_add_sine_partial.py (93%) rename tests/{ => transforms}/test_signal_rand_add_squarepulse.py (94%) rename tests/{ => transforms}/test_signal_rand_add_squarepulse_partial.py (94%) rename tests/{ => transforms}/test_signal_rand_drop.py (92%) rename tests/{ => transforms}/test_signal_rand_scale.py (92%) rename tests/{ => transforms}/test_signal_rand_shift.py (93%) rename tests/{ => transforms}/test_signal_remove_frequency.py (95%) rename tests/{ => transforms}/test_smooth_field.py (99%) rename tests/{ => transforms}/test_sobel_gradient.py (100%) rename tests/{ => transforms}/test_sobel_gradientd.py (100%) rename tests/{ => transforms}/test_spacing.py (99%) rename tests/{ => transforms}/test_spacingd.py (99%) rename tests/{ => transforms}/test_spatial_crop.py (100%) rename tests/{ => transforms}/test_spatial_cropd.py (100%) rename tests/{ => transforms}/test_spatial_pad.py (100%) rename tests/{ => transforms}/test_spatial_padd.py (100%) rename tests/{ => transforms}/test_spatial_resample.py (99%) rename tests/{ => transforms}/test_squeezedim.py (99%) rename tests/{ => transforms}/test_squeezedimd.py (99%) rename tests/{ => transforms}/test_std_shift_intensity.py (99%) rename tests/{ => transforms}/test_std_shift_intensityd.py (99%) rename tests/{ => transforms}/test_threshold_intensity.py (99%) rename tests/{ => transforms}/test_threshold_intensityd.py (99%) rename tests/{ => transforms}/test_to_contiguous.py (99%) rename tests/{ => transforms}/test_to_cupy.py (99%) rename tests/{ => transforms}/test_to_cupyd.py (99%) rename tests/{ => transforms}/test_to_device.py (99%) rename tests/{ => transforms}/test_to_deviced.py (99%) rename tests/{ => transforms}/test_to_numpy.py (99%) rename tests/{ => transforms}/test_to_numpyd.py 
(99%) rename tests/{ => transforms}/test_to_pil.py (99%) rename tests/{ => transforms}/test_to_pild.py (99%) rename tests/{ => transforms}/test_to_tensor.py (99%) rename tests/{ => transforms}/test_to_tensord.py (99%) rename tests/{ => transforms}/test_torchvision.py (99%) rename tests/{ => transforms}/test_torchvisiond.py (99%) rename tests/{ => transforms}/test_transform.py (100%) rename tests/{ => transforms}/test_transpose.py (99%) rename tests/{ => transforms}/test_transposed.py (99%) rename tests/{ => transforms}/test_ultrasound_confidence_map_transform.py (98%) rename tests/{ => transforms}/test_utils_pytorch_numpy_unification.py (99%) rename tests/{ => transforms}/test_vote_ensemble.py (99%) rename tests/{ => transforms}/test_vote_ensembled.py (99%) rename tests/{ => transforms}/test_with_allow_missing_keys.py (100%) rename tests/{ => transforms}/test_zoom.py (99%) rename tests/{ => transforms}/test_zoomd.py (99%) create mode 100644 tests/transforms/transform/__init__.py rename tests/{ => transforms/transform}/test_randomizable.py (100%) rename tests/{ => transforms/transform}/test_randomizable_transform_type.py (100%) create mode 100644 tests/transforms/utility/__init__.py rename tests/{ => transforms/utility}/test_apply_transform_to_points.py (100%) rename tests/{ => transforms/utility}/test_apply_transform_to_pointsd.py (100%) rename tests/{ => transforms/utility}/test_identity.py (99%) rename tests/{ => transforms/utility}/test_identityd.py (99%) rename tests/{ => transforms/utility}/test_lambda.py (99%) rename tests/{ => transforms/utility}/test_lambdad.py (99%) rename tests/{ => transforms/utility}/test_rand_lambda.py (99%) rename tests/{ => transforms/utility}/test_rand_lambdad.py (99%) rename tests/{ => transforms/utility}/test_simulatedelay.py (99%) rename tests/{ => transforms/utility}/test_simulatedelayd.py (99%) rename tests/{ => transforms/utility}/test_splitdim.py (99%) rename tests/{ => transforms/utility}/test_splitdimd.py (100%) create mode 100644 tests/transforms/utils/__init__.py rename tests/{ => transforms/utils}/test_correct_crop_centers.py (99%) rename tests/{ => transforms/utils}/test_get_unique_labels.py (99%) rename tests/{ => transforms/utils}/test_print_transform_backends.py (100%) rename tests/{ => transforms/utils}/test_soft_clip.py (100%) create mode 100644 tests/utils/__init__.py create mode 100644 tests/utils/enums/__init__.py rename tests/{ => utils/enums}/test_hovernet_loss.py (100%) rename tests/{ => utils/enums}/test_ordering.py (100%) rename tests/{ => utils/enums}/test_wsireader.py (98%) create mode 100644 tests/utils/misc/__init__.py rename tests/{ => utils/misc}/test_ensure_tuple.py (99%) rename tests/{ => utils/misc}/test_monai_env_vars.py (100%) rename tests/{ => utils/misc}/test_monai_utils_misc.py (100%) rename tests/{ => utils/misc}/test_str2bool.py (100%) rename tests/{ => utils/misc}/test_str2list.py (100%) rename tests/{ => utils}/test_alias.py (92%) rename tests/{ => utils}/test_component_store.py (100%) rename tests/{ => utils}/test_deprecated.py (100%) rename tests/{ => utils}/test_enum_bound_interp.py (99%) rename tests/{ => utils}/test_evenly_divisible_all_gather_dist.py (99%) rename tests/{ => utils}/test_get_package_version.py (100%) rename tests/{ => utils}/test_handler_logfile.py (99%) rename tests/{ => utils}/test_handler_metric_logger.py (99%) rename tests/{ => utils}/test_list_to_dict.py (100%) rename tests/{ => utils}/test_look_up_option.py (100%) rename tests/{ => utils}/test_optional_import.py (100%) rename tests/{ => 
utils}/test_pad_mode.py (99%) rename tests/{ => utils}/test_profiling.py (99%) rename tests/{ => utils}/test_rankfilter_dist.py (99%) rename tests/{ => utils}/test_require_pkg.py (100%) rename tests/{ => utils}/test_sample_slices.py (99%) rename tests/{ => utils}/test_set_determinism.py (99%) rename tests/{ => utils}/test_squeeze_unsqueeze.py (100%) rename tests/{ => utils}/test_state_cacher.py (100%) rename tests/{ => utils}/test_torchscript_utils.py (100%) rename tests/{ => utils}/test_version.py (100%) rename tests/{ => utils}/test_version_after.py (100%) create mode 100644 tests/utils/type_conversion/__init__.py rename tests/{ => utils/type_conversion}/test_convert_data_type.py (99%) rename tests/{ => utils/type_conversion}/test_get_equivalent_dtype.py (99%) rename tests/{ => utils/type_conversion}/test_safe_dtype_range.py (99%) create mode 100644 tests/visualize/__init__.py rename tests/{ => visualize}/test_img2tensorboard.py (100%) rename tests/{ => visualize}/test_occlusion_sensitivity.py (100%) rename tests/{ => visualize}/test_plot_2d_or_3d_image.py (99%) rename tests/{ => visualize}/test_vis_cam.py (100%) rename tests/{ => visualize}/test_vis_gradcam.py (99%) create mode 100644 tests/visualize/utils/__init__.py rename tests/{ => visualize/utils}/test_blend_images.py (99%) rename tests/{ => visualize/utils}/test_matshow3d.py (92%) diff --git a/monai/transforms/io/array.py b/monai/transforms/io/array.py index 1023cd7a7d..49b0665a90 100644 --- a/monai/transforms/io/array.py +++ b/monai/transforms/io/array.py @@ -11,7 +11,6 @@ """ A collection of "vanilla" transforms for IO functions. """ - from __future__ import annotations import inspect diff --git a/runtests.sh b/runtests.sh index 65e3a2bb6b..2a399d5c3a 100755 --- a/runtests.sh +++ b/runtests.sh @@ -728,7 +728,7 @@ if [ $doDistTests = true ] then echo "${separator}${blue}run distributed unit test cases${noColor}" torch_validate - for i in tests/test_*_dist.py + for i in $(find ./tests/ -name "*_dist.py") do echo "$i" ${cmdPrefix}${cmd} "$i" @@ -740,7 +740,7 @@ if [ $doNetTests = true ] then set +e # disable exit on failure so that diagnostics can be given on failure echo "${separator}${blue}integration${noColor}" - for i in tests/*integration_*.py + for i in tests/integration/*.py do echo "$i" ${cmdPrefix}${cmd} "$i" diff --git a/tests/apps/__init__.py b/tests/apps/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/apps/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/apps/deepedit/__init__.py b/tests/apps/deepedit/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/apps/deepedit/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/test_deepedit_transforms.py b/tests/apps/deepedit/test_deepedit_transforms.py similarity index 100% rename from tests/test_deepedit_transforms.py rename to tests/apps/deepedit/test_deepedit_transforms.py diff --git a/tests/apps/deepgrow/__init__.py b/tests/apps/deepgrow/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/apps/deepgrow/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/test_deepgrow_dataset.py b/tests/apps/deepgrow/test_deepgrow_dataset.py similarity index 100% rename from tests/test_deepgrow_dataset.py rename to tests/apps/deepgrow/test_deepgrow_dataset.py diff --git a/tests/apps/deepgrow/transforms/__init__.py b/tests/apps/deepgrow/transforms/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/apps/deepgrow/transforms/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/test_deepgrow_interaction.py b/tests/apps/deepgrow/transforms/test_deepgrow_interaction.py similarity index 100% rename from tests/test_deepgrow_interaction.py rename to tests/apps/deepgrow/transforms/test_deepgrow_interaction.py diff --git a/tests/test_deepgrow_transforms.py b/tests/apps/deepgrow/transforms/test_deepgrow_transforms.py similarity index 100% rename from tests/test_deepgrow_transforms.py rename to tests/apps/deepgrow/transforms/test_deepgrow_transforms.py diff --git a/tests/apps/detection/__init__.py b/tests/apps/detection/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/apps/detection/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/apps/detection/metrics/__init__.py b/tests/apps/detection/metrics/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/apps/detection/metrics/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/test_detection_coco_metrics.py b/tests/apps/detection/metrics/test_detection_coco_metrics.py similarity index 100% rename from tests/test_detection_coco_metrics.py rename to tests/apps/detection/metrics/test_detection_coco_metrics.py diff --git a/tests/apps/detection/networks/__init__.py b/tests/apps/detection/networks/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/apps/detection/networks/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/tests/test_retinanet.py b/tests/apps/detection/networks/test_retinanet.py similarity index 99% rename from tests/test_retinanet.py rename to tests/apps/detection/networks/test_retinanet.py index a24f5b208c..240fd3a9e2 100644 --- a/tests/test_retinanet.py +++ b/tests/apps/detection/networks/test_retinanet.py @@ -101,7 +101,6 @@ @unittest.skipUnless(has_torchvision, "Requires torchvision") @skip_if_quick class TestRetinaNet(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_retina_shape(self, model, input_param, input_shape): backbone = model(**input_param) diff --git a/tests/test_retinanet_detector.py b/tests/apps/detection/networks/test_retinanet_detector.py similarity index 99% rename from tests/test_retinanet_detector.py rename to tests/apps/detection/networks/test_retinanet_detector.py index e5ff7e211a..b91ea46b4b 100644 --- a/tests/test_retinanet_detector.py +++ b/tests/apps/detection/networks/test_retinanet_detector.py @@ -93,7 +93,6 @@ class NaiveNetwork(torch.nn.Module): - def __init__(self, spatial_dims, num_classes, **kwargs): super().__init__() self.spatial_dims = spatial_dims @@ -115,7 +114,6 @@ def forward(self, images): @unittest.skipUnless(has_torchvision, "Requires torchvision") @skip_if_quick class TestRetinaNetDetector(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_retina_detector_resnet_backbone_shape(self, input_param, input_shape): returned_layers = [1] diff --git a/tests/test_box_transform.py b/tests/apps/detection/test_box_transform.py similarity index 99% rename from tests/test_box_transform.py rename to tests/apps/detection/test_box_transform.py index 4084fab88b..56929eafc2 100644 --- a/tests/test_box_transform.py +++ b/tests/apps/detection/test_box_transform.py @@ -79,7 +79,6 @@ class TestBoxTransform(unittest.TestCase): - @parameterized.expand(TESTS_2D_mask) def test_value_2d_mask(self, mask, expected_box_label): box_label = convert_mask_to_box(mask) diff --git a/tests/apps/detection/utils/__init__.py b/tests/apps/detection/utils/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/apps/detection/utils/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/tests/test_anchor_box.py b/tests/apps/detection/utils/test_anchor_box.py similarity index 99% rename from tests/test_anchor_box.py rename to tests/apps/detection/utils/test_anchor_box.py index 531f708aae..7543c84ed9 100644 --- a/tests/test_anchor_box.py +++ b/tests/apps/detection/utils/test_anchor_box.py @@ -42,7 +42,6 @@ @SkipIfBeforePyTorchVersion((1, 11)) @unittest.skipUnless(has_torchvision, "Requires torchvision") class TestAnchorGenerator(unittest.TestCase): - @parameterized.expand(TEST_CASES_2D) def test_anchor_2d(self, input_param, image_shape, feature_maps_shapes): torch_anchor_utils, _ = optional_import("torchvision.models.detection.anchor_utils") diff --git a/tests/test_atss_box_matcher.py b/tests/apps/detection/utils/test_atss_box_matcher.py similarity index 99% rename from tests/test_atss_box_matcher.py rename to tests/apps/detection/utils/test_atss_box_matcher.py index fa8462232e..1a28f0d211 100644 --- a/tests/test_atss_box_matcher.py +++ b/tests/apps/detection/utils/test_atss_box_matcher.py @@ -33,7 +33,6 @@ class TestATSS(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_atss(self, input_param, boxes, anchors, num_anchors_per_level, num_anchors_per_loc, expected_matches): matcher = ATSSMatcher(**input_param, debug=True) diff --git a/tests/test_box_coder.py b/tests/apps/detection/utils/test_box_coder.py similarity index 99% rename from tests/test_box_coder.py rename to tests/apps/detection/utils/test_box_coder.py index e253b30531..90d9444355 100644 --- a/tests/test_box_coder.py +++ b/tests/apps/detection/utils/test_box_coder.py @@ -21,7 +21,6 @@ class TestBoxTransform(unittest.TestCase): - def test_value(self): box_coder = BoxCoder(weights=[1, 1, 1, 1, 1, 1]) test_dtype = [torch.float32, torch.float16] diff --git a/tests/test_detector_boxselector.py b/tests/apps/detection/utils/test_detector_boxselector.py similarity index 99% rename from tests/test_detector_boxselector.py rename to tests/apps/detection/utils/test_detector_boxselector.py index a252ef15e9..6ddcc85b7e 100644 --- a/tests/test_detector_boxselector.py +++ b/tests/apps/detection/utils/test_detector_boxselector.py @@ -56,7 +56,6 @@ class TestBoxSelector(unittest.TestCase): - @parameterized.expand(TEST_CASE) def test_box_selector(self, input_param, boxes, logits, image_shape, expected_results): box_selector = BoxSelector(**input_param) diff --git a/tests/test_detector_utils.py b/tests/apps/detection/utils/test_detector_utils.py similarity index 99% rename from tests/test_detector_utils.py rename to tests/apps/detection/utils/test_detector_utils.py index d84719cf3f..56bea31ff4 100644 --- a/tests/test_detector_utils.py +++ b/tests/apps/detection/utils/test_detector_utils.py @@ -79,7 +79,6 @@ class TestDetectorUtils(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_detector_utils(self, input_param, input_shape, expected_shape): size_divisible = 32 * ensure_tuple(input_param["conv1_t_stride"])[0] diff --git a/tests/test_hardnegsampler.py b/tests/apps/detection/utils/test_hardnegsampler.py similarity index 99% rename from tests/test_hardnegsampler.py rename to tests/apps/detection/utils/test_hardnegsampler.py index a0a2743bf7..4a3c03bcad 100644 --- a/tests/test_hardnegsampler.py +++ b/tests/apps/detection/utils/test_hardnegsampler.py @@ -37,7 +37,6 @@ class TestSampleSlices(unittest.TestCase): - @parameterized.expand(TEST_CASE) def test_shape(self, target_label0, target_label1, concat_fg_probs, expected_result_pos, expected_result_neg): compute_dtypes = [torch.float16, 
torch.float32] diff --git a/tests/apps/maisi/networks/__init__.py b/tests/apps/maisi/networks/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/apps/maisi/networks/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/test_autoencoderkl_maisi.py b/tests/apps/maisi/networks/test_autoencoderkl_maisi.py similarity index 99% rename from tests/test_autoencoderkl_maisi.py rename to tests/apps/maisi/networks/test_autoencoderkl_maisi.py index 99f1dbdc76..6b9aae1d17 100644 --- a/tests/test_autoencoderkl_maisi.py +++ b/tests/apps/maisi/networks/test_autoencoderkl_maisi.py @@ -77,7 +77,6 @@ class TestAutoencoderKlMaisi(unittest.TestCase): - @parameterized.expand(CASES) def test_shape(self, input_param, input_shape, expected_shape, expected_latent_shape): net = AutoencoderKlMaisi(**input_param).to(device) diff --git a/tests/test_controlnet_maisi.py b/tests/apps/maisi/networks/test_controlnet_maisi.py similarity index 99% rename from tests/test_controlnet_maisi.py rename to tests/apps/maisi/networks/test_controlnet_maisi.py index 0166c33662..2668398350 100644 --- a/tests/test_controlnet_maisi.py +++ b/tests/apps/maisi/networks/test_controlnet_maisi.py @@ -129,7 +129,6 @@ @SkipIfBeforePyTorchVersion((2, 0)) class TestControlNet(unittest.TestCase): - @parameterized.expand(TEST_CASES) @skipUnless(has_einops, "Requires einops") def test_shape_unconditioned_models(self, input_param, expected_num_down_blocks_residuals, expected_shape): diff --git a/tests/test_diffusion_model_unet_maisi.py b/tests/apps/maisi/networks/test_diffusion_model_unet_maisi.py similarity index 100% rename from tests/test_diffusion_model_unet_maisi.py rename to tests/apps/maisi/networks/test_diffusion_model_unet_maisi.py diff --git a/tests/apps/nuclick/__init__.py b/tests/apps/nuclick/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/apps/nuclick/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/tests/test_nuclick_transforms.py b/tests/apps/nuclick/test_nuclick_transforms.py similarity index 100% rename from tests/test_nuclick_transforms.py rename to tests/apps/nuclick/test_nuclick_transforms.py diff --git a/tests/apps/pathology/__init__.py b/tests/apps/pathology/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/apps/pathology/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/apps/pathology/handlers/__init__.py b/tests/apps/pathology/handlers/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/apps/pathology/handlers/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/tests/test_from_engine_hovernet.py b/tests/apps/pathology/handlers/test_from_engine_hovernet.py similarity index 99% rename from tests/test_from_engine_hovernet.py rename to tests/apps/pathology/handlers/test_from_engine_hovernet.py index bed464ef49..5e9c319205 100644 --- a/tests/test_from_engine_hovernet.py +++ b/tests/apps/pathology/handlers/test_from_engine_hovernet.py @@ -28,7 +28,6 @@ class TestFromEngineHovernet(unittest.TestCase): - @parameterized.expand(CASES) def test_results(self, input, expected): output = from_engine_hovernet(keys=["A", "B"], nested_key="C")(input) diff --git a/tests/test_lesion_froc.py b/tests/apps/pathology/test_lesion_froc.py similarity index 100% rename from tests/test_lesion_froc.py rename to tests/apps/pathology/test_lesion_froc.py diff --git a/tests/test_pathology_prob_nms.py b/tests/apps/pathology/test_pathology_prob_nms.py similarity index 100% rename from tests/test_pathology_prob_nms.py rename to tests/apps/pathology/test_pathology_prob_nms.py diff --git a/tests/test_prepare_batch_hovernet.py b/tests/apps/pathology/test_prepare_batch_hovernet.py similarity index 99% rename from tests/test_prepare_batch_hovernet.py rename to tests/apps/pathology/test_prepare_batch_hovernet.py index dcff4cfb5b..d29aed2312 100644 --- a/tests/test_prepare_batch_hovernet.py +++ b/tests/apps/pathology/test_prepare_batch_hovernet.py @@ -35,7 +35,6 @@ def forward(self, x: torch.Tensor): class TestPrepareBatchHoVerNet(unittest.TestCase): - @parameterized.expand([TEST_CASE_0]) def test_content(self, input_args, expected_value): device = torch.device("cuda" if torch.cuda.is_available() else "cpu") diff --git a/tests/test_sliding_window_hovernet_inference.py b/tests/apps/pathology/test_sliding_window_hovernet_inference.py similarity index 99% rename from tests/test_sliding_window_hovernet_inference.py rename to tests/apps/pathology/test_sliding_window_hovernet_inference.py index 6fc9240a13..ce39c905d2 100644 --- a/tests/test_sliding_window_hovernet_inference.py +++ b/tests/apps/pathology/test_sliding_window_hovernet_inference.py @@ -21,7 +21,7 @@ from monai.data import MetaTensor from monai.inferers import sliding_window_inference from monai.utils import optional_import -from tests.test_sliding_window_inference import TEST_CASES +from tests.inferers.test_sliding_window_inference import TEST_CASES _, has_tqdm = optional_import("tqdm") @@ -36,7 +36,6 @@ class TestSlidingWindowHoVerNetInference(unittest.TestCase): - @parameterized.expand(TEST_CASES_PADDING) def test_sliding_window_with_padding( self, key, image_shape, roi_shape, sw_batch_size, overlap, mode, device, extra_input_padding diff --git a/tests/apps/pathology/transforms/__init__.py b/tests/apps/pathology/transforms/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/apps/pathology/transforms/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/tests/apps/pathology/transforms/post/__init__.py b/tests/apps/pathology/transforms/post/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/apps/pathology/transforms/post/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/test_generate_distance_map.py b/tests/apps/pathology/transforms/post/test_generate_distance_map.py similarity index 99% rename from tests/test_generate_distance_map.py rename to tests/apps/pathology/transforms/post/test_generate_distance_map.py index ded3a124dd..c41f2036c5 100644 --- a/tests/test_generate_distance_map.py +++ b/tests/apps/pathology/transforms/post/test_generate_distance_map.py @@ -36,7 +36,6 @@ class TestGenerateDistanceMap(unittest.TestCase): - @parameterized.expand(EXCEPTION_TESTS) def test_value(self, arguments, mask, probmap, exception_type): with self.assertRaises(exception_type): diff --git a/tests/test_generate_distance_mapd.py b/tests/apps/pathology/transforms/post/test_generate_distance_mapd.py similarity index 99% rename from tests/test_generate_distance_mapd.py rename to tests/apps/pathology/transforms/post/test_generate_distance_mapd.py index 04cfc2f776..44a7809034 100644 --- a/tests/test_generate_distance_mapd.py +++ b/tests/apps/pathology/transforms/post/test_generate_distance_mapd.py @@ -55,7 +55,6 @@ class TestGenerateDistanceMapd(unittest.TestCase): - @parameterized.expand(EXCEPTION_TESTS) def test_value(self, arguments, mask, border_map, exception_type): with self.assertRaises(exception_type): diff --git a/tests/test_generate_instance_border.py b/tests/apps/pathology/transforms/post/test_generate_instance_border.py similarity index 99% rename from tests/test_generate_instance_border.py rename to tests/apps/pathology/transforms/post/test_generate_instance_border.py index 9d9c5bc7d8..3ebf6a90b5 100644 --- a/tests/test_generate_instance_border.py +++ b/tests/apps/pathology/transforms/post/test_generate_instance_border.py @@ -34,7 +34,6 @@ class TestGenerateInstanceBorder(unittest.TestCase): - @parameterized.expand(EXCEPTION_TESTS) def test_value(self, arguments, mask, hover_map, exception_type): with self.assertRaises(exception_type): diff --git a/tests/test_generate_instance_borderd.py b/tests/apps/pathology/transforms/post/test_generate_instance_borderd.py similarity index 99% rename from tests/test_generate_instance_borderd.py rename to tests/apps/pathology/transforms/post/test_generate_instance_borderd.py index 1cbf99cee3..6f85b43313 100644 --- a/tests/test_generate_instance_borderd.py +++ b/tests/apps/pathology/transforms/post/test_generate_instance_borderd.py @@ -44,7 +44,6 @@ class TestGenerateInstanceBorderd(unittest.TestCase): - @parameterized.expand(EXCEPTION_TESTS) def test_value(self, arguments, mask, hover_map, exception_type): with self.assertRaises(exception_type): diff --git a/tests/test_generate_instance_centroid.py b/tests/apps/pathology/transforms/post/test_generate_instance_centroid.py similarity index 99% 
rename from tests/test_generate_instance_centroid.py rename to tests/apps/pathology/transforms/post/test_generate_instance_centroid.py index 051d555dff..78cdfaed09 100644 --- a/tests/test_generate_instance_centroid.py +++ b/tests/apps/pathology/transforms/post/test_generate_instance_centroid.py @@ -41,7 +41,6 @@ @unittest.skipUnless(has_skimage, "Requires scikit-image library.") class TestGenerateInstanceCentroid(unittest.TestCase): - @parameterized.expand(TEST_CASE) def test_shape(self, in_type, test_data, offset, expected): inst_bbox = get_bbox(test_data[None]) diff --git a/tests/test_generate_instance_centroidd.py b/tests/apps/pathology/transforms/post/test_generate_instance_centroidd.py similarity index 99% rename from tests/test_generate_instance_centroidd.py rename to tests/apps/pathology/transforms/post/test_generate_instance_centroidd.py index b3cee1872b..568bd6348c 100644 --- a/tests/test_generate_instance_centroidd.py +++ b/tests/apps/pathology/transforms/post/test_generate_instance_centroidd.py @@ -41,7 +41,6 @@ @unittest.skipUnless(has_skimage, "Requires scikit-image library.") class TestGenerateInstanceCentroidd(unittest.TestCase): - @parameterized.expand(TEST_CASE) def test_shape(self, in_type, test_data, offset, expected): inst_bbox = get_bbox(test_data[None]) diff --git a/tests/test_generate_instance_contour.py b/tests/apps/pathology/transforms/post/test_generate_instance_contour.py similarity index 99% rename from tests/test_generate_instance_contour.py rename to tests/apps/pathology/transforms/post/test_generate_instance_contour.py index 0346536db9..43808a559d 100644 --- a/tests/test_generate_instance_contour.py +++ b/tests/apps/pathology/transforms/post/test_generate_instance_contour.py @@ -46,7 +46,6 @@ @unittest.skipUnless(has_skimage, "Requires scikit-image library.") class TestGenerateInstanceContour(unittest.TestCase): - @parameterized.expand(TEST_CASE) def test_shape(self, in_type, test_data, min_num_points, offset, expected): inst_bbox = get_bbox(test_data[None]) diff --git a/tests/test_generate_instance_contourd.py b/tests/apps/pathology/transforms/post/test_generate_instance_contourd.py similarity index 99% rename from tests/test_generate_instance_contourd.py rename to tests/apps/pathology/transforms/post/test_generate_instance_contourd.py index 2a572e5932..640ca81c0b 100644 --- a/tests/test_generate_instance_contourd.py +++ b/tests/apps/pathology/transforms/post/test_generate_instance_contourd.py @@ -46,7 +46,6 @@ @unittest.skipUnless(has_skimage, "Requires scikit-image library.") class TestGenerateInstanceContourd(unittest.TestCase): - @parameterized.expand(TEST_CASE) def test_shape(self, in_type, test_data, min_num_points, offset, expected): inst_bbox = get_bbox(test_data[None]) diff --git a/tests/test_generate_instance_type.py b/tests/apps/pathology/transforms/post/test_generate_instance_type.py similarity index 99% rename from tests/test_generate_instance_type.py rename to tests/apps/pathology/transforms/post/test_generate_instance_type.py index 6e86beafb5..8628d941e1 100644 --- a/tests/test_generate_instance_type.py +++ b/tests/apps/pathology/transforms/post/test_generate_instance_type.py @@ -41,7 +41,6 @@ class TestGenerateInstanceType(unittest.TestCase): - @parameterized.expand(TEST_CASE) def test_shape(self, in_type, type_pred, seg_pred, bbox, expected): result = GenerateInstanceType()(in_type(type_pred[None]), in_type(seg_pred[None]), bbox, 1) diff --git a/tests/test_generate_instance_typed.py 
b/tests/apps/pathology/transforms/post/test_generate_instance_typed.py similarity index 99% rename from tests/test_generate_instance_typed.py rename to tests/apps/pathology/transforms/post/test_generate_instance_typed.py index 6088d672de..d293d66ebb 100644 --- a/tests/test_generate_instance_typed.py +++ b/tests/apps/pathology/transforms/post/test_generate_instance_typed.py @@ -41,7 +41,6 @@ class TestGenerateInstanceTyped(unittest.TestCase): - @parameterized.expand(TEST_CASE) def test_shape(self, in_type, type_pred, seg_pred, bbox, expected): test_data = {"type_pred": in_type(type_pred[None]), "seg": in_type(seg_pred[None]), "bbox": bbox, "id": 1} diff --git a/tests/test_generate_succinct_contour.py b/tests/apps/pathology/transforms/post/test_generate_succinct_contour.py similarity index 100% rename from tests/test_generate_succinct_contour.py rename to tests/apps/pathology/transforms/post/test_generate_succinct_contour.py diff --git a/tests/test_generate_succinct_contourd.py b/tests/apps/pathology/transforms/post/test_generate_succinct_contourd.py similarity index 100% rename from tests/test_generate_succinct_contourd.py rename to tests/apps/pathology/transforms/post/test_generate_succinct_contourd.py diff --git a/tests/test_generate_watershed_markers.py b/tests/apps/pathology/transforms/post/test_generate_watershed_markers.py similarity index 99% rename from tests/test_generate_watershed_markers.py rename to tests/apps/pathology/transforms/post/test_generate_watershed_markers.py index 73f7851f0d..e03f9e0652 100644 --- a/tests/test_generate_watershed_markers.py +++ b/tests/apps/pathology/transforms/post/test_generate_watershed_markers.py @@ -38,7 +38,6 @@ @unittest.skipUnless(has_skimage, "Requires scikit-image library.") @unittest.skipUnless(has_scipy, "Requires scipy library.") class TestGenerateWatershedMarkers(unittest.TestCase): - @parameterized.expand(EXCEPTION_TESTS) def test_value(self, arguments, mask, probmap, exception_type): with self.assertRaises(exception_type): diff --git a/tests/test_generate_watershed_markersd.py b/tests/apps/pathology/transforms/post/test_generate_watershed_markersd.py similarity index 99% rename from tests/test_generate_watershed_markersd.py rename to tests/apps/pathology/transforms/post/test_generate_watershed_markersd.py index 36ad113653..d4b59a63a4 100644 --- a/tests/test_generate_watershed_markersd.py +++ b/tests/apps/pathology/transforms/post/test_generate_watershed_markersd.py @@ -68,7 +68,6 @@ @unittest.skipUnless(has_skimage, "Requires scikit-image library.") @unittest.skipUnless(has_scipy, "Requires scipy library.") class TestGenerateWatershedMarkersd(unittest.TestCase): - @parameterized.expand(EXCEPTION_TESTS) def test_value(self, arguments, mask, border_map, exception_type): with self.assertRaises(exception_type): diff --git a/tests/test_generate_watershed_mask.py b/tests/apps/pathology/transforms/post/test_generate_watershed_mask.py similarity index 99% rename from tests/test_generate_watershed_mask.py rename to tests/apps/pathology/transforms/post/test_generate_watershed_mask.py index b4728062db..10429cbf12 100644 --- a/tests/test_generate_watershed_mask.py +++ b/tests/apps/pathology/transforms/post/test_generate_watershed_mask.py @@ -58,7 +58,6 @@ @unittest.skipUnless(has_scipy, "Requires scipy library.") class TestGenerateWatershedMask(unittest.TestCase): - @parameterized.expand(EXCEPTION_TESTS) def test_value(self, arguments, exception_type): with self.assertRaises(exception_type): diff --git a/tests/test_generate_watershed_maskd.py 
b/tests/apps/pathology/transforms/post/test_generate_watershed_maskd.py similarity index 99% rename from tests/test_generate_watershed_maskd.py rename to tests/apps/pathology/transforms/post/test_generate_watershed_maskd.py index 863e01be83..24caa7b4b9 100644 --- a/tests/test_generate_watershed_maskd.py +++ b/tests/apps/pathology/transforms/post/test_generate_watershed_maskd.py @@ -58,7 +58,6 @@ @unittest.skipUnless(has_scipy, "Requires scipy library.") class TestGenerateWatershedMaskd(unittest.TestCase): - @parameterized.expand(EXCEPTION_TESTS) def test_value(self, arguments, exception_type): with self.assertRaises(exception_type): diff --git a/tests/test_hovernet_instance_map_post_processing.py b/tests/apps/pathology/transforms/post/test_hovernet_instance_map_post_processing.py similarity index 99% rename from tests/test_hovernet_instance_map_post_processing.py rename to tests/apps/pathology/transforms/post/test_hovernet_instance_map_post_processing.py index 4e939eba0f..f9df25f2e2 100644 --- a/tests/test_hovernet_instance_map_post_processing.py +++ b/tests/apps/pathology/transforms/post/test_hovernet_instance_map_post_processing.py @@ -42,7 +42,6 @@ @unittest.skipUnless(has_scipy, "Requires scipy library.") @unittest.skipUnless(has_skimage, "Requires scikit-image library.") class TestHoVerNetInstanceMapPostProcessing(unittest.TestCase): - @parameterized.expand(TEST_CASE) def test_value(self, in_type, test_data, kwargs, expected_info, expected_map): nuclear_prediction = in_type(test_data.astype(float)) diff --git a/tests/test_hovernet_instance_map_post_processingd.py b/tests/apps/pathology/transforms/post/test_hovernet_instance_map_post_processingd.py similarity index 99% rename from tests/test_hovernet_instance_map_post_processingd.py rename to tests/apps/pathology/transforms/post/test_hovernet_instance_map_post_processingd.py index 2963e4fa39..216c13476c 100644 --- a/tests/test_hovernet_instance_map_post_processingd.py +++ b/tests/apps/pathology/transforms/post/test_hovernet_instance_map_post_processingd.py @@ -43,7 +43,6 @@ @unittest.skipUnless(has_scipy, "Requires scipy library.") @unittest.skipUnless(has_skimage, "Requires scikit-image library.") class TestHoVerNetInstanceMapPostProcessingd(unittest.TestCase): - @parameterized.expand(TEST_CASE) def test_value(self, in_type, test_data, kwargs, expected_info, expected_map): input = { diff --git a/tests/test_hovernet_nuclear_type_post_processing.py b/tests/apps/pathology/transforms/post/test_hovernet_nuclear_type_post_processing.py similarity index 99% rename from tests/test_hovernet_nuclear_type_post_processing.py rename to tests/apps/pathology/transforms/post/test_hovernet_nuclear_type_post_processing.py index 77e0ab9a2c..289a6e4fcf 100644 --- a/tests/test_hovernet_nuclear_type_post_processing.py +++ b/tests/apps/pathology/transforms/post/test_hovernet_nuclear_type_post_processing.py @@ -41,7 +41,6 @@ @unittest.skipUnless(has_scipy, "Requires scipy library.") @unittest.skipUnless(has_skimage, "Requires scikit-image library.") class TestHoVerNetNuclearTypePostProcessing(unittest.TestCase): - @parameterized.expand(TEST_CASE) def test_value(self, in_type, test_data, kwargs, expected_info, expected_map): nuclear_prediction = in_type(test_data.astype(float)) diff --git a/tests/test_watershed.py b/tests/apps/pathology/transforms/post/test_watershed.py similarity index 99% rename from tests/test_watershed.py rename to tests/apps/pathology/transforms/post/test_watershed.py index bef4a7a8d0..57fc6393ef 100644 --- a/tests/test_watershed.py +++ 
b/tests/apps/pathology/transforms/post/test_watershed.py @@ -43,7 +43,6 @@ @unittest.skipUnless(has_skimage, "Requires scikit-image library.") @unittest.skipUnless(has_scipy, "Requires scipy library.") class TestWatershed(unittest.TestCase): - @parameterized.expand(TESTS) def test_output(self, args, image, hover_map, expected_shape): mask = GenerateWatershedMask()(image) diff --git a/tests/test_watershedd.py b/tests/apps/pathology/transforms/post/test_watershedd.py similarity index 99% rename from tests/test_watershedd.py rename to tests/apps/pathology/transforms/post/test_watershedd.py index 7a6067e8a1..2162802c36 100644 --- a/tests/test_watershedd.py +++ b/tests/apps/pathology/transforms/post/test_watershedd.py @@ -48,7 +48,6 @@ @unittest.skipUnless(has_skimage, "Requires scikit-image library.") @unittest.skipUnless(has_scipy, "Requires scipy library.") class TestWatershedd(unittest.TestCase): - @parameterized.expand(TESTS) def test_output(self, args, image, hover_map, expected_shape): data = {"output": image, "hover_map": hover_map} diff --git a/tests/test_pathology_he_stain.py b/tests/apps/pathology/transforms/test_pathology_he_stain.py similarity index 100% rename from tests/test_pathology_he_stain.py rename to tests/apps/pathology/transforms/test_pathology_he_stain.py diff --git a/tests/test_pathology_he_stain_dict.py b/tests/apps/pathology/transforms/test_pathology_he_stain_dict.py similarity index 100% rename from tests/test_pathology_he_stain_dict.py rename to tests/apps/pathology/transforms/test_pathology_he_stain_dict.py diff --git a/tests/apps/reconstruction/__init__.py b/tests/apps/reconstruction/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/apps/reconstruction/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/apps/reconstruction/nets/__init__.py b/tests/apps/reconstruction/nets/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/apps/reconstruction/nets/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/tests/test_recon_net_utils.py b/tests/apps/reconstruction/nets/test_recon_net_utils.py similarity index 96% rename from tests/test_recon_net_utils.py rename to tests/apps/reconstruction/nets/test_recon_net_utils.py index 5f4a132624..6d1c17f851 100644 --- a/tests/test_recon_net_utils.py +++ b/tests/apps/reconstruction/nets/test_recon_net_utils.py @@ -35,7 +35,7 @@ TEST_RESHAPE = [(im_2d,), (im_3d,)] # normalize test case -im_2d, im_3d = torch.randint(0, 3, [3, 4, 50, 70]).float(), torch.randint(0, 3, [3, 4, 50, 70, 80]).float() +im_2d, im_3d = (torch.randint(0, 3, [3, 4, 50, 70]).float(), torch.randint(0, 3, [3, 4, 50, 70, 80]).float()) TEST_NORMALIZE = [(im_2d,), (im_3d,)] # pad test case @@ -49,7 +49,6 @@ class TestReconNetUtils(unittest.TestCase): - @parameterized.expand(TEST_RESHAPE) def test_reshape_channel_complex(self, test_data): result = reshape_complex_to_channel_dim(test_data) diff --git a/tests/test_complex_utils.py b/tests/apps/reconstruction/test_complex_utils.py similarity index 99% rename from tests/test_complex_utils.py rename to tests/apps/reconstruction/test_complex_utils.py index 26caa82438..e8ca4e0742 100644 --- a/tests/test_complex_utils.py +++ b/tests/apps/reconstruction/test_complex_utils.py @@ -51,7 +51,6 @@ class TestMRIUtils(unittest.TestCase): - @parameterized.expand(TESTS) def test_to_tensor_complex(self, test_data, expected_shape): result = convert_to_tensor_complex(test_data) diff --git a/tests/test_fastmri_reader.py b/tests/apps/reconstruction/test_fastmri_reader.py similarity index 99% rename from tests/test_fastmri_reader.py rename to tests/apps/reconstruction/test_fastmri_reader.py index f086146169..f1d60ac630 100644 --- a/tests/test_fastmri_reader.py +++ b/tests/apps/reconstruction/test_fastmri_reader.py @@ -66,7 +66,6 @@ @SkipIfNoModule("h5py") class TestMRIUtils(unittest.TestCase): - @parameterized.expand([TEST_CASE1, TEST_CASE2]) def test_get_data(self, test_data, test_res, test_meta): reader = FastMRIReader() diff --git a/tests/test_mri_utils.py b/tests/apps/reconstruction/test_mri_utils.py similarity index 99% rename from tests/test_mri_utils.py rename to tests/apps/reconstruction/test_mri_utils.py index e2ebb30b67..9102de2e68 100644 --- a/tests/test_mri_utils.py +++ b/tests/apps/reconstruction/test_mri_utils.py @@ -27,7 +27,6 @@ class TestMRIUtils(unittest.TestCase): - @parameterized.expand(TESTS) def test_rss(self, test_data, res_data): result = root_sum_of_squares(test_data, spatial_dim=1) diff --git a/tests/apps/reconstruction/transforms/__init__.py b/tests/apps/reconstruction/transforms/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/apps/reconstruction/transforms/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/tests/test_kspace_mask.py b/tests/apps/reconstruction/transforms/test_kspace_mask.py similarity index 100% rename from tests/test_kspace_mask.py rename to tests/apps/reconstruction/transforms/test_kspace_mask.py diff --git a/tests/test_reference_based_normalize_intensity.py b/tests/apps/reconstruction/transforms/test_reference_based_normalize_intensity.py similarity index 99% rename from tests/test_reference_based_normalize_intensity.py rename to tests/apps/reconstruction/transforms/test_reference_based_normalize_intensity.py index 8f3f8f2451..8afc10ec37 100644 --- a/tests/test_reference_based_normalize_intensity.py +++ b/tests/apps/reconstruction/transforms/test_reference_based_normalize_intensity.py @@ -52,7 +52,6 @@ class TestDetailedNormalizeIntensityd(unittest.TestCase): - @parameterized.expand(TESTS) def test_target_mean_std(self, args, data, normalized_data, normalized_target, mean, std): dtype = data[args["keys"][0]].dtype diff --git a/tests/test_reference_based_spatial_cropd.py b/tests/apps/reconstruction/transforms/test_reference_based_spatial_cropd.py similarity index 99% rename from tests/test_reference_based_spatial_cropd.py rename to tests/apps/reconstruction/transforms/test_reference_based_spatial_cropd.py index 13d8177b68..157da654ab 100644 --- a/tests/test_reference_based_spatial_cropd.py +++ b/tests/apps/reconstruction/transforms/test_reference_based_spatial_cropd.py @@ -46,7 +46,6 @@ class TestTargetBasedSpatialCropd(unittest.TestCase): - @parameterized.expand(TESTS) def test_shape(self, args, data, expected_shape): cropper = ReferenceBasedSpatialCropd(keys=args["keys"], ref_key=args["ref_key"]) diff --git a/tests/test_auto3dseg_bundlegen.py b/tests/apps/test_auto3dseg_bundlegen.py similarity index 99% rename from tests/test_auto3dseg_bundlegen.py rename to tests/apps/test_auto3dseg_bundlegen.py index 667909fa81..f82782c21f 100644 --- a/tests/test_auto3dseg_bundlegen.py +++ b/tests/apps/test_auto3dseg_bundlegen.py @@ -107,7 +107,6 @@ def run_auto3dseg_before_bundlegen(test_path, work_dir): @SkipIfBeforePyTorchVersion((1, 11, 1)) @skip_if_quick class TestBundleGen(unittest.TestCase): - def setUp(self) -> None: set_determinism(0) self.test_dir = tempfile.TemporaryDirectory() diff --git a/tests/test_check_hash.py b/tests/apps/test_check_hash.py similarity index 100% rename from tests/test_check_hash.py rename to tests/apps/test_check_hash.py diff --git a/tests/test_cross_validation.py b/tests/apps/test_cross_validation.py similarity index 96% rename from tests/test_cross_validation.py rename to tests/apps/test_cross_validation.py index a80af5b2a3..d776aef9b6 100644 --- a/tests/test_cross_validation.py +++ b/tests/apps/test_cross_validation.py @@ -11,8 +11,8 @@ from __future__ import annotations -import os import unittest +from pathlib import Path from monai.apps import CrossValidation, DecathlonDataset from monai.data import MetaTensor @@ -21,10 +21,9 @@ class TestCrossValidation(unittest.TestCase): - @skip_if_quick def test_values(self): - testing_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testing_data") + testing_dir = Path(__file__).parents[1] / "testing_data" train_transform = Compose( [ LoadImaged(keys=["image", "label"]), diff --git a/tests/test_decathlondataset.py b/tests/apps/test_decathlondataset.py similarity index 97% rename from tests/test_decathlondataset.py rename to tests/apps/test_decathlondataset.py index f4f6262697..a40e3b753c 100644 --- a/tests/test_decathlondataset.py +++ b/tests/apps/test_decathlondataset.py @@ -23,10 
+23,9 @@ class TestDecathlonDataset(unittest.TestCase): - @skip_if_quick def test_values(self): - testing_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testing_data") + testing_dir = Path(__file__).resolve().parents[1] / "testing_data" transform = Compose( [ LoadImaged(keys=["image", "label"]), diff --git a/tests/test_download_and_extract.py b/tests/apps/test_download_and_extract.py similarity index 96% rename from tests/test_download_and_extract.py rename to tests/apps/test_download_and_extract.py index 0b5d632123..190e32fc79 100644 --- a/tests/test_download_and_extract.py +++ b/tests/apps/test_download_and_extract.py @@ -11,7 +11,6 @@ from __future__ import annotations -import os import tempfile import unittest from pathlib import Path @@ -25,10 +24,9 @@ @SkipIfNoModule("requests") class TestDownloadAndExtract(unittest.TestCase): - @skip_if_quick def test_actions(self): - testing_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testing_data") + testing_dir = Path(__file__).parents[1] / "testing_data" config_dict = testing_data_config("images", "mednist") url = config_dict["url"] filepath = Path(testing_dir) / "MedNIST.tar.gz" diff --git a/tests/test_download_url_yandex.py b/tests/apps/test_download_url_yandex.py similarity index 100% rename from tests/test_download_url_yandex.py rename to tests/apps/test_download_url_yandex.py diff --git a/tests/test_mednistdataset.py b/tests/apps/test_mednistdataset.py similarity index 96% rename from tests/test_mednistdataset.py rename to tests/apps/test_mednistdataset.py index 7c6f837dc8..53da28911f 100644 --- a/tests/test_mednistdataset.py +++ b/tests/apps/test_mednistdataset.py @@ -25,10 +25,9 @@ class TestMedNISTDataset(unittest.TestCase): - @skip_if_quick def test_values(self): - testing_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testing_data") + testing_dir = Path(__file__).parents[1] / "testing_data" transform = Compose( [ LoadImaged(keys="image"), diff --git a/tests/test_mmar_download.py b/tests/apps/test_mmar_download.py similarity index 99% rename from tests/test_mmar_download.py rename to tests/apps/test_mmar_download.py index 7ce0cc44cc..bca3fa2a30 100644 --- a/tests/test_mmar_download.py +++ b/tests/apps/test_mmar_download.py @@ -116,7 +116,6 @@ @unittest.skip("deprecating mmar tests") class TestMMMARDownload(unittest.TestCase): - @parameterized.expand(TEST_CASES) @skip_if_quick def test_download(self, idx): diff --git a/tests/test_tciadataset.py b/tests/apps/test_tciadataset.py similarity index 97% rename from tests/test_tciadataset.py rename to tests/apps/test_tciadataset.py index 7c12daf954..c624aec5cf 100644 --- a/tests/test_tciadataset.py +++ b/tests/apps/test_tciadataset.py @@ -14,6 +14,7 @@ import os import shutil import unittest +from pathlib import Path from monai.apps import TciaDataset from monai.apps.tcia import DCM_FILENAME_REGEX, TCIA_LABEL_DICT @@ -23,10 +24,9 @@ class TestTciaDataset(unittest.TestCase): - @skip_if_quick def test_values(self): - testing_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testing_data") + testing_dir = Path(__file__).parents[1] / "testing_data" download_len = 1 val_frac = 1.0 collection = "QIN-PROSTATE-Repeatability" diff --git a/tests/apps/vista3d/__init__.py b/tests/apps/vista3d/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/apps/vista3d/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not 
use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/test_point_based_window_inferer.py b/tests/apps/vista3d/test_point_based_window_inferer.py similarity index 100% rename from tests/test_point_based_window_inferer.py rename to tests/apps/vista3d/test_point_based_window_inferer.py diff --git a/tests/test_vista3d_sampler.py b/tests/apps/vista3d/test_vista3d_sampler.py similarity index 100% rename from tests/test_vista3d_sampler.py rename to tests/apps/vista3d/test_vista3d_sampler.py diff --git a/tests/test_vista3d_transforms.py b/tests/apps/vista3d/test_vista3d_transforms.py similarity index 100% rename from tests/test_vista3d_transforms.py rename to tests/apps/vista3d/test_vista3d_transforms.py diff --git a/tests/bundle/__init__.py b/tests/bundle/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/bundle/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
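The path edits in the surrounding hunks all follow one pattern: test modules that moved from `tests/` into a subpackage such as `tests/apps/` or `tests/bundle/` can no longer reach the shared `testing_data` directory via `os.path.dirname(__file__)`, so they climb one level with `Path(__file__).parents[1]`. A minimal sketch of the equivalence (the module path below is hypothetical, used only for illustration):

```python
from pathlib import Path

# Hypothetical location after the reorganization: tests/apps/test_cross_validation.py
module_file = Path("/repo/tests/apps/test_cross_validation.py")

# parents[0] is the containing package (tests/apps); parents[1] is the tests/ root,
# which is where the shared testing_data directory still lives.
testing_dir = module_file.parents[1] / "testing_data"
print(testing_dir)  # -> /repo/tests/testing_data (POSIX rendering)

# The pre-move form only resolved correctly while the module sat directly under tests/:
# os.path.join(os.path.dirname(os.path.realpath(__file__)), "testing_data")
```

The `test_bundle_ckpt_export.py` hunk below binds the same lookup once to a module-level `TESTS_PATH`, so the existing `os.path.join` calls only change their first argument.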
diff --git a/tests/test_bundle_ckpt_export.py b/tests/bundle/test_bundle_ckpt_export.py similarity index 93% rename from tests/test_bundle_ckpt_export.py rename to tests/bundle/test_bundle_ckpt_export.py index 7c5f359e53..929c509b95 100644 --- a/tests/test_bundle_ckpt_export.py +++ b/tests/bundle/test_bundle_ckpt_export.py @@ -15,6 +15,7 @@ import os import tempfile import unittest +from pathlib import Path from parameterized import parameterized @@ -23,6 +24,8 @@ from monai.networks import save_state from tests.test_utils import command_line_tests, skip_if_windows +TESTS_PATH = Path(__file__).parents[1] + TEST_CASE_1 = ["", ""] TEST_CASE_2 = ["model", ""] @@ -46,8 +49,8 @@ def tearDown(self): @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3]) def test_export(self, key_in_ckpt, use_trace): - meta_file = os.path.join(os.path.dirname(__file__), "testing_data", "metadata.json") - config_file = os.path.join(os.path.dirname(__file__), "testing_data", "inference.json") + meta_file = os.path.join(TESTS_PATH, "testing_data", "metadata.json") + config_file = os.path.join(TESTS_PATH, "testing_data", "inference.json") with tempfile.TemporaryDirectory() as tempdir: def_args = {"meta_file": "will be replaced by `meta_file` arg"} def_args_file = os.path.join(tempdir, "def_args.yaml") @@ -78,7 +81,7 @@ def test_export(self, key_in_ckpt, use_trace): @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3]) def test_default_value(self, key_in_ckpt, use_trace): - config_file = os.path.join(os.path.dirname(__file__), "testing_data", "inference.json") + config_file = os.path.join(TESTS_PATH, "testing_data", "inference.json") with tempfile.TemporaryDirectory() as tempdir: def_args = {"meta_file": "will be replaced by `meta_file` arg"} def_args_file = os.path.join(tempdir, "def_args.yaml") diff --git a/tests/test_bundle_download.py b/tests/bundle/test_bundle_download.py similarity index 99% rename from tests/test_bundle_download.py rename to tests/bundle/test_bundle_download.py index e6f8bb24b2..38620d98ff 100644 --- a/tests/test_bundle_download.py +++ b/tests/bundle/test_bundle_download.py @@ -95,7 +95,6 @@ class TestDownload(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) @skip_if_quick def test_github_download_bundle(self, bundle_name, version): @@ -261,7 +260,6 @@ def test_download_ngc(self, mock_get_versions): @skip_if_no_cuda class TestLoad(unittest.TestCase): - @parameterized.expand([TEST_CASE_7]) @skip_if_quick def test_load_weights(self, bundle_files, bundle_name, repo, device, model_file): @@ -406,7 +404,6 @@ def test_load_ts_module(self, bundle_files, bundle_name, version, repo, device, class TestDownloadLargefiles(unittest.TestCase): - @parameterized.expand([TEST_CASE_10]) @skip_if_quick def test_url_download_large_files(self, bundle_files, bundle_name, url, hash_val): diff --git a/tests/test_bundle_get_data.py b/tests/bundle/test_bundle_get_data.py similarity index 99% rename from tests/test_bundle_get_data.py rename to tests/bundle/test_bundle_get_data.py index 6fb73263ff..7675d85a7d 100644 --- a/tests/test_bundle_get_data.py +++ b/tests/bundle/test_bundle_get_data.py @@ -45,7 +45,6 @@ @skip_if_windows @SkipIfNoModule("requests") class TestGetBundleData(unittest.TestCase): - @parameterized.expand([TEST_CASE_3, TEST_CASE_4]) @skip_if_quick def test_get_all_bundles_list(self, params): diff --git a/tests/test_bundle_push_to_hf_hub.py b/tests/bundle/test_bundle_push_to_hf_hub.py similarity index 99% rename from tests/test_bundle_push_to_hf_hub.py rename to 
tests/bundle/test_bundle_push_to_hf_hub.py index d164b460a0..d6f448af71 100644 --- a/tests/test_bundle_push_to_hf_hub.py +++ b/tests/bundle/test_bundle_push_to_hf_hub.py @@ -28,7 +28,6 @@ class TestPushToHuggingFaceHub(unittest.TestCase): - @parameterized.expand([TEST_CASE_1]) @skip_if_quick @skipUnless(has_huggingface_hub, "Requires `huggingface_hub` package.") diff --git a/tests/test_bundle_trt_export.py b/tests/bundle/test_bundle_trt_export.py similarity index 93% rename from tests/test_bundle_trt_export.py rename to tests/bundle/test_bundle_trt_export.py index 142883845d..a7c570438d 100644 --- a/tests/test_bundle_trt_export.py +++ b/tests/bundle/test_bundle_trt_export.py @@ -15,6 +15,7 @@ import os import tempfile import unittest +from pathlib import Path from parameterized import parameterized @@ -55,7 +56,6 @@ @skip_if_quick @SkipIfBeforeComputeCapabilityVersion((7, 5)) class TestTRTExport(unittest.TestCase): - def setUp(self): self.device = os.environ.get("CUDA_VISIBLE_DEVICES") if not self.device: @@ -70,8 +70,9 @@ def tearDown(self): @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4]) @unittest.skipUnless(has_torchtrt and has_tensorrt, "Torch-TensorRT is required for conversion!") def test_trt_export(self, convert_precision, input_shape, dynamic_batch): - meta_file = os.path.join(os.path.dirname(__file__), "testing_data", "metadata.json") - config_file = os.path.join(os.path.dirname(__file__), "testing_data", "inference.json") + tests_dir = Path(__file__).resolve().parent + meta_file = os.path.join(tests_dir, "testing_data", "metadata.json") + config_file = os.path.join(tests_dir, "testing_data", "inference.json") with tempfile.TemporaryDirectory() as tempdir: def_args = {"meta_file": "will be replaced by `meta_file` arg"} def_args_file = os.path.join(tempdir, "def_args.yaml") @@ -107,8 +108,9 @@ def test_trt_export(self, convert_precision, input_shape, dynamic_batch): has_onnx and has_torchtrt and has_tensorrt, "Onnx and TensorRT are required for onnx-trt conversion!" 
) def test_onnx_trt_export(self, convert_precision, input_shape, dynamic_batch): - meta_file = os.path.join(os.path.dirname(__file__), "testing_data", "metadata.json") - config_file = os.path.join(os.path.dirname(__file__), "testing_data", "inference.json") + tests_dir = Path(__file__).resolve().parent + meta_file = os.path.join(tests_dir, "testing_data", "metadata.json") + config_file = os.path.join(tests_dir, "testing_data", "inference.json") with tempfile.TemporaryDirectory() as tempdir: def_args = {"meta_file": "will be replaced by `meta_file` arg"} def_args_file = os.path.join(tempdir, "def_args.yaml") diff --git a/tests/test_bundle_utils.py b/tests/bundle/test_bundle_utils.py similarity index 99% rename from tests/test_bundle_utils.py rename to tests/bundle/test_bundle_utils.py index fd8d35bc1a..ec12262eb7 100644 --- a/tests/test_bundle_utils.py +++ b/tests/bundle/test_bundle_utils.py @@ -51,7 +51,6 @@ @skip_if_windows class TestLoadBundleConfig(unittest.TestCase): - def setUp(self): self.bundle_dir = tempfile.TemporaryDirectory() self.dir_name = os.path.join(self.bundle_dir.name, "TestBundle") @@ -135,7 +134,6 @@ def test_load_config_ts(self): class TestPPrintEdges(unittest.TestCase): - def test_str(self): self.assertEqual(pprint_edges("", 0), "''") self.assertEqual(pprint_edges({"a": 1, "b": 2}, 0), "{'a': 1, 'b': 2}") diff --git a/tests/test_bundle_verify_metadata.py b/tests/bundle/test_bundle_verify_metadata.py similarity index 91% rename from tests/test_bundle_verify_metadata.py rename to tests/bundle/test_bundle_verify_metadata.py index ad10121bdd..fd8535eb10 100644 --- a/tests/test_bundle_verify_metadata.py +++ b/tests/bundle/test_bundle_verify_metadata.py @@ -15,20 +15,21 @@ import os import tempfile import unittest +from pathlib import Path from parameterized import parameterized from monai.bundle import ConfigParser, verify_metadata from tests.test_utils import command_line_tests, download_url_or_skip_test, skip_if_windows, testing_data_config -SCHEMA_FILE = os.path.join(os.path.dirname(__file__), "testing_data", "schema.json") +TESTS_DIR = Path(__file__).parents[1] +SCHEMA_FILE = os.path.join(TESTS_DIR, "testing_data", "schema.json") -TEST_CASE_1 = [os.path.join(os.path.dirname(__file__), "testing_data", "metadata.json"), SCHEMA_FILE] +TEST_CASE_1 = [os.path.join(TESTS_DIR, "testing_data", "metadata.json"), SCHEMA_FILE] @skip_if_windows class TestVerifyMetaData(unittest.TestCase): - def setUp(self): self.config = testing_data_config("configs", "test_meta_file") download_url_or_skip_test( diff --git a/tests/test_bundle_verify_net.py b/tests/bundle/test_bundle_verify_net.py similarity index 94% rename from tests/test_bundle_verify_net.py rename to tests/bundle/test_bundle_verify_net.py index c7d508b019..f06f36a5a1 100644 --- a/tests/test_bundle_verify_net.py +++ b/tests/bundle/test_bundle_verify_net.py @@ -14,21 +14,23 @@ import os import tempfile import unittest +from pathlib import Path from parameterized import parameterized from monai.bundle import ConfigParser, verify_net_in_out from tests.test_utils import command_line_tests, skip_if_no_cuda, skip_if_windows +TESTS_PATH = Path(__file__).parents[1].as_posix() + TEST_CASE_1 = [ - os.path.join(os.path.dirname(__file__), "testing_data", "metadata.json"), - os.path.join(os.path.dirname(__file__), "testing_data", "inference.json"), + os.path.join(TESTS_PATH, "testing_data", "metadata.json"), + os.path.join(TESTS_PATH, "testing_data", "inference.json"), ] @skip_if_windows class TestVerifyNetwork(unittest.TestCase): - 
@parameterized.expand([TEST_CASE_1]) def test_verify(self, meta_file, config_file): with tempfile.TemporaryDirectory() as tempdir: diff --git a/tests/test_bundle_workflow.py b/tests/bundle/test_bundle_workflow.py similarity index 88% rename from tests/test_bundle_workflow.py rename to tests/bundle/test_bundle_workflow.py index 893b9dc991..ceb034ecff 100644 --- a/tests/test_bundle_workflow.py +++ b/tests/bundle/test_bundle_workflow.py @@ -17,6 +17,7 @@ import tempfile import unittest from copy import deepcopy +from pathlib import Path import nibabel as nib import numpy as np @@ -30,19 +31,20 @@ from monai.transforms import Compose, LoadImage, LoadImaged, SaveImaged from tests.nonconfig_workflow import NonConfigWorkflow, PythonicWorkflowImpl -TEST_CASE_1 = [os.path.join(os.path.dirname(__file__), "testing_data", "inference.json")] +MODULE_PATH = Path(__file__).resolve().parents[1] -TEST_CASE_2 = [os.path.join(os.path.dirname(__file__), "testing_data", "inference.yaml")] +TEST_CASE_1 = [os.path.join(MODULE_PATH, "testing_data", "inference.json")] -TEST_CASE_3 = [os.path.join(os.path.dirname(__file__), "testing_data", "config_fl_train.json")] +TEST_CASE_2 = [os.path.join(MODULE_PATH, "testing_data", "inference.yaml")] -TEST_CASE_4 = [os.path.join(os.path.dirname(__file__), "testing_data", "responsive_inference.json")] +TEST_CASE_3 = [os.path.join(MODULE_PATH, "testing_data", "config_fl_train.json")] + +TEST_CASE_4 = [os.path.join(MODULE_PATH, "testing_data", "responsive_inference.json")] TEST_CASE_NON_CONFIG_WRONG_LOG = [None, "logging.conf", "Cannot find the logging config file: logging.conf."] class TestBundleWorkflow(unittest.TestCase): - def setUp(self): self.data_dir = tempfile.mkdtemp() self.expected_shape = (128, 128, 128) @@ -105,7 +107,7 @@ def test_inference_config(self, config_file): inferer = ConfigWorkflow( workflow_type="infer", config_file=config_file, - logging_file=os.path.join(os.path.dirname(__file__), "testing_data", "logging.conf"), + logging_file=os.path.join(MODULE_PATH, "testing_data", "logging.conf"), **override, ) self._test_inferer(inferer) @@ -114,8 +116,8 @@ def test_inference_config(self, config_file): inferer = ConfigWorkflow( config_file=config_file, workflow_type="infer", - properties_path=os.path.join(os.path.dirname(__file__), "testing_data", "fl_infer_properties.json"), - logging_file=os.path.join(os.path.dirname(__file__), "testing_data", "logging.conf"), + properties_path=os.path.join(MODULE_PATH, "testing_data", "fl_infer_properties.json"), + logging_file=os.path.join(MODULE_PATH, "testing_data", "logging.conf"), **override, ) self._test_inferer(inferer) @@ -130,7 +132,7 @@ def test_responsive_inference_config(self, config_file): inferer = ConfigWorkflow( workflow_type="infer", config_file=config_file, - logging_file=os.path.join(os.path.dirname(__file__), "testing_data", "logging.conf"), + logging_file=os.path.join(MODULE_PATH, "testing_data", "logging.conf"), ) # FIXME: temp add the property for test, we should add it to some formal realtime infer properties inferer.add_property(name="dataflow", required=True, config_id="dataflow") @@ -156,7 +158,7 @@ def test_train_config(self, config_file): trainer = ConfigWorkflow( workflow_type="train", config_file=config_file, - logging_file=os.path.join(os.path.dirname(__file__), "testing_data", "logging.conf"), + logging_file=os.path.join(MODULE_PATH, "testing_data", "logging.conf"), init_id="initialize", run_id="run", final_id="finalize", @@ -202,8 +204,8 @@ def test_non_config_wrong_log_cases(self, meta_file, 
logging_file, expected_erro def test_pythonic_workflow(self): device = torch.device("cuda" if torch.cuda.is_available() else "cpu") config_file = {"roi_size": (64, 64, 32)} - meta_file = os.path.join(os.path.dirname(__file__), "testing_data", "metadata.json") - property_path = os.path.join(os.path.dirname(__file__), "testing_data", "python_workflow_properties.json") + meta_file = os.path.join(MODULE_PATH, "testing_data", "metadata.json") + property_path = os.path.join(MODULE_PATH, "testing_data", "python_workflow_properties.json") workflow = PythonicWorkflowImpl( workflow_type="infer", config_file=config_file, meta_file=meta_file, properties_path=property_path ) @@ -228,11 +230,11 @@ def test_pythonic_workflow(self): def test_create_pythonic_workflow(self): device = torch.device("cuda" if torch.cuda.is_available() else "cpu") config_file = {"roi_size": (64, 64, 32)} - meta_file = os.path.join(os.path.dirname(__file__), "testing_data", "metadata.json") - property_path = os.path.join(os.path.dirname(__file__), "testing_data", "python_workflow_properties.json") - sys.path.append(os.path.dirname(__file__)) + meta_file = os.path.join(MODULE_PATH, "testing_data", "metadata.json") + property_path = os.path.join(MODULE_PATH, "testing_data", "python_workflow_properties.json") + sys.path.append(MODULE_PATH) workflow = create_workflow( - "nonconfig_workflow.PythonicWorkflowImpl", + "tests.nonconfig_workflow.PythonicWorkflowImpl", workflow_type="infer", config_file=config_file, meta_file=meta_file, diff --git a/tests/test_component_locator.py b/tests/bundle/test_component_locator.py similarity index 100% rename from tests/test_component_locator.py rename to tests/bundle/test_component_locator.py diff --git a/tests/test_config_item.py b/tests/bundle/test_config_item.py similarity index 100% rename from tests/test_config_item.py rename to tests/bundle/test_config_item.py diff --git a/tests/test_config_parser.py b/tests/bundle/test_config_parser.py similarity index 98% rename from tests/test_config_parser.py rename to tests/bundle/test_config_parser.py index da2cea2625..5ead2af382 100644 --- a/tests/test_config_parser.py +++ b/tests/bundle/test_config_parser.py @@ -72,7 +72,6 @@ def case_pdb_inst(sarg=None): class TestClass: - @staticmethod def compute(a, b, func=lambda x, y: x + y): return func(a, b) @@ -92,9 +91,9 @@ def __call__(self, a, b): "cls_func": "$TestClass.cls_compute", "lambda_static_func": "$lambda x, y: TestClass.compute(x, y)", "lambda_cls_func": "$lambda x, y: TestClass.cls_compute(x, y)", - "compute": {"_target_": "tests.test_config_parser.TestClass.compute", "func": "@basic_func"}, - "cls_compute": {"_target_": "tests.test_config_parser.TestClass.cls_compute", "func": "@basic_func"}, - "call_compute": {"_target_": "tests.test_config_parser.TestClass"}, + "compute": {"_target_": "tests.bundle.test_config_parser.TestClass.compute", "func": "@basic_func"}, + "cls_compute": {"_target_": "tests.bundle.test_config_parser.TestClass.cls_compute", "func": "@basic_func"}, + "call_compute": {"_target_": "tests.bundle.test_config_parser.TestClass"}, "error_func": "$TestClass.__call__", "": "$lambda x, y: x + y", } @@ -143,7 +142,6 @@ def __call__(self, a, b): class TestConfigParser(unittest.TestCase): - def test_config_content(self): test_config = {"preprocessing": [{"_target_": "LoadImage"}], "dataset": {"_target_": "Dataset"}} parser = ConfigParser(config=test_config) diff --git a/tests/test_reference_resolver.py b/tests/bundle/test_reference_resolver.py similarity index 100% rename from 
tests/test_reference_resolver.py rename to tests/bundle/test_reference_resolver.py diff --git a/tests/clang_format_utils.py b/tests/clang_format_utils.py index 11483e957d..fbdd1d980c 100644 --- a/tests/clang_format_utils.py +++ b/tests/clang_format_utils.py @@ -18,6 +18,7 @@ import platform import stat import sys +from pathlib import Path from monai.apps.utils import download_url @@ -25,7 +26,7 @@ HOST_PLATFORM = platform.system() # MONAI directory root, derived from the location of this file. -MONAI_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) +MONAI_ROOT = Path(__file__).resolve().parent.parent # This dictionary maps each platform to the S3 object URL for its clang-format binary. PLATFORM_TO_CF_URL = { diff --git a/tests/config/__init__.py b/tests/config/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/config/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/test_cv2_dist.py b/tests/config/test_cv2_dist.py similarity index 99% rename from tests/test_cv2_dist.py rename to tests/config/test_cv2_dist.py index 25d20b15e6..2ef8e5b10f 100644 --- a/tests/test_cv2_dist.py +++ b/tests/config/test_cv2_dist.py @@ -42,7 +42,6 @@ def main_worker(rank, ngpus_per_node, port): @skip_if_no_cuda class TestCV2Dist(unittest.TestCase): - def test_cv2_cuda_ops(self): print_config() ngpus_per_node = torch.cuda.device_count() diff --git a/tests/croppers.py b/tests/croppers.py index 5b7f5148d9..fe06c2e2cb 100644 --- a/tests/croppers.py +++ b/tests/croppers.py @@ -24,7 +24,6 @@ class CropTest(unittest.TestCase): - @staticmethod def get_arr(shape): return np.random.randint(100, size=shape).astype(float) diff --git a/tests/test_meta_tensor.py b/tests/data/meta_tensor/test_meta_tensor.py similarity index 99% rename from tests/test_meta_tensor.py rename to tests/data/meta_tensor/test_meta_tensor.py index f0c6abc3b1..d6a7ef9f0b 100644 --- a/tests/test_meta_tensor.py +++ b/tests/data/meta_tensor/test_meta_tensor.py @@ -50,7 +50,6 @@ def rand_string(min_len=5, max_len=10): class TestMetaTensor(unittest.TestCase): - @staticmethod def get_im(shape=None, dtype=None, device=None): if shape is None: diff --git a/tests/test_to_from_meta_tensord.py b/tests/data/meta_tensor/test_to_from_meta_tensord.py similarity index 99% rename from tests/test_to_from_meta_tensord.py rename to tests/data/meta_tensor/test_to_from_meta_tensord.py index 06c089cb5e..64b64728cc 100644 --- a/tests/test_to_from_meta_tensord.py +++ b/tests/data/meta_tensor/test_to_from_meta_tensord.py @@ -42,7 +42,6 @@ def rand_string(min_len=5, max_len=10): @unittest.skipIf(config.USE_META_DICT, "skipping not metatensor") class TestToFromMetaTensord(unittest.TestCase): - @staticmethod def get_im(shape=None, dtype=None, device=None): if shape is None: diff --git a/tests/test_arraydataset.py b/tests/data/test_arraydataset.py similarity index 100% rename from tests/test_arraydataset.py rename to tests/data/test_arraydataset.py diff --git 
a/tests/test_box_utils.py b/tests/data/test_box_utils.py similarity index 99% rename from tests/test_box_utils.py rename to tests/data/test_box_utils.py index d277fe1af0..390fd901fd 100644 --- a/tests/test_box_utils.py +++ b/tests/data/test_box_utils.py @@ -140,7 +140,6 @@ class TestCreateBoxList(unittest.TestCase): - @parameterized.expand(TESTS) def test_value(self, input_data, mode2, expected_box, expected_area): expected_box = convert_data_type(expected_box, dtype=np.float32)[0] diff --git a/tests/test_cachedataset.py b/tests/data/test_cachedataset.py similarity index 100% rename from tests/test_cachedataset.py rename to tests/data/test_cachedataset.py diff --git a/tests/test_cachedataset_parallel.py b/tests/data/test_cachedataset_parallel.py similarity index 100% rename from tests/test_cachedataset_parallel.py rename to tests/data/test_cachedataset_parallel.py diff --git a/tests/test_cachedataset_persistent_workers.py b/tests/data/test_cachedataset_persistent_workers.py similarity index 100% rename from tests/test_cachedataset_persistent_workers.py rename to tests/data/test_cachedataset_persistent_workers.py diff --git a/tests/test_cachentransdataset.py b/tests/data/test_cachentransdataset.py similarity index 100% rename from tests/test_cachentransdataset.py rename to tests/data/test_cachentransdataset.py diff --git a/tests/test_check_missing_files.py b/tests/data/test_check_missing_files.py similarity index 100% rename from tests/test_check_missing_files.py rename to tests/data/test_check_missing_files.py diff --git a/tests/test_create_cross_validation_datalist.py b/tests/data/test_create_cross_validation_datalist.py similarity index 100% rename from tests/test_create_cross_validation_datalist.py rename to tests/data/test_create_cross_validation_datalist.py diff --git a/tests/test_csv_dataset.py b/tests/data/test_csv_dataset.py similarity index 100% rename from tests/test_csv_dataset.py rename to tests/data/test_csv_dataset.py diff --git a/tests/test_csv_iterable_dataset.py b/tests/data/test_csv_iterable_dataset.py similarity index 99% rename from tests/test_csv_iterable_dataset.py rename to tests/data/test_csv_iterable_dataset.py index 3dc54e3151..ed71cb39d4 100644 --- a/tests/test_csv_iterable_dataset.py +++ b/tests/data/test_csv_iterable_dataset.py @@ -26,7 +26,6 @@ @skip_if_windows class TestCSVIterableDataset(unittest.TestCase): - def test_values(self): with tempfile.TemporaryDirectory() as tempdir: test_data1 = [ diff --git a/tests/test_csv_saver.py b/tests/data/test_csv_saver.py similarity index 100% rename from tests/test_csv_saver.py rename to tests/data/test_csv_saver.py diff --git a/tests/test_dataloader.py b/tests/data/test_dataloader.py similarity index 99% rename from tests/test_dataloader.py rename to tests/data/test_dataloader.py index 929f362341..32e624a860 100644 --- a/tests/test_dataloader.py +++ b/tests/data/test_dataloader.py @@ -29,7 +29,6 @@ class TestDataLoader(unittest.TestCase): - def test_values(self): datalist = [ {"image": "spleen_19.nii.gz", "label": "spleen_label_19.nii.gz"}, @@ -60,7 +59,6 @@ def test_exception(self, datalist): class _RandomDataset(torch.utils.data.Dataset, Randomizable): - def __getitem__(self, index): return self.R.randint(0, 1000, (1,)) diff --git a/tests/test_dataset.py b/tests/data/test_dataset.py similarity index 98% rename from tests/test_dataset.py rename to tests/data/test_dataset.py index 0d37ae2efd..572dccc53b 100644 --- a/tests/test_dataset.py +++ b/tests/data/test_dataset.py @@ -24,13 +24,12 @@ from monai.data import 
Dataset from monai.transforms import Compose, Lambda, LoadImage, LoadImaged, SimulateDelay, SimulateDelayd -from tests.test_compose import TEST_COMPOSE_LAZY_ON_CALL_LOGGING_TEST_CASES, data_from_keys +from tests.transforms.compose.test_compose import TEST_COMPOSE_LAZY_ON_CALL_LOGGING_TEST_CASES, data_from_keys TEST_CASE_1 = [(128, 128, 128)] class TestDataset(unittest.TestCase): - @parameterized.expand([TEST_CASE_1]) def test_shape(self, expected_shape): test_image = nib.Nifti1Image(np.random.randint(0, 2, size=[128, 128, 128]).astype(float), np.eye(4)) @@ -100,7 +99,6 @@ def test_dataset_lazy_on_call(self): class TestTupleDataset(unittest.TestCase): - @parameterized.expand([TEST_CASE_1]) def test_shape(self, expected_shape): test_image = nib.Nifti1Image(np.random.randint(0, 2, size=[128, 128, 128]).astype(float), np.eye(4)) diff --git a/tests/test_dataset_func.py b/tests/data/test_dataset_func.py similarity index 100% rename from tests/test_dataset_func.py rename to tests/data/test_dataset_func.py diff --git a/tests/test_dataset_summary.py b/tests/data/test_dataset_summary.py similarity index 100% rename from tests/test_dataset_summary.py rename to tests/data/test_dataset_summary.py diff --git a/tests/test_fft_utils.py b/tests/data/test_fft_utils.py similarity index 99% rename from tests/test_fft_utils.py rename to tests/data/test_fft_utils.py index 44364afb63..f09cb26ae4 100644 --- a/tests/test_fft_utils.py +++ b/tests/data/test_fft_utils.py @@ -42,7 +42,6 @@ class TestFFT(unittest.TestCase): - @parameterized.expand(TESTS) def test(self, test_data, res_data): result = fftn_centered(test_data, spatial_dims=2, is_complex=False) diff --git a/tests/test_folder_layout.py b/tests/data/test_folder_layout.py similarity index 100% rename from tests/test_folder_layout.py rename to tests/data/test_folder_layout.py diff --git a/tests/test_gdsdataset.py b/tests/data/test_gdsdataset.py similarity index 99% rename from tests/test_gdsdataset.py rename to tests/data/test_gdsdataset.py index dda171ea3c..b4acb3bf55 100644 --- a/tests/test_gdsdataset.py +++ b/tests/data/test_gdsdataset.py @@ -64,7 +64,6 @@ class _InplaceXform(Transform): - def __call__(self, data): data[0] = data[0] + 1 return data @@ -74,7 +73,6 @@ def __call__(self, data): @unittest.skipUnless(has_cp, "Requires CuPy library.") @unittest.skipUnless(has_cp and has_kvikio_numpy, "Requires CuPy and kvikio library.") class TestDataset(unittest.TestCase): - def test_cache(self): """testing no inplace change to the hashed item""" for p in TEST_NDARRAYS[:2]: diff --git a/tests/test_grid_dataset.py b/tests/data/test_grid_dataset.py similarity index 99% rename from tests/test_grid_dataset.py rename to tests/data/test_grid_dataset.py index 0ed7c1b263..0b44564834 100644 --- a/tests/test_grid_dataset.py +++ b/tests/data/test_grid_dataset.py @@ -58,7 +58,6 @@ def identity_generator(x): class TestGridPatchDataset(unittest.TestCase): - def setUp(self): set_determinism(seed=1234) diff --git a/tests/test_handler_smartcache.py b/tests/data/test_handler_smartcache.py similarity index 100% rename from tests/test_handler_smartcache.py rename to tests/data/test_handler_smartcache.py diff --git a/tests/test_hashing.py b/tests/data/test_hashing.py similarity index 100% rename from tests/test_hashing.py rename to tests/data/test_hashing.py diff --git a/tests/test_header_correct.py b/tests/data/test_header_correct.py similarity index 100% rename from tests/test_header_correct.py rename to tests/data/test_header_correct.py diff --git a/tests/test_image_dataset.py 
b/tests/data/test_image_dataset.py similarity index 100% rename from tests/test_image_dataset.py rename to tests/data/test_image_dataset.py diff --git a/tests/test_image_rw.py b/tests/data/test_image_rw.py similarity index 99% rename from tests/test_image_rw.py rename to tests/data/test_image_rw.py index 20db7ca640..d90c1c8571 100644 --- a/tests/test_image_rw.py +++ b/tests/data/test_image_rw.py @@ -33,7 +33,6 @@ @unittest.skipUnless(has_itk, "itk not installed") class TestLoadSaveNifti(unittest.TestCase): - def setUp(self): self.test_dir = tempfile.mkdtemp() @@ -98,7 +97,6 @@ def test_4d(self, reader, writer): @unittest.skipUnless(has_itk, "itk not installed") class TestLoadSavePNG(unittest.TestCase): - def setUp(self): self.test_dir = tempfile.mkdtemp() @@ -139,7 +137,6 @@ def test_rgb(self, reader, writer): class TestRegRes(unittest.TestCase): - def test_0_default(self): self.assertTrue(len(resolve_writer(".png")) > 0, "has png writer") self.assertTrue(len(resolve_writer(".nrrd")) > 0, "has nrrd writer") @@ -156,7 +153,6 @@ def test_1_new(self): @unittest.skipUnless(has_itk, "itk not installed") class TestLoadSaveNrrd(unittest.TestCase): - def setUp(self): self.test_dir = tempfile.mkdtemp() diff --git a/tests/test_init_reader.py b/tests/data/test_init_reader.py similarity index 99% rename from tests/test_init_reader.py rename to tests/data/test_init_reader.py index cf73b84766..4170412207 100644 --- a/tests/test_init_reader.py +++ b/tests/data/test_init_reader.py @@ -19,7 +19,6 @@ class TestInitLoadImage(unittest.TestCase): - def test_load_image(self): instance1 = LoadImage(image_only=False, dtype=None) instance2 = LoadImage(image_only=True, dtype=None) diff --git a/tests/test_is_supported_format.py b/tests/data/test_is_supported_format.py similarity index 100% rename from tests/test_is_supported_format.py rename to tests/data/test_is_supported_format.py diff --git a/tests/test_iterable_dataset.py b/tests/data/test_iterable_dataset.py similarity index 100% rename from tests/test_iterable_dataset.py rename to tests/data/test_iterable_dataset.py diff --git a/tests/test_itk_torch_bridge.py b/tests/data/test_itk_torch_bridge.py similarity index 99% rename from tests/test_itk_torch_bridge.py rename to tests/data/test_itk_torch_bridge.py index ca73f12174..a8b35b61cb 100644 --- a/tests/test_itk_torch_bridge.py +++ b/tests/data/test_itk_torch_bridge.py @@ -15,6 +15,7 @@ import os import tempfile import unittest +from pathlib import Path import numpy as np import torch @@ -55,10 +56,9 @@ @unittest.skipUnless(has_itk, "Requires `itk` package.") class TestITKTorchAffineMatrixBridge(unittest.TestCase): - def setUp(self): set_determinism(seed=0) - self.data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testing_data") + self.data_dir = Path(__file__).parents[1] / "testing_data" self.reader = ITKReader(pixel_type=itk.F) for file_name in RW_TESTS: @@ -500,7 +500,6 @@ def test_use_reference_space(self, ref_filepath, filepath): @unittest.skipUnless(has_nib, "Requires `nibabel` package.") @skip_if_quick class TestITKTorchRW(unittest.TestCase): - def setUp(self): TestITKTorchAffineMatrixBridge.setUp(self) diff --git a/tests/test_itk_writer.py b/tests/data/test_itk_writer.py similarity index 100% rename from tests/test_itk_writer.py rename to tests/data/test_itk_writer.py diff --git a/tests/test_list_data_collate.py b/tests/data/test_list_data_collate.py similarity index 100% rename from tests/test_list_data_collate.py rename to tests/data/test_list_data_collate.py diff --git 
a/tests/test_lmdbdataset.py b/tests/data/test_lmdbdataset.py similarity index 99% rename from tests/test_lmdbdataset.py rename to tests/data/test_lmdbdataset.py index c1fcee8071..04fbf95cc3 100644 --- a/tests/test_lmdbdataset.py +++ b/tests/data/test_lmdbdataset.py @@ -81,7 +81,6 @@ class _InplaceXform(Transform): - def __call__(self, data): if data: data[0] = data[0] + np.pi @@ -92,7 +91,6 @@ def __call__(self, data): @skip_if_windows class TestLMDBDataset(unittest.TestCase): - def test_cache(self): """testing no inplace change to the hashed item""" items = [[list(range(i))] for i in range(5)] diff --git a/tests/test_lmdbdataset_dist.py b/tests/data/test_lmdbdataset_dist.py similarity index 99% rename from tests/test_lmdbdataset_dist.py rename to tests/data/test_lmdbdataset_dist.py index dc3fd2f9cb..0ebe206205 100644 --- a/tests/test_lmdbdataset_dist.py +++ b/tests/data/test_lmdbdataset_dist.py @@ -23,7 +23,6 @@ class _InplaceXform(Transform): - def __call__(self, data): if data: data[0] = data[0] + np.pi @@ -34,7 +33,6 @@ def __call__(self, data): @skip_if_windows class TestMPLMDBDataset(DistTestCase): - def setUp(self): self.tempdir = tempfile.mkdtemp() diff --git a/tests/test_load_decathlon_datalist.py b/tests/data/test_load_decathlon_datalist.py similarity index 100% rename from tests/test_load_decathlon_datalist.py rename to tests/data/test_load_decathlon_datalist.py diff --git a/tests/test_make_nifti.py b/tests/data/test_make_nifti.py similarity index 99% rename from tests/test_make_nifti.py rename to tests/data/test_make_nifti.py index b3d85c45c7..f604eab76f 100644 --- a/tests/test_make_nifti.py +++ b/tests/data/test_make_nifti.py @@ -34,7 +34,6 @@ @unittest.skipUnless(has_nib, "Requires nibabel") class TestMakeNifti(unittest.TestCase): - @parameterized.expand(TESTS) def test_make_nifti(self, params): im, _ = create_test_image_2d(100, 88) diff --git a/tests/test_mapping_file.py b/tests/data/test_mapping_file.py similarity index 100% rename from tests/test_mapping_file.py rename to tests/data/test_mapping_file.py diff --git a/tests/test_masked_patch_wsi_dataset.py b/tests/data/test_masked_patch_wsi_dataset.py similarity index 96% rename from tests/test_masked_patch_wsi_dataset.py rename to tests/data/test_masked_patch_wsi_dataset.py index 59167b8b14..f2ff86c365 100644 --- a/tests/test_masked_patch_wsi_dataset.py +++ b/tests/data/test_masked_patch_wsi_dataset.py @@ -13,6 +13,7 @@ import os import unittest +from pathlib import Path from unittest import skipUnless from numpy.testing import assert_array_equal @@ -34,7 +35,8 @@ FILE_KEY = "wsi_generic_tiff" FILE_URL = testing_data_config("images", FILE_KEY, "url") -FILE_PATH = os.path.join(os.path.dirname(__file__), "testing_data", f"temp_{FILE_KEY}.tiff") +TESTS_PATH = Path(__file__).parents[1] +FILE_PATH = os.path.join(TESTS_PATH, "testing_data", f"temp_{FILE_KEY}.tiff") TEST_CASE_0 = [ {"data": [{"image": FILE_PATH, WSIPatchKeys.LEVEL: 8, WSIPatchKeys.SIZE: (2, 2)}], "mask_level": 8}, @@ -74,7 +76,6 @@ def setUpModule(): class MaskedPatchWSIDatasetTests: - class Tests(unittest.TestCase): backend = None @@ -101,7 +102,6 @@ def test_gen_patches(self, input_parameters, expected): @skipUnless(has_cucim, "Requires cucim") class TestSlidingPatchWSIDatasetCuCIM(MaskedPatchWSIDatasetTests.Tests): - @classmethod def setUpClass(cls): cls.backend = "cucim" @@ -109,7 +109,6 @@ def setUpClass(cls): @skipUnless(has_osl, "Requires openslide") class TestSlidingPatchWSIDatasetOpenSlide(MaskedPatchWSIDatasetTests.Tests): - @classmethod def 
setUpClass(cls): cls.backend = "openslide" diff --git a/tests/test_nifti_header_revise.py b/tests/data/test_nifti_header_revise.py similarity index 100% rename from tests/test_nifti_header_revise.py rename to tests/data/test_nifti_header_revise.py diff --git a/tests/test_nifti_rw.py b/tests/data/test_nifti_rw.py similarity index 99% rename from tests/test_nifti_rw.py rename to tests/data/test_nifti_rw.py index dded2b19c3..cd75bef93d 100644 --- a/tests/test_nifti_rw.py +++ b/tests/data/test_nifti_rw.py @@ -72,7 +72,6 @@ class TestNiftiLoadRead(unittest.TestCase): - @parameterized.expand(TESTS) def test_orientation(self, array, affine, reader_param, expected): test_image = make_nifti_image(array, affine) diff --git a/tests/test_npzdictitemdataset.py b/tests/data/test_npzdictitemdataset.py similarity index 100% rename from tests/test_npzdictitemdataset.py rename to tests/data/test_npzdictitemdataset.py diff --git a/tests/test_nrrd_reader.py b/tests/data/test_nrrd_reader.py similarity index 100% rename from tests/test_nrrd_reader.py rename to tests/data/test_nrrd_reader.py diff --git a/tests/test_numpy_reader.py b/tests/data/test_numpy_reader.py similarity index 99% rename from tests/test_numpy_reader.py rename to tests/data/test_numpy_reader.py index bfb9e1b15b..c427778c67 100644 --- a/tests/test_numpy_reader.py +++ b/tests/data/test_numpy_reader.py @@ -24,7 +24,6 @@ class TestNumpyReader(unittest.TestCase): - def test_npy(self): test_data = np.random.randint(0, 256, size=[3, 4, 4]) with tempfile.TemporaryDirectory() as tempdir: diff --git a/tests/test_partition_dataset.py b/tests/data/test_partition_dataset.py similarity index 100% rename from tests/test_partition_dataset.py rename to tests/data/test_partition_dataset.py diff --git a/tests/test_partition_dataset_classes.py b/tests/data/test_partition_dataset_classes.py similarity index 100% rename from tests/test_partition_dataset_classes.py rename to tests/data/test_partition_dataset_classes.py diff --git a/tests/test_patch_dataset.py b/tests/data/test_patch_dataset.py similarity index 100% rename from tests/test_patch_dataset.py rename to tests/data/test_patch_dataset.py diff --git a/tests/test_patch_wsi_dataset.py b/tests/data/test_patch_wsi_dataset.py similarity index 98% rename from tests/test_patch_wsi_dataset.py rename to tests/data/test_patch_wsi_dataset.py index 9203cb2d1a..0d520e56e8 100644 --- a/tests/test_patch_wsi_dataset.py +++ b/tests/data/test_patch_wsi_dataset.py @@ -13,6 +13,7 @@ import os import unittest +from pathlib import Path from unittest import skipUnless import numpy as np @@ -34,7 +35,8 @@ FILE_KEY = "wsi_generic_tiff" FILE_URL = testing_data_config("images", FILE_KEY, "url") -FILE_PATH = os.path.join(os.path.dirname(__file__), "testing_data", f"temp_{FILE_KEY}.tiff") +TESTS_PATH = Path(__file__).parents[1].as_posix() +FILE_PATH = os.path.join(TESTS_PATH, "testing_data", f"temp_{FILE_KEY}.tiff") TEST_CASE_0 = [ { @@ -128,7 +130,6 @@ def setUpModule(): class PatchWSIDatasetTests: - class Tests(unittest.TestCase): backend = None @@ -183,7 +184,6 @@ def test_read_patches_str_multi(self, input_parameters, expected): @skipUnless(has_cim, "Requires cucim") class TestPatchWSIDatasetCuCIM(PatchWSIDatasetTests.Tests): - @classmethod def setUpClass(cls): cls.backend = "cucim" @@ -191,7 +191,6 @@ def setUpClass(cls): @skipUnless(has_osl, "Requires openslide") class TestPatchWSIDatasetOpenSlide(PatchWSIDatasetTests.Tests): - @classmethod def setUpClass(cls): cls.backend = "openslide" diff --git 
a/tests/test_persistentdataset.py b/tests/data/test_persistentdataset.py similarity index 100% rename from tests/test_persistentdataset.py rename to tests/data/test_persistentdataset.py diff --git a/tests/test_persistentdataset_dist.py b/tests/data/test_persistentdataset_dist.py similarity index 99% rename from tests/test_persistentdataset_dist.py rename to tests/data/test_persistentdataset_dist.py index 2a9df63c06..ab36979f5e 100644 --- a/tests/test_persistentdataset_dist.py +++ b/tests/data/test_persistentdataset_dist.py @@ -25,7 +25,6 @@ class _InplaceXform(Transform): - def __call__(self, data): if data: data[0] = data[0] + np.pi @@ -35,7 +34,6 @@ def __call__(self, data): class TestDistDataset(DistTestCase): - def setUp(self): self.tempdir = tempfile.mkdtemp() @@ -60,7 +58,6 @@ def test_mp_dataset(self): class TestDistCreateDataset(DistTestCase): - def setUp(self): self.tempdir = tempfile.mkdtemp() diff --git a/tests/test_pil_reader.py b/tests/data/test_pil_reader.py similarity index 100% rename from tests/test_pil_reader.py rename to tests/data/test_pil_reader.py diff --git a/tests/test_png_rw.py b/tests/data/test_png_rw.py similarity index 100% rename from tests/test_png_rw.py rename to tests/data/test_png_rw.py diff --git a/tests/test_resample_datalist.py b/tests/data/test_resample_datalist.py similarity index 100% rename from tests/test_resample_datalist.py rename to tests/data/test_resample_datalist.py diff --git a/tests/test_sampler_dist.py b/tests/data/test_sampler_dist.py similarity index 99% rename from tests/test_sampler_dist.py rename to tests/data/test_sampler_dist.py index cd0dbc07e2..7059a44e6b 100644 --- a/tests/test_sampler_dist.py +++ b/tests/data/test_sampler_dist.py @@ -24,7 +24,6 @@ class DistributedSamplerTest(DistTestCase): - @DistCall(nnodes=1, nproc_per_node=2) def test_even(self): data = [1, 2, 3, 4, 5] diff --git a/tests/test_select_cross_validation_folds.py b/tests/data/test_select_cross_validation_folds.py similarity index 100% rename from tests/test_select_cross_validation_folds.py rename to tests/data/test_select_cross_validation_folds.py diff --git a/tests/test_shuffle_buffer.py b/tests/data/test_shuffle_buffer.py similarity index 99% rename from tests/test_shuffle_buffer.py rename to tests/data/test_shuffle_buffer.py index e5c27e51a5..ee0ce9e2d1 100644 --- a/tests/test_shuffle_buffer.py +++ b/tests/data/test_shuffle_buffer.py @@ -23,7 +23,6 @@ @SkipIfBeforePyTorchVersion((1, 12)) class TestShuffleBuffer(unittest.TestCase): - def test_shape(self): buffer = ShuffleBuffer([1, 2, 3, 4], seed=0) num_workers = 2 if sys.platform == "linux" else 0 diff --git a/tests/test_sliding_patch_wsi_dataset.py b/tests/data/test_sliding_patch_wsi_dataset.py similarity index 96% rename from tests/test_sliding_patch_wsi_dataset.py rename to tests/data/test_sliding_patch_wsi_dataset.py index 8664e865a3..8e27f7ad0e 100644 --- a/tests/test_sliding_patch_wsi_dataset.py +++ b/tests/data/test_sliding_patch_wsi_dataset.py @@ -13,6 +13,7 @@ import os import unittest +from pathlib import Path from unittest import skipUnless import numpy as np @@ -34,10 +35,11 @@ FILE_KEY = "wsi_generic_tiff" FILE_URL = testing_data_config("images", FILE_KEY, "url") -FILE_PATH = os.path.join(os.path.dirname(__file__), "testing_data", f"temp_{FILE_KEY}.tiff") +TESTS_PATH = Path(__file__).parents[1].as_posix() +FILE_PATH = os.path.join(TESTS_PATH, "testing_data", f"temp_{FILE_KEY}.tiff") -FILE_PATH_SMALL_0 = os.path.join(os.path.dirname(__file__), "testing_data", "temp_wsi_inference_0.tiff") 
-FILE_PATH_SMALL_1 = os.path.join(os.path.dirname(__file__), "testing_data", "temp_wsi_inference_1.tiff") +FILE_PATH_SMALL_0 = os.path.join(TESTS_PATH, "testing_data", "temp_wsi_inference_0.tiff") +FILE_PATH_SMALL_1 = os.path.join(TESTS_PATH, "testing_data", "temp_wsi_inference_1.tiff") ARRAY_SMALL_0 = np.random.randint(low=0, high=255, size=(3, 4, 4), dtype=np.uint8) ARRAY_SMALL_1 = np.random.randint(low=0, high=255, size=(3, 5, 5), dtype=np.uint8) @@ -213,7 +215,6 @@ def setUpModule(): class SlidingPatchWSIDatasetTests: - class Tests(unittest.TestCase): backend = None @@ -253,7 +254,6 @@ def test_read_patches_large(self, input_parameters, expected): @skipUnless(has_cucim, "Requires cucim") class TestSlidingPatchWSIDatasetCuCIM(SlidingPatchWSIDatasetTests.Tests): - @classmethod def setUpClass(cls): cls.backend = "cucim" @@ -261,7 +261,6 @@ def setUpClass(cls): @skipUnless(has_osl, "Requires openslide") class TestSlidingPatchWSIDatasetOpenSlide(SlidingPatchWSIDatasetTests.Tests): - @classmethod def setUpClass(cls): cls.backend = "openslide" diff --git a/tests/test_smartcachedataset.py b/tests/data/test_smartcachedataset.py similarity index 99% rename from tests/test_smartcachedataset.py rename to tests/data/test_smartcachedataset.py index 1c55961d85..7e59747c5c 100644 --- a/tests/test_smartcachedataset.py +++ b/tests/data/test_smartcachedataset.py @@ -38,7 +38,6 @@ class TestSmartCacheDataset(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5]) def test_shape(self, replace_rate, num_replace_workers, transform): test_image = nib.Nifti1Image(np.random.randint(0, 2, size=[8, 8, 8]).astype(float), np.eye(4)) diff --git a/tests/test_synthetic.py b/tests/data/test_synthetic.py similarity index 100% rename from tests/test_synthetic.py rename to tests/data/test_synthetic.py diff --git a/tests/test_thread_buffer.py b/tests/data/test_thread_buffer.py similarity index 99% rename from tests/test_thread_buffer.py rename to tests/data/test_thread_buffer.py index cd7abc8dd4..71c6c889d7 100644 --- a/tests/test_thread_buffer.py +++ b/tests/data/test_thread_buffer.py @@ -24,7 +24,6 @@ class TestDataLoader(unittest.TestCase): - def setUp(self): super().setUp() diff --git a/tests/test_threadcontainer.py b/tests/data/test_threadcontainer.py similarity index 96% rename from tests/test_threadcontainer.py rename to tests/data/test_threadcontainer.py index e61ef2bfd1..519f8350a0 100644 --- a/tests/test_threadcontainer.py +++ b/tests/data/test_threadcontainer.py @@ -15,6 +15,7 @@ import tempfile import time import unittest +from pathlib import Path import torch @@ -36,7 +37,6 @@ class TestThreadContainer(unittest.TestCase): - @SkipIfNoModule("ignite") def test_container(self): net = torch.nn.Conv2d(1, 1, 3, padding=1) @@ -70,8 +70,8 @@ def test_container(self): @SkipIfNoModule("matplotlib") def test_plot(self): set_determinism(0) - - testing_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testing_data") + test_dir = Path(__file__).parents[1] + testing_dir = os.path.join(test_dir, "testing_data") net = torch.nn.Conv2d(1, 1, 3, padding=1) diff --git a/tests/test_video_datasets.py b/tests/data/test_video_datasets.py similarity index 97% rename from tests/test_video_datasets.py rename to tests/data/test_video_datasets.py index 32eed94407..b338885511 100644 --- a/tests/test_video_datasets.py +++ b/tests/data/test_video_datasets.py @@ -13,6 +13,7 @@ import os import unittest +from pathlib import Path import torch @@ -31,7 +32,6 @@ class Base: - 
class TestVideoDataset(unittest.TestCase): video_source: int | str ds: type[VideoDataset] @@ -116,7 +116,8 @@ def setUpClass(cls): cls.known_num_frames = None cls.video_source = None return - cls.video_source = os.path.join(os.path.dirname(__file__), "testing_data", fname) + tests_path = Path(__file__).parents[1].as_posix() + cls.video_source = os.path.join(tests_path, "testing_data", fname) download_url_or_skip_test( url=config["url"], filepath=cls.video_source, diff --git a/tests/test_weighted_random_sampler_dist.py b/tests/data/test_weighted_random_sampler_dist.py similarity index 99% rename from tests/test_weighted_random_sampler_dist.py rename to tests/data/test_weighted_random_sampler_dist.py index d60fae08da..cbcbc6821a 100644 --- a/tests/test_weighted_random_sampler_dist.py +++ b/tests/data/test_weighted_random_sampler_dist.py @@ -24,7 +24,6 @@ @skip_if_windows @skip_if_darwin class DistributedWeightedRandomSamplerTest(DistTestCase): - @DistCall(nnodes=1, nproc_per_node=2) def test_sampling(self): data = [1, 2, 3, 4, 5] diff --git a/tests/test_zipdataset.py b/tests/data/test_zipdataset.py similarity index 100% rename from tests/test_zipdataset.py rename to tests/data/test_zipdataset.py diff --git a/tests/test_decollate.py b/tests/data/utils/test_decollate.py similarity index 99% rename from tests/test_decollate.py rename to tests/data/utils/test_decollate.py index 2eaec0937c..9adb3f267d 100644 --- a/tests/test_decollate.py +++ b/tests/data/utils/test_decollate.py @@ -81,7 +81,6 @@ class TestDeCollate(unittest.TestCase): - def setUp(self) -> None: set_determinism(seed=0) @@ -160,7 +159,6 @@ def test_decollation_list(self, *transforms): class TestBasicDeCollate(unittest.TestCase): - @parameterized.expand(TEST_BASIC) def test_decollation_examples(self, input_val, expected_out): out = decollate_batch(input_val) diff --git a/tests/test_dev_collate.py b/tests/data/utils/test_dev_collate.py similarity index 100% rename from tests/test_dev_collate.py rename to tests/data/utils/test_dev_collate.py diff --git a/tests/test_file_basename.py b/tests/data/utils/test_file_basename.py similarity index 100% rename from tests/test_file_basename.py rename to tests/data/utils/test_file_basename.py diff --git a/tests/test_ori_ras_lps.py b/tests/data/utils/test_ori_ras_lps.py similarity index 77% rename from tests/test_ori_ras_lps.py rename to tests/data/utils/test_ori_ras_lps.py index 9536c64b35..3d69982b1e 100644 --- a/tests/test_ori_ras_lps.py +++ b/tests/data/utils/test_ori_ras_lps.py @@ -23,14 +23,16 @@ for p in TEST_NDARRAYS: case_1d = p([[1.0, 0.0], [1.0, 1.0]]), p([[-1.0, 0.0], [1.0, 1.0]]) TEST_CASES_AFFINE.append(case_1d) - case_2d_1 = p([[1.0, 0.0, 1.0], [1.0, 1.0, 1.0]]), p([[-1.0, 0.0, -1.0], [1.0, 1.0, 1.0]]) + case_2d_1 = (p([[1.0, 0.0, 1.0], [1.0, 1.0, 1.0]]), p([[-1.0, 0.0, -1.0], [1.0, 1.0, 1.0]])) TEST_CASES_AFFINE.append(case_2d_1) - case_2d_2 = p([[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [1.0, 1.0, 1.0]]), p( - [[-1.0, 0.0, -1.0], [0.0, -1.0, -1.0], [1.0, 1.0, 1.0]] + case_2d_2 = ( + p([[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [1.0, 1.0, 1.0]]), + p([[-1.0, 0.0, -1.0], [0.0, -1.0, -1.0], [1.0, 1.0, 1.0]]), ) TEST_CASES_AFFINE.append(case_2d_2) - case_3d = p([[1.0, 0.0, 1.0, 1.0], [0.0, 1.0, 1.0, 2.0], [1.0, 1.0, 1.0, 3.0]]), p( - [[-1.0, 0.0, -1.0, -1.0], [0.0, -1.0, -1.0, -2.0], [1.0, 1.0, 1.0, 3.0]] + case_3d = ( + p([[1.0, 0.0, 1.0, 1.0], [0.0, 1.0, 1.0, 2.0], [1.0, 1.0, 1.0, 3.0]]), + p([[-1.0, 0.0, -1.0, -1.0], [0.0, -1.0, -1.0, -2.0], [1.0, 1.0, 1.0, 3.0]]), ) 
TEST_CASES_AFFINE.append(case_3d) case_4d = p(np.ones((5, 5))), p([[-1] * 5, [-1] * 5, [1] * 5, [1] * 5, [1] * 5]) @@ -38,7 +40,6 @@ class TestITKWriter(unittest.TestCase): - @parameterized.expand(TEST_CASES_AFFINE) def test_ras_to_lps(self, param, expected): assert_allclose(orientation_ras_lps(param), expected) diff --git a/tests/test_zoom_affine.py b/tests/data/utils/test_zoom_affine.py similarity index 100% rename from tests/test_zoom_affine.py rename to tests/data/utils/test_zoom_affine.py diff --git a/tests/engines/__init__.py b/tests/engines/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/engines/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/test_ensemble_evaluator.py b/tests/engines/test_ensemble_evaluator.py similarity index 99% rename from tests/test_ensemble_evaluator.py rename to tests/engines/test_ensemble_evaluator.py index f5dc4bde52..581d04e36e 100644 --- a/tests/test_ensemble_evaluator.py +++ b/tests/engines/test_ensemble_evaluator.py @@ -26,13 +26,11 @@ class TestEnsembleEvaluator(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) def test_content(self, pred_keys): device = torch.device("cpu:0") class TestDataset(torch.utils.data.Dataset): - def __len__(self): return 8 @@ -42,7 +40,6 @@ def __getitem__(self, index): val_loader = torch.utils.data.DataLoader(TestDataset()) class TestNet(torch.nn.Module): - def __init__(self, func): super().__init__() self.func = func diff --git a/tests/test_prepare_batch_default.py b/tests/engines/test_prepare_batch_default.py similarity index 99% rename from tests/test_prepare_batch_default.py rename to tests/engines/test_prepare_batch_default.py index cff3d38281..b132554889 100644 --- a/tests/test_prepare_batch_default.py +++ b/tests/engines/test_prepare_batch_default.py @@ -28,7 +28,6 @@ def forward(self, x: torch.Tensor): class TestPrepareBatchDefault(unittest.TestCase): - @parameterized.expand( [ ( diff --git a/tests/test_prepare_batch_default_dist.py b/tests/engines/test_prepare_batch_default_dist.py similarity index 99% rename from tests/test_prepare_batch_default_dist.py rename to tests/engines/test_prepare_batch_default_dist.py index c974db7898..be425dca78 100644 --- a/tests/test_prepare_batch_default_dist.py +++ b/tests/engines/test_prepare_batch_default_dist.py @@ -50,7 +50,6 @@ def forward(self, x: torch.Tensor): class DistributedPrepareBatchDefault(DistTestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) @DistCall(nnodes=1, nproc_per_node=2, node_rank=0) def test_compute(self, dataloaders): diff --git a/tests/test_prepare_batch_diffusion.py b/tests/engines/test_prepare_batch_diffusion.py similarity index 100% rename from tests/test_prepare_batch_diffusion.py rename to tests/engines/test_prepare_batch_diffusion.py diff --git a/tests/test_prepare_batch_extra_input.py b/tests/engines/test_prepare_batch_extra_input.py similarity index 99% rename from tests/test_prepare_batch_extra_input.py 
rename to tests/engines/test_prepare_batch_extra_input.py index 5b0afa3e38..241da09701 100644 --- a/tests/test_prepare_batch_extra_input.py +++ b/tests/engines/test_prepare_batch_extra_input.py @@ -43,7 +43,6 @@ def forward(self, x: torch.Tensor, t1=None, t2=None, t3=None): class TestPrepareBatchExtraInput(unittest.TestCase): - @parameterized.expand([TEST_CASE_0, TEST_CASE_1, TEST_CASE_2]) def test_content(self, input_args, expected_value): device = torch.device("cuda" if torch.cuda.is_available() else "cpu") diff --git a/tests/fl/__init__.py b/tests/fl/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/fl/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/fl/monai_algo/__init__.py b/tests/fl/monai_algo/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/fl/monai_algo/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
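The new `tests/engines/__init__.py`, `tests/fl/__init__.py` and `tests/fl/monai_algo/__init__.py` files above contain only the license header; they make the relocated test folders importable as regular Python packages. A minimal sketch of the effect, assuming the repository root is on `sys.path` (the `from tests.test_utils import ...` lines in these diffs already rely on that):

```
# With __init__.py in place, the moved test folders resolve as packages,
# so dotted imports and unittest discovery keep working after the move.
import importlib

for pkg in ("tests.engines", "tests.fl", "tests.fl.monai_algo"):
    mod = importlib.import_module(pkg)
    print(pkg, "->", mod.__file__)  # points at the package's __init__.py
```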
diff --git a/tests/test_fl_monai_algo.py b/tests/fl/monai_algo/test_fl_monai_algo.py similarity index 99% rename from tests/test_fl_monai_algo.py rename to tests/fl/monai_algo/test_fl_monai_algo.py index d9bfe78d9c..2c1a8488cc 100644 --- a/tests/test_fl_monai_algo.py +++ b/tests/fl/monai_algo/test_fl_monai_algo.py @@ -17,6 +17,7 @@ import unittest from copy import deepcopy from os.path import join as pathjoin +from pathlib import Path from parameterized import parameterized @@ -28,7 +29,7 @@ from monai.utils import path_to_uri from tests.test_utils import SkipIfNoModule -_root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__))) +_root_dir = Path(__file__).resolve().parents[2] _data_dir = os.path.join(_root_dir, "testing_data") _logging_file = pathjoin(_data_dir, "logging.conf") @@ -181,7 +182,6 @@ @SkipIfNoModule("ignite") @SkipIfNoModule("mlflow") class TestFLMonaiAlgo(unittest.TestCase): - @parameterized.expand([TEST_TRAIN_1, TEST_TRAIN_2, TEST_TRAIN_3, TEST_TRAIN_4]) def test_train(self, input_params): # initialize algo diff --git a/tests/test_fl_monai_algo_dist.py b/tests/fl/monai_algo/test_fl_monai_algo_dist.py similarity index 96% rename from tests/test_fl_monai_algo_dist.py rename to tests/fl/monai_algo/test_fl_monai_algo_dist.py index 84a2b11e4c..744169e10d 100644 --- a/tests/test_fl_monai_algo_dist.py +++ b/tests/fl/monai_algo/test_fl_monai_algo_dist.py @@ -14,6 +14,7 @@ import os import unittest from os.path import join as pathjoin +from pathlib import Path import torch.distributed as dist @@ -24,7 +25,8 @@ from monai.networks import get_state_dict from tests.test_utils import DistCall, DistTestCase, SkipIfBeforePyTorchVersion, SkipIfNoModule, skip_if_no_cuda -_root_dir = os.path.abspath(pathjoin(os.path.dirname(__file__))) +TESTS_PATH = TESTS_PATH = Path(__file__).parents[2].as_posix() +_root_dir = os.path.abspath(pathjoin(TESTS_PATH)) _data_dir = pathjoin(_root_dir, "testing_data") _logging_file = pathjoin(_data_dir, "logging.conf") @@ -32,7 +34,6 @@ @SkipIfNoModule("ignite") @SkipIfBeforePyTorchVersion((1, 11, 1)) class TestFLMonaiAlgo(DistTestCase): - @DistCall(nnodes=1, nproc_per_node=2, init_method="no_init") @skip_if_no_cuda def test_train(self): @@ -64,7 +65,7 @@ def test_train(self): data = ExchangeObject(weights=get_state_dict(network)) # test train for i in range(2): - print(f"Testing round {i+1} of {2}...") + print(f"Testing round {i + 1} of {2}...") # test evaluate metric_eo = algo.evaluate(data=data, extra={}) self.assertIsInstance(metric_eo, ExchangeObject) diff --git a/tests/test_fl_monai_algo_stats.py b/tests/fl/test_fl_monai_algo_stats.py similarity index 97% rename from tests/test_fl_monai_algo_stats.py rename to tests/fl/test_fl_monai_algo_stats.py index 92fb3e7b1f..917393be2c 100644 --- a/tests/test_fl_monai_algo_stats.py +++ b/tests/fl/test_fl_monai_algo_stats.py @@ -13,6 +13,7 @@ import os import unittest +from pathlib import Path from parameterized import parameterized @@ -22,7 +23,7 @@ from monai.fl.utils.exchange_object import ExchangeObject from tests.test_utils import SkipIfNoModule -_root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__))) +_root_dir = Path(__file__).resolve().parents[1] _data_dir = os.path.join(_root_dir, "testing_data") _logging_file = os.path.join(_data_dir, "logging.conf") @@ -64,7 +65,6 @@ @SkipIfNoModule("ignite") class TestFLMonaiAlgo(unittest.TestCase): - @parameterized.expand([TEST_GET_DATA_STATS_1, TEST_GET_DATA_STATS_2, TEST_GET_DATA_STATS_3]) def test_get_data_stats(self, input_params): # initialize 
algo diff --git a/tests/fl/utils/__init__.py b/tests/fl/utils/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/fl/utils/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/test_fl_exchange_object.py b/tests/fl/utils/test_fl_exchange_object.py similarity index 99% rename from tests/test_fl_exchange_object.py rename to tests/fl/utils/test_fl_exchange_object.py index 1698efa9ce..f83e8eaf1f 100644 --- a/tests/test_fl_exchange_object.py +++ b/tests/fl/utils/test_fl_exchange_object.py @@ -46,7 +46,6 @@ @SkipIfNoModule("torchvision") @SkipIfNoModule("ignite") class TestFLExchangeObject(unittest.TestCase): - @parameterized.expand([TEST_INIT_1, TEST_INIT_2]) def test_init(self, input_params, expected_str): eo = ExchangeObject(**input_params) diff --git a/tests/handlers/__init__.py b/tests/handlers/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/handlers/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
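The path updates in the `tests/fl` diffs above all follow the same substitution: each test now lives one or two directories deeper than before, so `os.path.dirname(__file__)` no longer lands on the folder that holds `testing_data`, and `Path(__file__).parents[N]` is used to climb back up. A minimal sketch of how the index is chosen, assuming the layout shown in the renames:

```
from pathlib import Path

# For a file at tests/fl/monai_algo/test_fl_monai_algo_dist.py:
#   parents[0] -> tests/fl/monai_algo
#   parents[1] -> tests/fl
#   parents[2] -> tests           (the directory containing testing_data/)
tests_root = Path(__file__).resolve().parents[2]
data_dir = tests_root / "testing_data"
logging_conf = data_dir / "logging.conf"
print(logging_conf)
```

Tests that moved only one level down (for example `tests/fl/test_fl_monai_algo_stats.py`) use `parents[1]` instead.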
diff --git a/tests/test_handler_checkpoint_loader.py b/tests/handlers/test_handler_checkpoint_loader.py similarity index 99% rename from tests/test_handler_checkpoint_loader.py rename to tests/handlers/test_handler_checkpoint_loader.py index d366890ae6..e16775a2ea 100644 --- a/tests/test_handler_checkpoint_loader.py +++ b/tests/handlers/test_handler_checkpoint_loader.py @@ -23,7 +23,6 @@ class TestHandlerCheckpointLoader(unittest.TestCase): - def test_one_save_one_load(self): net1 = torch.nn.PReLU() data1 = net1.state_dict() diff --git a/tests/test_handler_checkpoint_saver.py b/tests/handlers/test_handler_checkpoint_saver.py similarity index 100% rename from tests/test_handler_checkpoint_saver.py rename to tests/handlers/test_handler_checkpoint_saver.py diff --git a/tests/test_handler_classification_saver.py b/tests/handlers/test_handler_classification_saver.py similarity index 100% rename from tests/test_handler_classification_saver.py rename to tests/handlers/test_handler_classification_saver.py diff --git a/tests/test_handler_classification_saver_dist.py b/tests/handlers/test_handler_classification_saver_dist.py similarity index 99% rename from tests/test_handler_classification_saver_dist.py rename to tests/handlers/test_handler_classification_saver_dist.py index 2e8edde05a..7942efb56e 100644 --- a/tests/test_handler_classification_saver_dist.py +++ b/tests/handlers/test_handler_classification_saver_dist.py @@ -27,7 +27,6 @@ class DistributedHandlerClassificationSaver(DistTestCase): - @DistCall(nnodes=1, nproc_per_node=2) def test_saved_content(self): with tempfile.TemporaryDirectory() as tempdir: diff --git a/tests/test_handler_clearml_image.py b/tests/handlers/test_handler_clearml_image.py similarity index 100% rename from tests/test_handler_clearml_image.py rename to tests/handlers/test_handler_clearml_image.py diff --git a/tests/test_handler_clearml_stats.py b/tests/handlers/test_handler_clearml_stats.py similarity index 100% rename from tests/test_handler_clearml_stats.py rename to tests/handlers/test_handler_clearml_stats.py diff --git a/tests/test_handler_confusion_matrix.py b/tests/handlers/test_handler_confusion_matrix.py similarity index 100% rename from tests/test_handler_confusion_matrix.py rename to tests/handlers/test_handler_confusion_matrix.py diff --git a/tests/test_handler_confusion_matrix_dist.py b/tests/handlers/test_handler_confusion_matrix_dist.py similarity index 99% rename from tests/test_handler_confusion_matrix_dist.py rename to tests/handlers/test_handler_confusion_matrix_dist.py index 44d61a95b1..626428c48e 100644 --- a/tests/test_handler_confusion_matrix_dist.py +++ b/tests/handlers/test_handler_confusion_matrix_dist.py @@ -23,7 +23,6 @@ class DistributedConfusionMatrix(DistTestCase): - @DistCall(nnodes=1, nproc_per_node=2) def test_compute(self): self._compute() diff --git a/tests/test_handler_decollate_batch.py b/tests/handlers/test_handler_decollate_batch.py similarity index 99% rename from tests/test_handler_decollate_batch.py rename to tests/handlers/test_handler_decollate_batch.py index d57b22d900..f0f1a36016 100644 --- a/tests/test_handler_decollate_batch.py +++ b/tests/handlers/test_handler_decollate_batch.py @@ -22,7 +22,6 @@ class TestHandlerDecollateBatch(unittest.TestCase): - def test_compute(self): data = [ {"image": torch.tensor([[[[2.0], [3.0]]]]), "filename": ["test1"]}, diff --git a/tests/test_handler_early_stop.py b/tests/handlers/test_handler_early_stop.py similarity index 100% rename from tests/test_handler_early_stop.py rename to 
tests/handlers/test_handler_early_stop.py diff --git a/tests/test_handler_garbage_collector.py b/tests/handlers/test_handler_garbage_collector.py similarity index 100% rename from tests/test_handler_garbage_collector.py rename to tests/handlers/test_handler_garbage_collector.py diff --git a/tests/test_handler_hausdorff_distance.py b/tests/handlers/test_handler_hausdorff_distance.py similarity index 100% rename from tests/test_handler_hausdorff_distance.py rename to tests/handlers/test_handler_hausdorff_distance.py diff --git a/tests/test_handler_ignite_metric.py b/tests/handlers/test_handler_ignite_metric.py similarity index 99% rename from tests/test_handler_ignite_metric.py rename to tests/handlers/test_handler_ignite_metric.py index 972b9928ba..d72db443e4 100644 --- a/tests/test_handler_ignite_metric.py +++ b/tests/handlers/test_handler_ignite_metric.py @@ -99,7 +99,6 @@ class TestHandlerIgniteMetricHandler(unittest.TestCase): - @SkipIfNoModule("ignite") @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3]) def test_metric_fn(self, loss_params, metric_params, handler_params, expected_avg): diff --git a/tests/test_handler_lr_scheduler.py b/tests/handlers/test_handler_lr_scheduler.py similarity index 100% rename from tests/test_handler_lr_scheduler.py rename to tests/handlers/test_handler_lr_scheduler.py diff --git a/tests/test_handler_mean_dice.py b/tests/handlers/test_handler_mean_dice.py similarity index 100% rename from tests/test_handler_mean_dice.py rename to tests/handlers/test_handler_mean_dice.py diff --git a/tests/test_handler_mean_iou.py b/tests/handlers/test_handler_mean_iou.py similarity index 100% rename from tests/test_handler_mean_iou.py rename to tests/handlers/test_handler_mean_iou.py diff --git a/tests/test_handler_metrics_reloaded.py b/tests/handlers/test_handler_metrics_reloaded.py similarity index 99% rename from tests/test_handler_metrics_reloaded.py rename to tests/handlers/test_handler_metrics_reloaded.py index 65e8726c88..8c50ce7b59 100644 --- a/tests/test_handler_metrics_reloaded.py +++ b/tests/handlers/test_handler_metrics_reloaded.py @@ -73,7 +73,6 @@ @unittest.skipIf(not has_metrics, "MetricsReloaded not available.") class TestHandlerMetricsReloadedBinary(unittest.TestCase): - @parameterized.expand([TEST_CASE_BIN_1, TEST_CASE_BIN_2, TEST_CASE_BIN_3]) def test_compute(self, input_params, y_pred, y, expected_value): input_params["output_transform"] = from_engine(["pred", "label"]) @@ -114,7 +113,6 @@ def test_shape_mismatch(self, input_params, _y_pred, _y, _expected_value): @unittest.skipIf(not has_metrics, "MetricsReloaded not available.") class TestMetricsReloadedCategorical(unittest.TestCase): - @parameterized.expand([TEST_CASE_CAT_1, TEST_CASE_CAT_2]) def test_compute(self, input_params, y_pred, y, expected_value): input_params["output_transform"] = from_engine(["pred", "label"]) diff --git a/tests/test_handler_metrics_saver.py b/tests/handlers/test_handler_metrics_saver.py similarity index 100% rename from tests/test_handler_metrics_saver.py rename to tests/handlers/test_handler_metrics_saver.py diff --git a/tests/test_handler_metrics_saver_dist.py b/tests/handlers/test_handler_metrics_saver_dist.py similarity index 97% rename from tests/test_handler_metrics_saver_dist.py rename to tests/handlers/test_handler_metrics_saver_dist.py index 7140b94327..23de5e773a 100644 --- a/tests/test_handler_metrics_saver_dist.py +++ b/tests/handlers/test_handler_metrics_saver_dist.py @@ -27,7 +27,6 @@ class DistributedMetricsSaver(DistTestCase): - 
@DistCall(nnodes=1, nproc_per_node=2) def test_content(self): with tempfile.TemporaryDirectory() as tempdir: @@ -99,7 +98,7 @@ def _all_gather(engine): f_csv = csv.reader(f) for i, row in enumerate(f_csv): if i > 0: - expected = [f"{fnames[i-1]}\t{float(i):.4f}\t{float(i + 1):.4f}\t{i + 0.5:.4f}"] + expected = [f"{fnames[i - 1]}\t{float(i):.4f}\t{float(i + 1):.4f}\t{i + 0.5:.4f}"] self.assertEqual(row, expected) self.assertTrue(os.path.exists(os.path.join(tempdir, "metric3_summary.csv"))) # check the metric_summary.csv and content diff --git a/tests/test_handler_mlflow.py b/tests/handlers/test_handler_mlflow.py similarity index 99% rename from tests/test_handler_mlflow.py rename to tests/handlers/test_handler_mlflow.py index f717e0e88c..80630e6f5a 100644 --- a/tests/test_handler_mlflow.py +++ b/tests/handlers/test_handler_mlflow.py @@ -33,7 +33,6 @@ def get_event_filter(e): - def event_filter(_, event): if event in e: return True @@ -66,7 +65,6 @@ def _train_func(engine, batch): class TestHandlerMLFlow(unittest.TestCase): - def setUp(self): self.tmpdir_list = [] diff --git a/tests/test_handler_nvtx.py b/tests/handlers/test_handler_nvtx.py similarity index 99% rename from tests/test_handler_nvtx.py rename to tests/handlers/test_handler_nvtx.py index 9ad55f67e8..0d38f8e4ef 100644 --- a/tests/test_handler_nvtx.py +++ b/tests/handlers/test_handler_nvtx.py @@ -36,7 +36,6 @@ class TestHandlerDecollateBatch(unittest.TestCase): - @parameterized.expand([TEST_CASE_0, TEST_CASE_1]) @unittest.skipUnless(has_nvtx, "CUDA is required for NVTX!") def test_compute(self, data, expected): diff --git a/tests/test_handler_panoptic_quality.py b/tests/handlers/test_handler_panoptic_quality.py similarity index 99% rename from tests/test_handler_panoptic_quality.py rename to tests/handlers/test_handler_panoptic_quality.py index 868ae45e21..9c8ab93f36 100644 --- a/tests/test_handler_panoptic_quality.py +++ b/tests/handlers/test_handler_panoptic_quality.py @@ -60,7 +60,6 @@ @SkipIfNoModule("scipy.optimize") class TestHandlerPanopticQuality(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3]) def test_compute(self, input_params, expected_avg): metric = PanopticQuality(**input_params) diff --git a/tests/test_handler_parameter_scheduler.py b/tests/handlers/test_handler_parameter_scheduler.py similarity index 99% rename from tests/test_handler_parameter_scheduler.py rename to tests/handlers/test_handler_parameter_scheduler.py index 3c91622b90..8f54bea3bb 100644 --- a/tests/test_handler_parameter_scheduler.py +++ b/tests/handlers/test_handler_parameter_scheduler.py @@ -21,7 +21,6 @@ class ToyNet(Module): - def __init__(self, value): super().__init__() self.value = value @@ -37,7 +36,6 @@ def set_value(self, value): class TestHandlerParameterScheduler(unittest.TestCase): - def test_linear_scheduler(self): # Testing step_constant net = ToyNet(value=-1) @@ -118,7 +116,6 @@ def test_multistep_scheduler(self): assert_allclose(net.get_value(), 10 * 0.99 * 0.99) def test_custom_scheduler(self): - def custom_logic(initial_value, gamma, current_step): return initial_value * gamma ** (current_step % 9) diff --git a/tests/test_handler_post_processing.py b/tests/handlers/test_handler_post_processing.py similarity index 99% rename from tests/test_handler_post_processing.py rename to tests/handlers/test_handler_post_processing.py index a0e2a8ca0f..4103840e77 100644 --- a/tests/test_handler_post_processing.py +++ b/tests/handlers/test_handler_post_processing.py @@ -40,7 +40,6 @@ class 
TestHandlerPostProcessing(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) def test_compute(self, input_params, decollate, expected): data = [ diff --git a/tests/test_handler_prob_map_producer.py b/tests/handlers/test_handler_prob_map_producer.py similarity index 96% rename from tests/test_handler_prob_map_producer.py rename to tests/handlers/test_handler_prob_map_producer.py index 406fe77c8f..063f7984cb 100644 --- a/tests/test_handler_prob_map_producer.py +++ b/tests/handlers/test_handler_prob_map_producer.py @@ -13,6 +13,7 @@ import os import unittest +from pathlib import Path import numpy as np import torch @@ -72,7 +73,6 @@ def _iteration(self, engine, batchdata): class TestHandlerProbMapGenerator(unittest.TestCase): - @parameterized.expand([TEST_CASE_0, TEST_CASE_1, TEST_CASE_2]) def test_prob_map_generator(self, name, size): # set up dataset @@ -86,8 +86,9 @@ def inference(engine, batch): engine = Engine(inference) + tests_path = Path(__file__).parents[1].as_posix() # add ProbMapGenerator() to evaluator - output_dir = os.path.join(os.path.dirname(__file__), "testing_data") + output_dir = os.path.join(tests_path, "testing_data") prob_map_gen = ProbMapProducer(output_dir=output_dir) evaluator = TestEvaluator( diff --git a/tests/test_handler_regression_metrics.py b/tests/handlers/test_handler_regression_metrics.py similarity index 100% rename from tests/test_handler_regression_metrics.py rename to tests/handlers/test_handler_regression_metrics.py diff --git a/tests/test_handler_regression_metrics_dist.py b/tests/handlers/test_handler_regression_metrics_dist.py similarity index 99% rename from tests/test_handler_regression_metrics_dist.py rename to tests/handlers/test_handler_regression_metrics_dist.py index 8a455d0470..98c1b373c6 100644 --- a/tests/test_handler_regression_metrics_dist.py +++ b/tests/handlers/test_handler_regression_metrics_dist.py @@ -57,7 +57,6 @@ def psnrmetric_np(max_val, y_pred, y): class DistributedMeanSquaredError(DistTestCase): - @DistCall(nnodes=1, nproc_per_node=2) def test_compute(self): set_determinism(123) @@ -104,7 +103,6 @@ def _val_func(engine, batch): class DistributedMeanAbsoluteError(DistTestCase): - @DistCall(nnodes=1, nproc_per_node=2) def test_compute(self): set_determinism(123) @@ -151,7 +149,6 @@ def _val_func(engine, batch): class DistributedRootMeanSquaredError(DistTestCase): - @DistCall(nnodes=1, nproc_per_node=2) def test_compute(self): set_determinism(123) @@ -198,7 +195,6 @@ def _val_func(engine, batch): class DistributedPeakSignalToNoiseRatio(DistTestCase): - @DistCall(nnodes=1, nproc_per_node=2) def test_compute(self): set_determinism(123) diff --git a/tests/test_handler_rocauc.py b/tests/handlers/test_handler_rocauc.py similarity index 100% rename from tests/test_handler_rocauc.py rename to tests/handlers/test_handler_rocauc.py diff --git a/tests/test_handler_rocauc_dist.py b/tests/handlers/test_handler_rocauc_dist.py similarity index 99% rename from tests/test_handler_rocauc_dist.py rename to tests/handlers/test_handler_rocauc_dist.py index 544653f037..cfc525ebc4 100644 --- a/tests/test_handler_rocauc_dist.py +++ b/tests/handlers/test_handler_rocauc_dist.py @@ -23,7 +23,6 @@ class DistributedROCAUC(DistTestCase): - @DistCall(nnodes=1, nproc_per_node=2, node_rank=0) def test_compute(self): auc_metric = ROCAUC() diff --git a/tests/test_handler_stats.py b/tests/handlers/test_handler_stats.py similarity index 100% rename from tests/test_handler_stats.py rename to tests/handlers/test_handler_stats.py diff --git 
a/tests/test_handler_surface_distance.py b/tests/handlers/test_handler_surface_distance.py similarity index 100% rename from tests/test_handler_surface_distance.py rename to tests/handlers/test_handler_surface_distance.py diff --git a/tests/test_handler_tb_image.py b/tests/handlers/test_handler_tb_image.py similarity index 99% rename from tests/test_handler_tb_image.py rename to tests/handlers/test_handler_tb_image.py index b01ef1b26f..2ed37bdbc7 100644 --- a/tests/test_handler_tb_image.py +++ b/tests/handlers/test_handler_tb_image.py @@ -33,7 +33,6 @@ @unittest.skipUnless(has_tb, "Requires SummaryWriter installation") @SkipIfBeforePyTorchVersion((1, 13)) # issue 6683 class TestHandlerTBImage(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_tb_image_shape(self, shape): with tempfile.TemporaryDirectory() as tempdir: diff --git a/tests/test_handler_tb_stats.py b/tests/handlers/test_handler_tb_stats.py similarity index 100% rename from tests/test_handler_tb_stats.py rename to tests/handlers/test_handler_tb_stats.py diff --git a/tests/test_handler_validation.py b/tests/handlers/test_handler_validation.py similarity index 100% rename from tests/test_handler_validation.py rename to tests/handlers/test_handler_validation.py diff --git a/tests/test_trt_compile.py b/tests/handlers/test_trt_compile.py similarity index 99% rename from tests/test_trt_compile.py rename to tests/handlers/test_trt_compile.py index a8fdd02f20..6b0d329af6 100644 --- a/tests/test_trt_compile.py +++ b/tests/handlers/test_trt_compile.py @@ -52,7 +52,6 @@ def forward(self, x: list[torch.Tensor], y: torch.Tensor, z: torch.Tensor, bs: f @unittest.skipUnless(polygraphy_imported, "polygraphy is required") @SkipIfBeforeComputeCapabilityVersion((7, 5)) class TestTRTCompile(unittest.TestCase): - def setUp(self): self.gpu_device = torch.cuda.current_device() diff --git a/tests/test_write_metrics_reports.py b/tests/handlers/test_write_metrics_reports.py similarity index 100% rename from tests/test_write_metrics_reports.py rename to tests/handlers/test_write_metrics_reports.py diff --git a/tests/hvd_evenly_divisible_all_gather.py b/tests/hvd_evenly_divisible_all_gather.py index 24d1575f8f..57c110a76d 100644 --- a/tests/hvd_evenly_divisible_all_gather.py +++ b/tests/hvd_evenly_divisible_all_gather.py @@ -21,7 +21,6 @@ class HvdEvenlyDivisibleAllGather: - def test_data(self): # initialize Horovod hvd.init() diff --git a/tests/inferers/__init__.py b/tests/inferers/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/inferers/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/tests/test_avg_merger.py b/tests/inferers/test_avg_merger.py similarity index 99% rename from tests/test_avg_merger.py rename to tests/inferers/test_avg_merger.py index 9e6988e854..398ea33a00 100644 --- a/tests/test_avg_merger.py +++ b/tests/inferers/test_avg_merger.py @@ -137,7 +137,6 @@ class AvgMergerTests(unittest.TestCase): - @parameterized.expand( [ TEST_CASE_0_DEFAULT_DTYPE, diff --git a/tests/test_controlnet_inferers.py b/tests/inferers/test_controlnet_inferers.py similarity index 100% rename from tests/test_controlnet_inferers.py rename to tests/inferers/test_controlnet_inferers.py diff --git a/tests/test_diffusion_inferer.py b/tests/inferers/test_diffusion_inferer.py similarity index 100% rename from tests/test_diffusion_inferer.py rename to tests/inferers/test_diffusion_inferer.py diff --git a/tests/test_latent_diffusion_inferer.py b/tests/inferers/test_latent_diffusion_inferer.py similarity index 100% rename from tests/test_latent_diffusion_inferer.py rename to tests/inferers/test_latent_diffusion_inferer.py diff --git a/tests/test_patch_inferer.py b/tests/inferers/test_patch_inferer.py similarity index 99% rename from tests/test_patch_inferer.py rename to tests/inferers/test_patch_inferer.py index 2deab6fe73..964f08e6fe 100644 --- a/tests/test_patch_inferer.py +++ b/tests/inferers/test_patch_inferer.py @@ -245,7 +245,6 @@ class PatchInfererTests(unittest.TestCase): - @parameterized.expand( [ TEST_CASE_0_TENSOR, diff --git a/tests/test_saliency_inferer.py b/tests/inferers/test_saliency_inferer.py similarity index 100% rename from tests/test_saliency_inferer.py rename to tests/inferers/test_saliency_inferer.py diff --git a/tests/test_slice_inferer.py b/tests/inferers/test_slice_inferer.py similarity index 100% rename from tests/test_slice_inferer.py rename to tests/inferers/test_slice_inferer.py diff --git a/tests/test_sliding_window_inference.py b/tests/inferers/test_sliding_window_inference.py similarity index 99% rename from tests/test_sliding_window_inference.py rename to tests/inferers/test_sliding_window_inference.py index 5949080405..997822edd3 100644 --- a/tests/test_sliding_window_inference.py +++ b/tests/inferers/test_sliding_window_inference.py @@ -70,10 +70,8 @@ class TestSlidingWindowInference(unittest.TestCase): - @parameterized.expand(BUFFER_CASES) def test_buffers(self, size_params, buffer_steps, buffer_dim, device_params): - def mult_two(patch, *args, **kwargs): return 2.0 * patch diff --git a/tests/test_sliding_window_splitter.py b/tests/inferers/test_sliding_window_splitter.py similarity index 99% rename from tests/test_sliding_window_splitter.py rename to tests/inferers/test_sliding_window_splitter.py index daf1fcdc91..868e3f096f 100644 --- a/tests/test_sliding_window_splitter.py +++ b/tests/inferers/test_sliding_window_splitter.py @@ -236,7 +236,6 @@ def missing_parameter_filter(patch): class SlidingWindowSplitterTests(unittest.TestCase): - @parameterized.expand( [ TEST_CASE_TENSOR_0, diff --git a/tests/test_wsi_sliding_window_splitter.py b/tests/inferers/test_wsi_sliding_window_splitter.py similarity index 98% rename from tests/test_wsi_sliding_window_splitter.py rename to tests/inferers/test_wsi_sliding_window_splitter.py index 0494cc18da..228435f1b4 100644 --- a/tests/test_wsi_sliding_window_splitter.py +++ b/tests/inferers/test_wsi_sliding_window_splitter.py @@ -13,6 +13,7 @@ import os import unittest +from pathlib import Path from unittest import skipUnless import torch @@ -36,7 +37,8 @@ WSI_READER_CLASS = OpenSlideWSIReader 
WSI_GENERIC_TIFF_KEY = "wsi_generic_tiff" -WSI_GENERIC_TIFF_PATH = os.path.join(os.path.dirname(__file__), "testing_data", f"temp_{WSI_GENERIC_TIFF_KEY}.tiff") +TESTS_PATH = Path(__file__).parents[1] +WSI_GENERIC_TIFF_PATH = os.path.join(TESTS_PATH, "testing_data", f"temp_{WSI_GENERIC_TIFF_KEY}.tiff") HEIGHT = 32914 WIDTH = 46000 @@ -102,7 +104,6 @@ # Filtering functions test cases def gen_location_filter(locations): - def my_filter(patch, loc): if loc in locations: return False @@ -199,7 +200,6 @@ def setUpModule(): class WSISlidingWindowSplitterTests(unittest.TestCase): - @parameterized.expand( [ TEST_CASE_WSI_0_BASE, diff --git a/tests/test_zarr_avg_merger.py b/tests/inferers/test_zarr_avg_merger.py similarity index 100% rename from tests/test_zarr_avg_merger.py rename to tests/inferers/test_zarr_avg_merger.py diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/integration/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/test_auto3dseg_ensemble.py b/tests/integration/test_auto3dseg_ensemble.py similarity index 99% rename from tests/test_auto3dseg_ensemble.py rename to tests/integration/test_auto3dseg_ensemble.py index bd742fba43..08b059e3f7 100644 --- a/tests/test_auto3dseg_ensemble.py +++ b/tests/integration/test_auto3dseg_ensemble.py @@ -112,7 +112,6 @@ def create_sim_data(dataroot, sim_datalist, sim_dim, **kwargs): @SkipIfBeforePyTorchVersion((1, 11, 1)) @unittest.skipIf(not has_tb, "no tensorboard summary writer") class TestEnsembleBuilder(unittest.TestCase): - def setUp(self) -> None: set_determinism(0) self.test_dir = tempfile.TemporaryDirectory() diff --git a/tests/test_auto3dseg_hpo.py b/tests/integration/test_auto3dseg_hpo.py similarity index 99% rename from tests/test_auto3dseg_hpo.py rename to tests/integration/test_auto3dseg_hpo.py index cedff8e99a..f0d3492004 100644 --- a/tests/test_auto3dseg_hpo.py +++ b/tests/integration/test_auto3dseg_hpo.py @@ -79,7 +79,6 @@ def skip_if_no_optuna(obj): @SkipIfBeforePyTorchVersion((1, 11, 1)) @unittest.skipIf(not has_tb, "no tensorboard summary writer") class TestHPO(unittest.TestCase): - def setUp(self) -> None: self.test_dir = tempfile.TemporaryDirectory() test_path = self.test_dir.name @@ -155,7 +154,6 @@ def test_run_optuna(self) -> None: algo = algo_dict[AlgoKeys.ALGO] class OptunaGenLearningRate(OptunaGen): - def get_hyperparameters(self): return {"learning_rate": self.trial.suggest_float("learning_rate", 0.00001, 0.1)} diff --git a/tests/test_deepedit_interaction.py b/tests/integration/test_deepedit_interaction.py similarity index 100% rename from tests/test_deepedit_interaction.py rename to tests/integration/test_deepedit_interaction.py diff --git a/tests/test_downsample_block.py b/tests/integration/test_downsample_block.py similarity index 100% rename from tests/test_downsample_block.py rename to tests/integration/test_downsample_block.py diff --git 
a/tests/test_hovernet_nuclear_type_post_processingd.py b/tests/integration/test_hovernet_nuclear_type_post_processingd.py similarity index 99% rename from tests/test_hovernet_nuclear_type_post_processingd.py rename to tests/integration/test_hovernet_nuclear_type_post_processingd.py index 89ab730211..f2117dc15e 100644 --- a/tests/test_hovernet_nuclear_type_post_processingd.py +++ b/tests/integration/test_hovernet_nuclear_type_post_processingd.py @@ -42,7 +42,6 @@ @unittest.skipUnless(has_scipy, "Requires scipy library.") @unittest.skipUnless(has_skimage, "Requires scikit-image library.") class TestHoVerNetNuclearTypePostProcessingd(unittest.TestCase): - @parameterized.expand(TEST_CASE) def test_value(self, in_type, test_data, kwargs, expected): input = { diff --git a/tests/test_integration_autorunner.py b/tests/integration/test_integration_autorunner.py similarity index 99% rename from tests/test_integration_autorunner.py rename to tests/integration/test_integration_autorunner.py index 5b761271d6..52906bde05 100644 --- a/tests/test_integration_autorunner.py +++ b/tests/integration/test_integration_autorunner.py @@ -71,7 +71,6 @@ @SkipIfBeforePyTorchVersion((1, 11, 1)) # for mem_get_info @unittest.skipIf(not has_tb, "no tensorboard summary writer") class TestAutoRunner(unittest.TestCase): - def setUp(self) -> None: self.test_dir = tempfile.TemporaryDirectory() test_path = self.test_dir.name diff --git a/tests/test_integration_bundle_run.py b/tests/integration/test_integration_bundle_run.py similarity index 94% rename from tests/test_integration_bundle_run.py rename to tests/integration/test_integration_bundle_run.py index eec7504566..cfbbcfe154 100644 --- a/tests/test_integration_bundle_run.py +++ b/tests/integration/test_integration_bundle_run.py @@ -19,6 +19,7 @@ import tempfile import unittest from glob import glob +from pathlib import Path import nibabel as nib import numpy as np @@ -31,13 +32,13 @@ from monai.utils import path_to_uri from tests.test_utils import command_line_tests -TEST_CASE_1 = [os.path.join(os.path.dirname(__file__), "testing_data", "inference.json"), (128, 128, 128)] +TESTS_PATH = Path(__file__).parents[1] +TEST_CASE_1 = [os.path.join(TESTS_PATH, "testing_data", "inference.json"), (128, 128, 128)] -TEST_CASE_2 = [os.path.join(os.path.dirname(__file__), "testing_data", "inference.yaml"), (128, 128, 128)] +TEST_CASE_2 = [os.path.join(TESTS_PATH, "testing_data", "inference.yaml"), (128, 128, 128)] class _Runnable42: - def __init__(self, val=1): self.val = val @@ -47,7 +48,6 @@ def run(self): class _Runnable43: - def __init__(self, func): self.func = func @@ -56,7 +56,6 @@ def run(self): class TestBundleRun(unittest.TestCase): - def setUp(self): self.data_dir = tempfile.mkdtemp() @@ -69,7 +68,7 @@ def test_tiny(self): with open(config_file, "w") as f: json.dump( { - "trainer": {"_target_": "tests.test_integration_bundle_run._Runnable42", "val": 42}, + "trainer": {"_target_": "tests.integration.test_integration_bundle_run._Runnable42", "val": 42}, # keep this test case to cover the "run_id" arg "training": "$@trainer.run()", }, @@ -105,7 +104,7 @@ def test_scripts_fold(self): { "imports": ["$import scripts"], "trainer": { - "_target_": "tests.test_integration_bundle_run._Runnable43", + "_target_": "tests.integration.test_integration_bundle_run._Runnable43", "func": "$scripts.tiny_test", }, # keep this test case to cover the "run_id" arg @@ -161,7 +160,7 @@ def test_shape(self, config_file, expected_shape): nib.save(nib.Nifti1Image(test_image, np.eye(4)), filename) # 
generate default args in a JSON file - logging_conf = os.path.join(os.path.dirname(__file__), "testing_data", "logging.conf") + logging_conf = os.path.join(TESTS_PATH, "testing_data", "logging.conf") def_args = {"config_file": "will be replaced by `config_file` arg", "logging_file": logging_conf} def_args_file = os.path.join(tempdir, "def_args.json") ConfigParser.export_config_file(config=def_args, filepath=def_args_file) diff --git a/tests/test_integration_classification_2d.py b/tests/integration/test_integration_classification_2d.py similarity index 97% rename from tests/test_integration_classification_2d.py rename to tests/integration/test_integration_classification_2d.py index bcf686f8d9..fd9e58aaf8 100644 --- a/tests/test_integration_classification_2d.py +++ b/tests/integration/test_integration_classification_2d.py @@ -45,7 +45,6 @@ class MedNISTDataset(torch.utils.data.Dataset): - def __init__(self, image_files, labels, transforms): self.image_files = image_files self.labels = labels @@ -95,7 +94,7 @@ def run_training_test(root_dir, train_x, train_y, val_x, val_y, device="cuda:0", model = DenseNet121(spatial_dims=2, in_channels=1, out_channels=len(np.unique(train_y))).to(device) loss_function = torch.nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters(), 1e-5) - epoch_num = 4 + epoch_num = 1 val_interval = 1 # start training validation @@ -128,7 +127,7 @@ def run_training_test(root_dir, train_x, train_y, val_x, val_y, device="cuda:0", y_pred = torch.tensor([], dtype=torch.float32, device=device) y = torch.tensor([], dtype=torch.long, device=device) for val_data in val_loader: - val_images, val_labels = val_data[0].to(device), val_data[1].to(device) + val_images, val_labels = (val_data[0].to(device), val_data[1].to(device)) y_pred = torch.cat([y_pred, model(val_images)], dim=0) y = torch.cat([y, val_labels], dim=0) @@ -149,7 +148,7 @@ def run_training_test(root_dir, train_x, train_y, val_x, val_y, device="cuda:0", torch.save(model.state_dict(), model_filename) print("saved new best metric model") print( - f"current epoch {epoch +1} current AUC: {auc_value:0.4f} " + f"current epoch {epoch + 1} current AUC: {auc_value:0.4f} " f"current accuracy: {acc_metric:0.4f} best AUC: {best_metric:0.4f} at epoch {best_metric_epoch}" ) print(f"train completed, best_metric: {best_metric:0.4f} at epoch: {best_metric_epoch}") @@ -183,10 +182,9 @@ def run_inference_test(root_dir, test_x, test_y, device="cuda:0", num_workers=10 @skip_if_quick class IntegrationClassification2D(DistTestCase): - def setUp(self): set_determinism(seed=0) - self.data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testing_data") + self.data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../testing_data") data_dir = os.path.join(self.data_dir, "MedNIST") dataset_file = os.path.join(self.data_dir, "MedNIST.tar.gz") @@ -270,7 +268,7 @@ def train_and_infer(self, idx=0): def test_training(self): repeated = [] - for i in range(2): + for i in range(1): results = self.train_and_infer(i) repeated.append(results) np.testing.assert_allclose(repeated[0], repeated[1]) diff --git a/tests/test_integration_determinism.py b/tests/integration/test_integration_determinism.py similarity index 99% rename from tests/test_integration_determinism.py rename to tests/integration/test_integration_determinism.py index 37dcf013fc..f48a35c70e 100644 --- a/tests/test_integration_determinism.py +++ b/tests/integration/test_integration_determinism.py @@ -26,9 +26,7 @@ def run_test(batch_size=64, 
train_steps=200, device="cuda:0"): - class _TestBatch(Dataset): - def __init__(self, transforms): self.transforms = transforms @@ -78,7 +76,6 @@ def __len__(self): class TestDeterminism(DistTestCase): - def setUp(self): self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu:0") diff --git a/tests/test_integration_fast_train.py b/tests/integration/test_integration_fast_train.py similarity index 99% rename from tests/test_integration_fast_train.py rename to tests/integration/test_integration_fast_train.py index f00aeab9a5..f9beb5613d 100644 --- a/tests/test_integration_fast_train.py +++ b/tests/integration/test_integration_fast_train.py @@ -58,7 +58,6 @@ @skip_if_no_cuda @skip_if_quick class IntegrationFastTrain(DistTestCase): - def setUp(self): set_determinism(seed=0) monai.config.print_config() diff --git a/tests/test_integration_gpu_customization.py b/tests/integration/test_integration_gpu_customization.py similarity index 99% rename from tests/test_integration_gpu_customization.py rename to tests/integration/test_integration_gpu_customization.py index fc90837a1a..42f7d52e21 100644 --- a/tests/test_integration_gpu_customization.py +++ b/tests/integration/test_integration_gpu_customization.py @@ -70,7 +70,6 @@ @SkipIfBeforePyTorchVersion((1, 11, 1)) # module 'torch.cuda' has no attribute 'mem_get_info' @unittest.skipIf(not has_tb, "no tensorboard summary writer") class TestEnsembleGpuCustomization(unittest.TestCase): - def setUp(self) -> None: self.test_dir = tempfile.TemporaryDirectory() diff --git a/tests/test_integration_lazy_samples.py b/tests/integration/test_integration_lazy_samples.py similarity index 99% rename from tests/test_integration_lazy_samples.py rename to tests/integration/test_integration_lazy_samples.py index 3be4bbe36e..834e9ccb2a 100644 --- a/tests/test_integration_lazy_samples.py +++ b/tests/integration/test_integration_lazy_samples.py @@ -160,7 +160,6 @@ def run_training_test(root_dir, device="cuda:0", cachedataset=0, readers=(None, @skip_if_quick @SkipIfBeforePyTorchVersion((1, 11)) class IntegrationLazyResampling(DistTestCase): - def setUp(self): monai.config.print_config() set_determinism(seed=0) diff --git a/tests/test_integration_nnunetv2_runner.py b/tests/integration/test_integration_nnunetv2_runner.py similarity index 99% rename from tests/test_integration_nnunetv2_runner.py rename to tests/integration/test_integration_nnunetv2_runner.py index 7c9e2c386c..9717e06170 100644 --- a/tests/test_integration_nnunetv2_runner.py +++ b/tests/integration/test_integration_nnunetv2_runner.py @@ -49,7 +49,6 @@ @unittest.skipIf(not has_tb, "no tensorboard summary writer") @unittest.skipIf(not has_nnunet, "no nnunetv2") class TestnnUNetV2Runner(unittest.TestCase): - def setUp(self) -> None: self.test_dir = tempfile.TemporaryDirectory() test_path = self.test_dir.name diff --git a/tests/test_integration_segmentation_3d.py b/tests/integration/test_integration_segmentation_3d.py similarity index 97% rename from tests/test_integration_segmentation_3d.py rename to tests/integration/test_integration_segmentation_3d.py index 8176489c2b..fb2937739f 100644 --- a/tests/test_integration_segmentation_3d.py +++ b/tests/integration/test_integration_segmentation_3d.py @@ -139,7 +139,7 @@ def run_training_test(root_dir, device="cuda:0", cachedataset=0, readers=(None, writer.add_scalar("train_loss", loss.item(), epoch_len * epoch + step) epoch_loss /= step epoch_loss_values.append(epoch_loss) - print(f"epoch {epoch +1} average loss:{epoch_loss:0.4f}") + print(f"epoch {epoch 
+ 1} average loss:{epoch_loss:0.4f}") if (epoch + 1) % val_interval == 0: with eval_mode(model): @@ -147,7 +147,7 @@ def run_training_test(root_dir, device="cuda:0", cachedataset=0, readers=(None, val_labels = None val_outputs = None for val_data in val_loader: - val_images, val_labels = val_data["img"].to(device), val_data["seg"].to(device) + val_images, val_labels = (val_data["img"].to(device), val_data["seg"].to(device)) sw_batch_size, roi_size = 4, (96, 96, 96) val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model) # decollate prediction into a list and execute post processing for every item @@ -164,7 +164,7 @@ def run_training_test(root_dir, device="cuda:0", cachedataset=0, readers=(None, torch.save(model.state_dict(), model_filename) print("saved new best metric model") print( - f"current epoch {epoch +1} current mean dice: {metric:0.4f} " + f"current epoch {epoch + 1} current mean dice: {metric:0.4f} " f"best mean dice: {best_metric:0.4f} at epoch {best_metric_epoch}" ) writer.add_scalar("val_mean_dice", metric, epoch + 1) @@ -221,7 +221,7 @@ def run_inference_test(root_dir, device="cuda:0"): # resampling with align_corners=True or dtype=float64 will generate # slight different results between PyTorch 1.5 an 1.6 for val_data in val_loader: - val_images, val_labels = val_data["img"].to(device), val_data["seg"].to(device) + val_images, val_labels = (val_data["img"].to(device), val_data["seg"].to(device)) # define sliding window size and batch size for windows inference sw_batch_size, roi_size = 4, (96, 96, 96) val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model) @@ -235,7 +235,6 @@ def run_inference_test(root_dir, device="cuda:0"): @skip_if_quick class IntegrationSegmentation3D(DistTestCase): - def setUp(self): set_determinism(seed=0) diff --git a/tests/test_integration_sliding_window.py b/tests/integration/test_integration_sliding_window.py similarity index 99% rename from tests/test_integration_sliding_window.py rename to tests/integration/test_integration_sliding_window.py index 29d2e6f107..a3e95b1d87 100644 --- a/tests/test_integration_sliding_window.py +++ b/tests/integration/test_integration_sliding_window.py @@ -72,7 +72,6 @@ def save_func(engine): @skip_if_quick class TestIntegrationSlidingWindow(DistTestCase): - def setUp(self): set_determinism(seed=0) diff --git a/tests/test_integration_stn.py b/tests/integration/test_integration_stn.py similarity index 99% rename from tests/test_integration_stn.py rename to tests/integration/test_integration_stn.py index 579afc2eb9..b9f0f975e8 100644 --- a/tests/test_integration_stn.py +++ b/tests/integration/test_integration_stn.py @@ -98,7 +98,6 @@ def compare_2d(is_ref=True, device=None, reverse_indexing=False): class TestSpatialTransformerCore(DistTestCase): - def setUp(self): self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu:0") diff --git a/tests/test_integration_unet_2d.py b/tests/integration/test_integration_unet_2d.py similarity index 99% rename from tests/test_integration_unet_2d.py rename to tests/integration/test_integration_unet_2d.py index 45723f53ca..706b0e0afb 100644 --- a/tests/test_integration_unet_2d.py +++ b/tests/integration/test_integration_unet_2d.py @@ -25,9 +25,7 @@ def run_test(net_name="basicunet", batch_size=64, train_steps=100, device="cuda:0"): - class _TestBatch(Dataset): - def __getitem__(self, _unused_id): im, seg = create_test_image_2d(128, 128, noise_max=1, num_objs=4, num_seg_classes=1) return im[None], 
seg[None].astype(np.float32) @@ -57,7 +55,6 @@ def __len__(self): @skip_if_quick class TestIntegrationUnet2D(DistTestCase): - @TimedCall(seconds=20, daemon=False) def test_unet_training(self): for n in ["basicunet", "unet"]: diff --git a/tests/test_integration_workers.py b/tests/integration/test_integration_workers.py similarity index 99% rename from tests/test_integration_workers.py rename to tests/integration/test_integration_workers.py index 83dd023eaf..6b6ee057f5 100644 --- a/tests/test_integration_workers.py +++ b/tests/integration/test_integration_workers.py @@ -44,7 +44,6 @@ def run_loading_test(num_workers=50, device=None, pw=False): @skip_if_no_cuda @SkipIfBeforePyTorchVersion((1, 9)) class IntegrationLoading(DistTestCase): - def tearDown(self): set_determinism(seed=None) diff --git a/tests/test_integration_workflows.py b/tests/integration/test_integration_workflows.py similarity index 99% rename from tests/test_integration_workflows.py rename to tests/integration/test_integration_workflows.py index 3fbdcca078..2e14209480 100644 --- a/tests/test_integration_workflows.py +++ b/tests/integration/test_integration_workflows.py @@ -118,7 +118,6 @@ def run_training_test(root_dir, device="cuda:0", amp=False, num_workers=4): ) class _TestEvalIterEvents: - def attach(self, engine): engine.add_event_handler(IterationEvents.FORWARD_COMPLETED, self._forward_completed) @@ -161,7 +160,6 @@ def _forward_completed(self, engine): ) class _TestTrainIterEvents: - def attach(self, engine): engine.add_event_handler(IterationEvents.FORWARD_COMPLETED, self._forward_completed) engine.add_event_handler(IterationEvents.LOSS_COMPLETED, self._loss_completed) @@ -286,7 +284,6 @@ def save_func(engine): @skip_if_quick class IntegrationWorkflows(DistTestCase): - def setUp(self): set_determinism(seed=0) diff --git a/tests/test_integration_workflows_adversarial.py b/tests/integration/test_integration_workflows_adversarial.py similarity index 100% rename from tests/test_integration_workflows_adversarial.py rename to tests/integration/test_integration_workflows_adversarial.py diff --git a/tests/test_integration_workflows_gan.py b/tests/integration/test_integration_workflows_gan.py similarity index 99% rename from tests/test_integration_workflows_gan.py rename to tests/integration/test_integration_workflows_gan.py index a03fecbf3e..adf86df59b 100644 --- a/tests/test_integration_workflows_gan.py +++ b/tests/integration/test_integration_workflows_gan.py @@ -127,7 +127,6 @@ def generator_loss(gen_images): @skip_if_quick class IntegrationWorkflowsGAN(DistTestCase): - def setUp(self): set_determinism(seed=0) diff --git a/tests/test_loader_semaphore.py b/tests/integration/test_loader_semaphore.py similarity index 100% rename from tests/test_loader_semaphore.py rename to tests/integration/test_loader_semaphore.py diff --git a/tests/test_mapping_filed.py b/tests/integration/test_mapping_filed.py similarity index 100% rename from tests/test_mapping_filed.py rename to tests/integration/test_mapping_filed.py diff --git a/tests/test_meta_affine.py b/tests/integration/test_meta_affine.py similarity index 97% rename from tests/test_meta_affine.py rename to tests/integration/test_meta_affine.py index e81852cfd6..8c3b4f287e 100644 --- a/tests/test_meta_affine.py +++ b/tests/integration/test_meta_affine.py @@ -14,6 +14,7 @@ import os import unittest from copy import deepcopy +from pathlib import Path import numpy as np from parameterized import parameterized @@ -41,8 +42,9 @@ keys = ("img1", "img2") key, key_1 = "ref_avg152T1_LR", 
"ref_avg152T1_RL" -FILE_PATH = os.path.join(os.path.dirname(__file__), "testing_data", f"{key}.nii.gz") -FILE_PATH_1 = os.path.join(os.path.dirname(__file__), "testing_data", f"{key_1}.nii.gz") +TESTS_PATH = Path(__file__).parents[1] +FILE_PATH = os.path.join(TESTS_PATH, "testing_data", f"{key}.nii.gz") +FILE_PATH_1 = os.path.join(TESTS_PATH, "testing_data", f"{key_1}.nii.gz") TEST_CASES_ARRAY = [ [Compose([Spacing(pixdim=(1.0, 1.1, 1.2)), Orientation(axcodes="RAS")]), {}, TINY_DIFF], @@ -123,7 +125,6 @@ def _resample_to_affine(itk_obj, ref_obj): @unittest.skipUnless(has_itk, "Requires itk package.") class TestAffineConsistencyITK(unittest.TestCase): - @classmethod def setUpClass(cls): super().setUpClass() diff --git a/tests/test_metatensor_integration.py b/tests/integration/test_metatensor_integration.py similarity index 94% rename from tests/test_metatensor_integration.py rename to tests/integration/test_metatensor_integration.py index 11c51ebb77..70ebc5174e 100644 --- a/tests/test_metatensor_integration.py +++ b/tests/integration/test_metatensor_integration.py @@ -15,6 +15,7 @@ import tempfile import unittest from copy import deepcopy +from pathlib import Path import numpy as np from parameterized import parameterized @@ -32,14 +33,14 @@ keys = ("img", "seg") key, key_1 = "MNI152_T1_2mm", "MNI152_T1_2mm_strucseg" -FILE_PATH = os.path.join(os.path.dirname(__file__), "testing_data", f"{key}.nii.gz") -FILE_PATH_1 = os.path.join(os.path.dirname(__file__), "testing_data", f"{key_1}.nii.gz") -TEST_CASES = os.path.join(os.path.dirname(__file__), "testing_data", "transform_metatensor_cases.yaml") +TESTS_PATH = Path(__file__).parents[1] +FILE_PATH = os.path.join(TESTS_PATH, "testing_data", f"{key}.nii.gz") +FILE_PATH_1 = os.path.join(TESTS_PATH, "testing_data", f"{key_1}.nii.gz") +TEST_CASES = os.path.join(TESTS_PATH, "testing_data", "transform_metatensor_cases.yaml") @unittest.skipUnless(has_nib, "Requires nibabel package.") class TestMetaTensorIntegration(unittest.TestCase): - @classmethod def setUpClass(cls): super().setUpClass() diff --git a/tests/test_module_list.py b/tests/integration/test_module_list.py similarity index 100% rename from tests/test_module_list.py rename to tests/integration/test_module_list.py diff --git a/tests/test_one_of.py b/tests/integration/test_one_of.py similarity index 100% rename from tests/test_one_of.py rename to tests/integration/test_one_of.py diff --git a/tests/test_pad_collation.py b/tests/integration/test_pad_collation.py similarity index 100% rename from tests/test_pad_collation.py rename to tests/integration/test_pad_collation.py diff --git a/tests/test_reg_loss_integration.py b/tests/integration/test_reg_loss_integration.py similarity index 99% rename from tests/test_reg_loss_integration.py rename to tests/integration/test_reg_loss_integration.py index c29b29de43..47d2a8df80 100644 --- a/tests/test_reg_loss_integration.py +++ b/tests/integration/test_reg_loss_integration.py @@ -32,7 +32,6 @@ class TestRegLossIntegration(unittest.TestCase): - def setUp(self): torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False @@ -62,7 +61,6 @@ def test_convergence(self, loss_type, loss_args, forward_args, pred_channels=1): # define a one layer model class OnelayerNet(nn.Module): - def __init__(self): super().__init__() self.layer = nn.Sequential( diff --git a/tests/test_retinanet_predict_utils.py b/tests/integration/test_retinanet_predict_utils.py similarity index 100% rename from tests/test_retinanet_predict_utils.py rename to 
tests/integration/test_retinanet_predict_utils.py diff --git a/tests/test_seg_loss_integration.py b/tests/integration/test_seg_loss_integration.py similarity index 100% rename from tests/test_seg_loss_integration.py rename to tests/integration/test_seg_loss_integration.py diff --git a/tests/test_spatial_combine_transforms.py b/tests/integration/test_spatial_combine_transforms.py similarity index 99% rename from tests/test_spatial_combine_transforms.py rename to tests/integration/test_spatial_combine_transforms.py index 6cb4c53ad4..c947f2f774 100644 --- a/tests/test_spatial_combine_transforms.py +++ b/tests/integration/test_spatial_combine_transforms.py @@ -132,7 +132,6 @@ class CombineLazyTest(unittest.TestCase): - @parameterized.expand(TEST_2D + TEST_3D) def test_combine_transforms(self, input_shape, funcs): for device in ["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]: diff --git a/tests/test_testtimeaugmentation.py b/tests/integration/test_testtimeaugmentation.py similarity index 98% rename from tests/test_testtimeaugmentation.py rename to tests/integration/test_testtimeaugmentation.py index 81d5e580f7..62e4b46282 100644 --- a/tests/test_testtimeaugmentation.py +++ b/tests/integration/test_testtimeaugmentation.py @@ -52,7 +52,6 @@ class TestTestTimeAugmentation(unittest.TestCase): - @staticmethod def get_data(num_examples, input_size, data_type=np.asarray, include_label=True): custom_create_test_image_2d = partial( @@ -114,7 +113,7 @@ def test_test_time_augmentation(self): epoch_loss = 0 for batch_data in train_loader: - inputs, labels = batch_data["image"].to(device), batch_data["label"].to(device) + inputs, labels = (batch_data["image"].to(device), batch_data["label"].to(device)) optimizer.zero_grad() outputs = model(inputs) loss = loss_function(outputs, labels) diff --git a/tests/test_vis_gradbased.py b/tests/integration/test_vis_gradbased.py similarity index 100% rename from tests/test_vis_gradbased.py rename to tests/integration/test_vis_gradbased.py diff --git a/tests/test_vista3d_utils.py b/tests/integration/test_vista3d_utils.py similarity index 99% rename from tests/test_vista3d_utils.py rename to tests/integration/test_vista3d_utils.py index 191c306957..0ae8e28b4e 100644 --- a/tests/test_vista3d_utils.py +++ b/tests/integration/test_vista3d_utils.py @@ -114,7 +114,6 @@ @skipUnless(has_measure or cucim_skimage, "skimage or cucim.skimage required") class TestSamplePointsFromLabel(unittest.TestCase): - @parameterized.expand(TESTS_SAMPLE_POINTS_FROM_LABEL) def test_shape(self, input_data, expected_point_shape, expected_point_label_shape): point, point_label = sample_points_from_label(**input_data) @@ -123,7 +122,6 @@ def test_shape(self, input_data, expected_point_shape, expected_point_label_shap class TestConvertPointsToDisc(unittest.TestCase): - @parameterized.expand(TEST_CONVERT_POINTS_TO_DISC) def test_shape(self, input_data, expected_shape): result = convert_points_to_disc(**input_data) @@ -140,7 +138,6 @@ def test_value(self, input_data, points): @skipUnless(has_measure or cucim_skimage, "skimage or cucim.skimage required") class TestKeepMergeComponentsWithPoints(unittest.TestCase): - @skip_if_quick @skip_if_no_cuda @skipUnless(has_cp and cucim_skimage, "cupy and cucim.skimage required") diff --git a/tests/lazy_transforms_utils.py b/tests/lazy_transforms_utils.py index 41a365fc4e..3a737df201 100644 --- a/tests/lazy_transforms_utils.py +++ b/tests/lazy_transforms_utils.py @@ -64,7 +64,7 @@ def test_resampler_lazy( resampler.lazy = True pending_output = 
resampler(**deepcopy(call_param)) if output_idx is not None: - expected_output, pending_output = expected_output[output_idx], pending_output[output_idx] + expected_output, pending_output = (expected_output[output_idx], pending_output[output_idx]) if output_key is not None: non_lazy_out, lazy_out = expected_output[output_key], pending_output[output_key] else: diff --git a/tests/losses/__init__.py b/tests/losses/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/losses/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/losses/deform/__init__.py b/tests/losses/deform/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/losses/deform/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/test_bending_energy.py b/tests/losses/deform/test_bending_energy.py similarity index 100% rename from tests/test_bending_energy.py rename to tests/losses/deform/test_bending_energy.py diff --git a/tests/test_diffusion_loss.py b/tests/losses/deform/test_diffusion_loss.py similarity index 100% rename from tests/test_diffusion_loss.py rename to tests/losses/deform/test_diffusion_loss.py diff --git a/tests/losses/image_dissimilarity/__init__.py b/tests/losses/image_dissimilarity/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/losses/image_dissimilarity/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
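The new `tests/losses/__init__.py` and `tests/losses/deform/__init__.py` files above turn the relocated test folders into importable packages. As a rough illustration (not part of the patch; the dotted name below is just one of the files renamed above, and this assumes it is run from the repository root), the nested tests can then be loaded by dotted module name rather than the old flat `tests.test_*` layout:

```python
# Hypothetical usage sketch: with the __init__.py files in place, unittest can resolve
# the relocated modules by dotted name.
import unittest

# "tests.losses.deform.test_bending_energy" corresponds to the rename above; the
# loader resolves it only because each directory is now an importable package.
suite = unittest.defaultTestLoader.loadTestsFromName("tests.losses.deform.test_bending_energy")
unittest.TextTestRunner(verbosity=1).run(suite)
```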
diff --git a/tests/test_global_mutual_information_loss.py b/tests/losses/image_dissimilarity/test_global_mutual_information_loss.py similarity index 97% rename from tests/test_global_mutual_information_loss.py rename to tests/losses/image_dissimilarity/test_global_mutual_information_loss.py index fdbfa63c34..44e73a477c 100644 --- a/tests/test_global_mutual_information_loss.py +++ b/tests/losses/image_dissimilarity/test_global_mutual_information_loss.py @@ -12,6 +12,7 @@ import os import unittest +from pathlib import Path import numpy as np import torch @@ -23,7 +24,8 @@ device = "cuda:0" if torch.cuda.is_available() else "cpu" -FILE_PATH = os.path.join(os.path.dirname(__file__), "testing_data", "temp_" + "mri.nii") +TESTS_PATH = Path(__file__).parents[2] +FILE_PATH = os.path.join(TESTS_PATH, "testing_data", "temp_" + "mri.nii") EXPECTED_VALUE = { "xyz_translation": [ @@ -55,7 +57,6 @@ @skip_if_quick class TestGlobalMutualInformationLoss(unittest.TestCase): - def setUp(self): config = testing_data_config("images", "Prostate_T2W_AX_1") download_url_or_skip_test( @@ -116,7 +117,6 @@ def transformation(translate_params=(0.0, 0.0, 0.0), rotate_params=(0.0, 0.0, 0. class TestGlobalMutualInformationLossIll(unittest.TestCase): - @parameterized.expand( [ (torch.ones((1, 2), dtype=torch.float), torch.ones((1, 3), dtype=torch.float)), # mismatched_simple_dims diff --git a/tests/test_local_normalized_cross_correlation_loss.py b/tests/losses/image_dissimilarity/test_local_normalized_cross_correlation_loss.py similarity index 100% rename from tests/test_local_normalized_cross_correlation_loss.py rename to tests/losses/image_dissimilarity/test_local_normalized_cross_correlation_loss.py diff --git a/tests/test_adversarial_loss.py b/tests/losses/test_adversarial_loss.py similarity index 100% rename from tests/test_adversarial_loss.py rename to tests/losses/test_adversarial_loss.py diff --git a/tests/test_barlow_twins_loss.py b/tests/losses/test_barlow_twins_loss.py similarity index 100% rename from tests/test_barlow_twins_loss.py rename to tests/losses/test_barlow_twins_loss.py diff --git a/tests/test_cldice_loss.py b/tests/losses/test_cldice_loss.py similarity index 100% rename from tests/test_cldice_loss.py rename to tests/losses/test_cldice_loss.py diff --git a/tests/test_contrastive_loss.py b/tests/losses/test_contrastive_loss.py similarity index 100% rename from tests/test_contrastive_loss.py rename to tests/losses/test_contrastive_loss.py diff --git a/tests/test_dice_ce_loss.py b/tests/losses/test_dice_ce_loss.py similarity index 100% rename from tests/test_dice_ce_loss.py rename to tests/losses/test_dice_ce_loss.py diff --git a/tests/test_dice_focal_loss.py b/tests/losses/test_dice_focal_loss.py similarity index 99% rename from tests/test_dice_focal_loss.py rename to tests/losses/test_dice_focal_loss.py index e04b0c1d56..98ea475ded 100644 --- a/tests/test_dice_focal_loss.py +++ b/tests/losses/test_dice_focal_loss.py @@ -22,7 +22,6 @@ class TestDiceFocalLoss(unittest.TestCase): - def test_result_onehot_target_include_bg(self): size = [3, 3, 5, 5] label = torch.randint(low=0, high=2, size=size) diff --git a/tests/test_dice_loss.py b/tests/losses/test_dice_loss.py similarity index 99% rename from tests/test_dice_loss.py rename to tests/losses/test_dice_loss.py index 294312d214..66c038783a 100644 --- a/tests/test_dice_loss.py +++ b/tests/losses/test_dice_loss.py @@ -184,7 +184,6 @@ class TestDiceLoss(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_shape(self, input_param, input_data, 
expected_val): result = DiceLoss(**input_param).forward(**input_data) diff --git a/tests/test_ds_loss.py b/tests/losses/test_ds_loss.py similarity index 99% rename from tests/test_ds_loss.py rename to tests/losses/test_ds_loss.py index 5f4daaae81..6b91dff17a 100644 --- a/tests/test_ds_loss.py +++ b/tests/losses/test_ds_loss.py @@ -135,7 +135,6 @@ class TestDSLossDiceCE(unittest.TestCase): - @parameterized.expand(TEST_CASES_DICECE) def test_result(self, input_param, input_param2, input_data, expected_val): diceceloss = DeepSupervisionLoss(DiceCELoss(**input_param), **input_param2) @@ -161,7 +160,6 @@ def test_script(self): @SkipIfBeforePyTorchVersion((1, 11)) class TestDSLossDiceCE2(unittest.TestCase): - @parameterized.expand(TEST_CASES_DICECE2) def test_result(self, input_param, input_param2, input_data, expected_val): diceceloss = DeepSupervisionLoss(DiceCELoss(**input_param), **input_param2) @@ -171,7 +169,6 @@ def test_result(self, input_param, input_param2, input_data, expected_val): @SkipIfBeforePyTorchVersion((1, 11)) class TestDSLossDice(unittest.TestCase): - @parameterized.expand(TEST_CASES_DICE) def test_result(self, input_param, input_data, expected_val): loss = DeepSupervisionLoss(DiceLoss(**input_param)) @@ -181,7 +178,6 @@ def test_result(self, input_param, input_data, expected_val): @SkipIfBeforePyTorchVersion((1, 11)) class TestDSLossDiceFocal(unittest.TestCase): - @parameterized.expand(TEST_CASES_DICEFOCAL) def test_result(self, input_param, input_data, expected_val): loss = DeepSupervisionLoss(DiceFocalLoss(**input_param)) diff --git a/tests/test_focal_loss.py b/tests/losses/test_focal_loss.py similarity index 99% rename from tests/test_focal_loss.py rename to tests/losses/test_focal_loss.py index 9d9ed43101..e7f447d90e 100644 --- a/tests/test_focal_loss.py +++ b/tests/losses/test_focal_loss.py @@ -79,7 +79,6 @@ class TestFocalLoss(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_result(self, input_param, input_data, expected_val): focal_loss = FocalLoss(**input_param) diff --git a/tests/test_generalized_dice_focal_loss.py b/tests/losses/test_generalized_dice_focal_loss.py similarity index 99% rename from tests/test_generalized_dice_focal_loss.py rename to tests/losses/test_generalized_dice_focal_loss.py index 93f9f6f6fa..2af4aa68db 100644 --- a/tests/test_generalized_dice_focal_loss.py +++ b/tests/losses/test_generalized_dice_focal_loss.py @@ -21,7 +21,6 @@ class TestGeneralizedDiceFocalLoss(unittest.TestCase): - def test_result_onehot_target_include_bg(self): size = [3, 3, 5, 5] label = torch.randint(low=0, high=2, size=size) diff --git a/tests/test_generalized_dice_loss.py b/tests/losses/test_generalized_dice_loss.py similarity index 99% rename from tests/test_generalized_dice_loss.py rename to tests/losses/test_generalized_dice_loss.py index 23af96762f..8549e87482 100644 --- a/tests/test_generalized_dice_loss.py +++ b/tests/losses/test_generalized_dice_loss.py @@ -158,7 +158,6 @@ class TestGeneralizedDiceLoss(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_shape(self, input_param, input_data, expected_val): result = GeneralizedDiceLoss(**input_param).forward(**input_data) diff --git a/tests/test_generalized_wasserstein_dice_loss.py b/tests/losses/test_generalized_wasserstein_dice_loss.py similarity index 99% rename from tests/test_generalized_wasserstein_dice_loss.py rename to tests/losses/test_generalized_wasserstein_dice_loss.py index 3b56c1315e..6868c04775 100644 --- a/tests/test_generalized_wasserstein_dice_loss.py +++ 
b/tests/losses/test_generalized_wasserstein_dice_loss.py @@ -24,7 +24,6 @@ class TestGeneralizedWassersteinDiceLoss(unittest.TestCase): - def test_bin_seg_2d(self): target = torch.tensor([[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]]) @@ -161,7 +160,6 @@ def test_convergence(self): # define a model with one layer class OnelayerNet(nn.Module): - def __init__(self): super().__init__() self.layer = nn.Linear(num_voxels, num_voxels * num_classes) diff --git a/tests/test_giou_loss.py b/tests/losses/test_giou_loss.py similarity index 100% rename from tests/test_giou_loss.py rename to tests/losses/test_giou_loss.py diff --git a/tests/test_hausdorff_loss.py b/tests/losses/test_hausdorff_loss.py similarity index 100% rename from tests/test_hausdorff_loss.py rename to tests/losses/test_hausdorff_loss.py diff --git a/tests/test_masked_dice_loss.py b/tests/losses/test_masked_dice_loss.py similarity index 100% rename from tests/test_masked_dice_loss.py rename to tests/losses/test_masked_dice_loss.py diff --git a/tests/test_masked_loss.py b/tests/losses/test_masked_loss.py similarity index 99% rename from tests/test_masked_loss.py rename to tests/losses/test_masked_loss.py index aaba9969a5..e9128a705c 100644 --- a/tests/test_masked_loss.py +++ b/tests/losses/test_masked_loss.py @@ -40,7 +40,6 @@ class TestMaskedLoss(unittest.TestCase): - def setUp(self): set_determinism(0) diff --git a/tests/test_multi_scale.py b/tests/losses/test_multi_scale.py similarity index 99% rename from tests/test_multi_scale.py rename to tests/losses/test_multi_scale.py index e57e62a3ad..348a9f9913 100644 --- a/tests/test_multi_scale.py +++ b/tests/losses/test_multi_scale.py @@ -52,7 +52,6 @@ class TestMultiScale(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_shape(self, input_param, input_data, expected_val): result = MultiScaleLoss(**input_param).forward(**input_data) diff --git a/tests/test_nacl_loss.py b/tests/losses/test_nacl_loss.py similarity index 100% rename from tests/test_nacl_loss.py rename to tests/losses/test_nacl_loss.py diff --git a/tests/test_perceptual_loss.py b/tests/losses/test_perceptual_loss.py similarity index 99% rename from tests/test_perceptual_loss.py rename to tests/losses/test_perceptual_loss.py index 30907e8468..2d11d41bee 100644 --- a/tests/test_perceptual_loss.py +++ b/tests/losses/test_perceptual_loss.py @@ -77,7 +77,6 @@ @unittest.skipUnless(has_torchvision, "Requires torchvision") @skip_if_quick class TestPerceptualLoss(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_shape(self, input_param, input_shape, target_shape): with skip_if_downloading_fails(): diff --git a/tests/test_spectral_loss.py b/tests/losses/test_spectral_loss.py similarity index 99% rename from tests/test_spectral_loss.py rename to tests/losses/test_spectral_loss.py index dbc64ca73b..8a4988a30d 100644 --- a/tests/test_spectral_loss.py +++ b/tests/losses/test_spectral_loss.py @@ -63,7 +63,6 @@ class TestJukeboxLoss(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_results(self, input_param, input_data, expected_val): results = JukeboxLoss(**input_param).forward(**input_data) diff --git a/tests/test_ssim_loss.py b/tests/losses/test_ssim_loss.py similarity index 100% rename from tests/test_ssim_loss.py rename to tests/losses/test_ssim_loss.py diff --git a/tests/test_sure_loss.py b/tests/losses/test_sure_loss.py similarity index 100% rename from tests/test_sure_loss.py rename to tests/losses/test_sure_loss.py diff --git a/tests/test_tversky_loss.py 
b/tests/losses/test_tversky_loss.py similarity index 99% rename from tests/test_tversky_loss.py rename to tests/losses/test_tversky_loss.py index 29c54fd0fc..32303434ca 100644 --- a/tests/test_tversky_loss.py +++ b/tests/losses/test_tversky_loss.py @@ -164,7 +164,6 @@ class TestTverskyLoss(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_shape(self, input_param, input_data, expected_val): result = TverskyLoss(**input_param).forward(**input_data) diff --git a/tests/test_unified_focal_loss.py b/tests/losses/test_unified_focal_loss.py similarity index 100% rename from tests/test_unified_focal_loss.py rename to tests/losses/test_unified_focal_loss.py diff --git a/tests/metrics/__init__.py b/tests/metrics/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/metrics/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/test_compute_confusion_matrix.py b/tests/metrics/test_compute_confusion_matrix.py similarity index 99% rename from tests/test_compute_confusion_matrix.py rename to tests/metrics/test_compute_confusion_matrix.py index 5b06bb88cd..2486049d79 100644 --- a/tests/test_compute_confusion_matrix.py +++ b/tests/metrics/test_compute_confusion_matrix.py @@ -220,7 +220,6 @@ class TestConfusionMatrix(unittest.TestCase): - @parameterized.expand([TEST_CASE_CONFUSION_MATRIX]) def test_value(self, input_data, expected_value): # include or ignore background diff --git a/tests/test_compute_f_beta.py b/tests/metrics/test_compute_f_beta.py similarity index 99% rename from tests/test_compute_f_beta.py rename to tests/metrics/test_compute_f_beta.py index 071c8963f2..21a4e3c22c 100644 --- a/tests/test_compute_f_beta.py +++ b/tests/metrics/test_compute_f_beta.py @@ -24,7 +24,6 @@ class TestFBetaScore(unittest.TestCase): - def test_expecting_success_and_device(self): metric = FBetaScore() y_pred = torch.tensor([[1, 1, 1], [1, 1, 1], [1, 1, 1]], device=_device) diff --git a/tests/test_compute_fid_metric.py b/tests/metrics/test_compute_fid_metric.py similarity index 100% rename from tests/test_compute_fid_metric.py rename to tests/metrics/test_compute_fid_metric.py diff --git a/tests/test_compute_froc.py b/tests/metrics/test_compute_froc.py similarity index 100% rename from tests/test_compute_froc.py rename to tests/metrics/test_compute_froc.py diff --git a/tests/test_compute_generalized_dice.py b/tests/metrics/test_compute_generalized_dice.py similarity index 100% rename from tests/test_compute_generalized_dice.py rename to tests/metrics/test_compute_generalized_dice.py diff --git a/tests/test_compute_meandice.py b/tests/metrics/test_compute_meandice.py similarity index 100% rename from tests/test_compute_meandice.py rename to tests/metrics/test_compute_meandice.py diff --git a/tests/test_compute_meaniou.py b/tests/metrics/test_compute_meaniou.py similarity index 100% rename from tests/test_compute_meaniou.py rename to tests/metrics/test_compute_meaniou.py diff --git 
a/tests/test_compute_mmd_metric.py b/tests/metrics/test_compute_mmd_metric.py similarity index 100% rename from tests/test_compute_mmd_metric.py rename to tests/metrics/test_compute_mmd_metric.py diff --git a/tests/test_compute_multiscalessim_metric.py b/tests/metrics/test_compute_multiscalessim_metric.py similarity index 100% rename from tests/test_compute_multiscalessim_metric.py rename to tests/metrics/test_compute_multiscalessim_metric.py diff --git a/tests/test_compute_panoptic_quality.py b/tests/metrics/test_compute_panoptic_quality.py similarity index 98% rename from tests/test_compute_panoptic_quality.py rename to tests/metrics/test_compute_panoptic_quality.py index 304c72e574..2c0946a822 100644 --- a/tests/test_compute_panoptic_quality.py +++ b/tests/metrics/test_compute_panoptic_quality.py @@ -12,7 +12,6 @@ from __future__ import annotations import unittest -from typing import List import numpy as np import torch @@ -92,7 +91,6 @@ @SkipIfNoModule("scipy.optimize") class TestPanopticQualityMetric(unittest.TestCase): - @parameterized.expand([TEST_FUNC_CASE_1, TEST_FUNC_CASE_2, TEST_FUNC_CASE_3, TEST_FUNC_CASE_4]) def test_value(self, input_params, expected_value): result = compute_panoptic_quality(**input_params) @@ -104,7 +102,7 @@ def test_value_class(self, input_params, y_pred, y_gt, expected_value): metric = PanopticQualityMetric(**input_params) metric(y_pred, y_gt) outputs = metric.aggregate() - if isinstance(outputs, List): + if isinstance(outputs, list): for output, value in zip(outputs, expected_value): np.testing.assert_allclose(output.cpu().numpy(), np.asarray(value), atol=1e-4) else: diff --git a/tests/test_compute_regression_metrics.py b/tests/metrics/test_compute_regression_metrics.py similarity index 100% rename from tests/test_compute_regression_metrics.py rename to tests/metrics/test_compute_regression_metrics.py diff --git a/tests/test_compute_roc_auc.py b/tests/metrics/test_compute_roc_auc.py similarity index 100% rename from tests/test_compute_roc_auc.py rename to tests/metrics/test_compute_roc_auc.py diff --git a/tests/test_compute_variance.py b/tests/metrics/test_compute_variance.py similarity index 100% rename from tests/test_compute_variance.py rename to tests/metrics/test_compute_variance.py diff --git a/tests/test_cumulative.py b/tests/metrics/test_cumulative.py similarity index 99% rename from tests/test_cumulative.py rename to tests/metrics/test_cumulative.py index ffa5cf312f..bcacee9b22 100644 --- a/tests/test_cumulative.py +++ b/tests/metrics/test_cumulative.py @@ -20,7 +20,6 @@ class TestCumulative(unittest.TestCase): - def test_single(self): c = Cumulative() c.extend([2, 3]) diff --git a/tests/test_cumulative_average.py b/tests/metrics/test_cumulative_average.py similarity index 100% rename from tests/test_cumulative_average.py rename to tests/metrics/test_cumulative_average.py diff --git a/tests/test_cumulative_average_dist.py b/tests/metrics/test_cumulative_average_dist.py similarity index 99% rename from tests/test_cumulative_average_dist.py rename to tests/metrics/test_cumulative_average_dist.py index ddbfa1b9b1..53e81b7d86 100644 --- a/tests/test_cumulative_average_dist.py +++ b/tests/metrics/test_cumulative_average_dist.py @@ -23,7 +23,6 @@ @SkipIfBeforePyTorchVersion((1, 8)) class DistributedCumulativeAverage(DistTestCase): - @DistCall(nnodes=1, nproc_per_node=2) def test_value(self): rank = dist.get_rank() diff --git a/tests/test_hausdorff_distance.py b/tests/metrics/test_hausdorff_distance.py similarity index 100% rename from 
tests/test_hausdorff_distance.py rename to tests/metrics/test_hausdorff_distance.py diff --git a/tests/test_label_quality_score.py b/tests/metrics/test_label_quality_score.py similarity index 100% rename from tests/test_label_quality_score.py rename to tests/metrics/test_label_quality_score.py diff --git a/tests/test_loss_metric.py b/tests/metrics/test_loss_metric.py similarity index 100% rename from tests/test_loss_metric.py rename to tests/metrics/test_loss_metric.py diff --git a/tests/test_metrics_reloaded.py b/tests/metrics/test_metrics_reloaded.py similarity index 100% rename from tests/test_metrics_reloaded.py rename to tests/metrics/test_metrics_reloaded.py diff --git a/tests/test_ssim_metric.py b/tests/metrics/test_ssim_metric.py similarity index 100% rename from tests/test_ssim_metric.py rename to tests/metrics/test_ssim_metric.py diff --git a/tests/test_surface_dice.py b/tests/metrics/test_surface_dice.py similarity index 99% rename from tests/test_surface_dice.py rename to tests/metrics/test_surface_dice.py index 736548117e..01f80bd01e 100644 --- a/tests/test_surface_dice.py +++ b/tests/metrics/test_surface_dice.py @@ -24,7 +24,6 @@ class TestAllSurfaceDiceMetrics(unittest.TestCase): - def test_tolerance_euclidean_distance_with_spacing(self): batch_size = 2 n_class = 2 @@ -384,7 +383,7 @@ def test_not_predicted_not_present(self): np.testing.assert_equal(not_nans, torch.tensor([0], dtype=torch.float)) def test_compute_surface_dice_subvoxel(self): - mask_gt, mask_pred = torch.zeros(1, 1, 128, 128, 128), torch.zeros(1, 1, 128, 128, 128) + mask_gt, mask_pred = (torch.zeros(1, 1, 128, 128, 128), torch.zeros(1, 1, 128, 128, 128)) mask_gt[0, 0, 50, 60, 70] = 1 res = compute_surface_dice( mask_pred, mask_gt, [1.0], include_background=True, spacing=(3, 2, 1), use_subvoxels=True @@ -404,7 +403,7 @@ def test_compute_surface_dice_subvoxel(self): assert_allclose(res, 0.5, type_test=False) d = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - mask_gt, mask_pred = torch.zeros(1, 1, 100, 100, 100, device=d), torch.zeros(1, 1, 100, 100, 100, device=d) + mask_gt, mask_pred = (torch.zeros(1, 1, 100, 100, 100, device=d), torch.zeros(1, 1, 100, 100, 100, device=d)) mask_gt[0, 0, 0:50, :, :] = 1 mask_pred[0, 0, 0:51, :, :] = 1 res = compute_surface_dice( diff --git a/tests/test_surface_distance.py b/tests/metrics/test_surface_distance.py similarity index 100% rename from tests/test_surface_distance.py rename to tests/metrics/test_surface_distance.py diff --git a/tests/min_tests.py b/tests/min_tests.py index f39d3f9843..1fc3da4a19 100644 --- a/tests/min_tests.py +++ b/tests/min_tests.py @@ -11,10 +11,10 @@ from __future__ import annotations -import glob import os import sys import unittest +from pathlib import Path def run_testsuit(): @@ -216,17 +216,20 @@ def run_testsuit(): ] assert sorted(exclude_cases) == sorted(set(exclude_cases)), f"Duplicated items in {exclude_cases}" - files = glob.glob(os.path.join(os.path.dirname(__file__), "test_*.py")) + files = [f.relative_to(Path(__file__).parent.parent) for f in Path(__file__).parent.rglob("test_*.py")] + files = [str(f).replace(os.sep, ".").replace(".py", "") for f in files] cases = [] - for case in files: - test_module = os.path.basename(case)[:-3] - if test_module in exclude_cases: - exclude_cases.remove(test_module) - print(f"skipping tests.{test_module}.") + for test_module in files: + test_case = test_module.split(".")[-1] + if test_case in exclude_cases: + exclude_cases.remove(test_case) + print(f"skipping {test_module}") else: - 
cases.append(f"tests.{test_module}") - assert not exclude_cases, f"items in exclude_cases not used: {exclude_cases}." + print(f"adding {test_module}") + cases.append(test_module) + exclude_cases = [str(list(Path(__file__).parent.rglob(f"*{et}*"))[0]) for et in exclude_cases] + assert not exclude_cases, f"items in exclude_cases not used: {' '.join(exclude_cases)}" test_suite = unittest.TestLoader().loadTestsFromNames(cases) return test_suite diff --git a/tests/networks/__init__.py b/tests/networks/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/networks/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/networks/blocks/__init__.py b/tests/networks/blocks/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/networks/blocks/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/networks/blocks/dints_block/__init__.py b/tests/networks/blocks/dints_block/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/networks/blocks/dints_block/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/tests/test_acn_block.py b/tests/networks/blocks/dints_block/test_acn_block.py similarity index 100% rename from tests/test_acn_block.py rename to tests/networks/blocks/dints_block/test_acn_block.py diff --git a/tests/test_factorized_increase.py b/tests/networks/blocks/dints_block/test_factorized_increase.py similarity index 100% rename from tests/test_factorized_increase.py rename to tests/networks/blocks/dints_block/test_factorized_increase.py diff --git a/tests/test_factorized_reduce.py b/tests/networks/blocks/dints_block/test_factorized_reduce.py similarity index 100% rename from tests/test_factorized_reduce.py rename to tests/networks/blocks/dints_block/test_factorized_reduce.py diff --git a/tests/test_p3d_block.py b/tests/networks/blocks/dints_block/test_p3d_block.py similarity index 100% rename from tests/test_p3d_block.py rename to tests/networks/blocks/dints_block/test_p3d_block.py diff --git a/tests/test_adn.py b/tests/networks/blocks/test_adn.py similarity index 99% rename from tests/test_adn.py rename to tests/networks/blocks/test_adn.py index 6ff8042c69..d2839957fd 100644 --- a/tests/test_adn.py +++ b/tests/networks/blocks/test_adn.py @@ -59,7 +59,6 @@ class TestADN2D(TorchImageTestCase2D): - @parameterized.expand(TEST_CASES_2D) def test_adn_2d(self, args): adn = ADN(**args) @@ -74,7 +73,6 @@ def test_no_input(self): class TestADN3D(TorchImageTestCase3D): - @parameterized.expand(TEST_CASES_3D) def test_adn_3d(self, args): adn = ADN(**args) diff --git a/tests/test_convolutions.py b/tests/networks/blocks/test_convolutions.py similarity index 99% rename from tests/test_convolutions.py rename to tests/networks/blocks/test_convolutions.py index 90695d9dd5..f882f133e9 100644 --- a/tests/test_convolutions.py +++ b/tests/networks/blocks/test_convolutions.py @@ -18,7 +18,6 @@ class TestConvolution2D(TorchImageTestCase2D): - def test_conv1(self): conv = Convolution(2, self.input_channels, self.output_channels) out = conv(self.imt) @@ -70,7 +69,6 @@ def test_transpose2(self): class TestConvolution3D(TorchImageTestCase3D): - def test_conv1(self): conv = Convolution(3, self.input_channels, self.output_channels, dropout=0.1, adn_ordering="DAN") out = conv(self.imt) @@ -128,7 +126,6 @@ def test_transpose2(self): class TestResidualUnit2D(TorchImageTestCase2D): - def test_conv_only1(self): conv = ResidualUnit(2, 1, self.output_channels) out = conv(self.imt) diff --git a/tests/test_crf_cpu.py b/tests/networks/blocks/test_crf_cpu.py similarity index 99% rename from tests/test_crf_cpu.py rename to tests/networks/blocks/test_crf_cpu.py index 2dedd12eaf..2001700e5d 100644 --- a/tests/test_crf_cpu.py +++ b/tests/networks/blocks/test_crf_cpu.py @@ -495,7 +495,6 @@ @skip_if_no_cpp_extension class CRFTestCaseCpu(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test(self, test_case_description, params, input, features, expected): # Create input tensors diff --git a/tests/test_crf_cuda.py b/tests/networks/blocks/test_crf_cuda.py similarity index 99% rename from tests/test_crf_cuda.py rename to tests/networks/blocks/test_crf_cuda.py index e1114f65fd..d1fc9472d2 100644 --- a/tests/test_crf_cuda.py +++ b/tests/networks/blocks/test_crf_cuda.py @@ -496,7 +496,6 @@ @skip_if_no_cpp_extension @skip_if_no_cuda class CRFTestCaseCuda(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test(self, test_case_description, params, input, features, expected): # Create input tensors diff --git a/tests/test_crossattention.py b/tests/networks/blocks/test_crossattention.py similarity index 99% 
rename from tests/test_crossattention.py rename to tests/networks/blocks/test_crossattention.py index 8ea7c33fea..32cd655d4c 100644 --- a/tests/test_crossattention.py +++ b/tests/networks/blocks/test_crossattention.py @@ -49,7 +49,6 @@ class TestResBlock(unittest.TestCase): - @parameterized.expand(TEST_CASE_CABLOCK) @skipUnless(has_einops, "Requires einops") @SkipIfBeforePyTorchVersion((2, 0)) diff --git a/tests/test_denseblock.py b/tests/networks/blocks/test_denseblock.py similarity index 99% rename from tests/test_denseblock.py rename to tests/networks/blocks/test_denseblock.py index 2f80954983..884df1d511 100644 --- a/tests/test_denseblock.py +++ b/tests/networks/blocks/test_denseblock.py @@ -20,7 +20,6 @@ class TestDenseBlock2D(TorchImageTestCase2D): - def test_block_empty(self): block = DenseBlock([]) out = block(self.imt) @@ -37,7 +36,6 @@ def test_block_conv(self): class TestDenseBlock3D(TorchImageTestCase3D): - def test_block_conv(self): conv1 = nn.Conv3d(self.input_channels, self.output_channels, 3, padding=1) conv2 = nn.Conv3d(self.input_channels + self.output_channels, self.input_channels, 3, padding=1) @@ -54,7 +52,6 @@ def test_block_conv(self): class TestConvDenseBlock2D(TorchImageTestCase2D): - def test_block_empty(self): conv = ConvDenseBlock(spatial_dims=2, in_channels=self.input_channels, channels=[]) out = conv(self.imt) @@ -82,7 +79,6 @@ def test_block2(self): class TestConvDenseBlock3D(TorchImageTestCase3D): - def test_block_empty(self): conv = ConvDenseBlock(spatial_dims=3, in_channels=self.input_channels, channels=[]) out = conv(self.imt) diff --git a/tests/test_dynunet_block.py b/tests/networks/blocks/test_dynunet_block.py similarity index 99% rename from tests/test_dynunet_block.py rename to tests/networks/blocks/test_dynunet_block.py index af15e268e1..d469c6f3e9 100644 --- a/tests/test_dynunet_block.py +++ b/tests/networks/blocks/test_dynunet_block.py @@ -73,7 +73,6 @@ class TestResBasicBlock(unittest.TestCase): - @parameterized.expand(TEST_CASE_RES_BASIC_BLOCK) def test_shape(self, input_param, input_shape, expected_shape): for net in [UnetResBlock(**input_param), UnetBasicBlock(**input_param)]: @@ -97,7 +96,6 @@ def test_script(self): class TestUpBlock(unittest.TestCase): - @parameterized.expand(TEST_UP_BLOCK) def test_shape(self, input_param, input_shape, expected_shape, skip_shape): net = UnetUpBlock(**input_param) diff --git a/tests/test_fpn_block.py b/tests/networks/blocks/test_fpn_block.py similarity index 99% rename from tests/test_fpn_block.py rename to tests/networks/blocks/test_fpn_block.py index b3894ebf6a..6980889433 100644 --- a/tests/test_fpn_block.py +++ b/tests/networks/blocks/test_fpn_block.py @@ -44,7 +44,6 @@ class TestFPNBlock(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_fpn_block(self, input_param, input_shape, expected_shape): net = FeaturePyramidNetwork(**input_param) @@ -68,7 +67,6 @@ def test_script(self, input_param, input_shape, expected_shape): @unittest.skipUnless(has_torchvision, "Requires torchvision") class TestFPN(unittest.TestCase): - @parameterized.expand(TEST_CASES2) def test_fpn(self, input_param, input_shape, expected_shape): net = _resnet_fpn_extractor(backbone=resnet50(), spatial_dims=input_param["spatial_dims"], returned_layers=[1]) diff --git a/tests/test_localnet_block.py b/tests/networks/blocks/test_localnet_block.py similarity index 100% rename from tests/test_localnet_block.py rename to tests/networks/blocks/test_localnet_block.py diff --git a/tests/test_mlp.py b/tests/networks/blocks/test_mlp.py 
similarity index 100% rename from tests/test_mlp.py rename to tests/networks/blocks/test_mlp.py diff --git a/tests/test_patchembedding.py b/tests/networks/blocks/test_patchembedding.py similarity index 99% rename from tests/test_patchembedding.py rename to tests/networks/blocks/test_patchembedding.py index 30e4b11883..95ca95b36a 100644 --- a/tests/test_patchembedding.py +++ b/tests/networks/blocks/test_patchembedding.py @@ -77,7 +77,6 @@ @SkipIfBeforePyTorchVersion((1, 11, 1)) class TestPatchEmbeddingBlock(unittest.TestCase): - def setUp(self): self.threads = torch.get_num_threads() torch.set_num_threads(4) @@ -189,7 +188,6 @@ def test_ill_arg(self): class TestPatchEmbed(unittest.TestCase): - def setUp(self): self.threads = torch.get_num_threads() torch.set_num_threads(4) diff --git a/tests/test_regunet_block.py b/tests/networks/blocks/test_regunet_block.py similarity index 100% rename from tests/test_regunet_block.py rename to tests/networks/blocks/test_regunet_block.py diff --git a/tests/test_se_block.py b/tests/networks/blocks/test_se_block.py similarity index 99% rename from tests/test_se_block.py rename to tests/networks/blocks/test_se_block.py index 0b0ac63f16..d799cd095c 100644 --- a/tests/test_se_block.py +++ b/tests/networks/blocks/test_se_block.py @@ -63,7 +63,6 @@ class TestSEBlockLayer(unittest.TestCase): - @parameterized.expand(TEST_CASES + TEST_CASES_3D) def test_shape(self, input_param, input_shape, expected_shape): net = SEBlock(**input_param).to(device) diff --git a/tests/test_se_blocks.py b/tests/networks/blocks/test_se_blocks.py similarity index 99% rename from tests/test_se_blocks.py rename to tests/networks/blocks/test_se_blocks.py index 12d4d1a36d..b40f3a0955 100644 --- a/tests/test_se_blocks.py +++ b/tests/networks/blocks/test_se_blocks.py @@ -41,7 +41,6 @@ class TestChannelSELayer(unittest.TestCase): - @parameterized.expand(TEST_CASES + TEST_CASES_3D) def test_shape(self, input_param, input_shape, expected_shape): net = ChannelSELayer(**input_param) @@ -61,7 +60,6 @@ def test_ill_arg(self): class TestResidualSELayer(unittest.TestCase): - @parameterized.expand(TEST_CASES[:1]) def test_shape(self, input_param, input_shape, expected_shape): net = ResidualSELayer(**input_param) diff --git a/tests/test_segresnet_block.py b/tests/networks/blocks/test_segresnet_block.py similarity index 100% rename from tests/test_segresnet_block.py rename to tests/networks/blocks/test_segresnet_block.py diff --git a/tests/test_selfattention.py b/tests/networks/blocks/test_selfattention.py similarity index 99% rename from tests/test_selfattention.py rename to tests/networks/blocks/test_selfattention.py index 21302141e0..494f64cad8 100644 --- a/tests/test_selfattention.py +++ b/tests/networks/blocks/test_selfattention.py @@ -52,7 +52,6 @@ class TestResBlock(unittest.TestCase): - @parameterized.expand(TEST_CASE_SABLOCK) @skipUnless(has_einops, "Requires einops") @SkipIfBeforePyTorchVersion((2, 0)) @@ -163,7 +162,6 @@ def test_access_attn_matrix(self): assert matrix_acess_blk.att_mat.shape == (input_shape[0], input_shape[0], input_shape[1], input_shape[1]) def test_number_of_parameters(self): - def count_sablock_params(*args, **kwargs): """Count the number of parameters in a SABlock.""" sablock = SABlock(*args, **kwargs) diff --git a/tests/test_simple_aspp.py b/tests/networks/blocks/test_simple_aspp.py similarity index 100% rename from tests/test_simple_aspp.py rename to tests/networks/blocks/test_simple_aspp.py diff --git a/tests/test_spatialattention.py 
b/tests/networks/blocks/test_spatialattention.py similarity index 100% rename from tests/test_spatialattention.py rename to tests/networks/blocks/test_spatialattention.py diff --git a/tests/test_subpixel_upsample.py b/tests/networks/blocks/test_subpixel_upsample.py similarity index 99% rename from tests/test_subpixel_upsample.py rename to tests/networks/blocks/test_subpixel_upsample.py index 5702f3f182..9300ff2b43 100644 --- a/tests/test_subpixel_upsample.py +++ b/tests/networks/blocks/test_subpixel_upsample.py @@ -68,7 +68,6 @@ class TestSUBPIXEL(unittest.TestCase): - @parameterized.expand(TEST_CASE_SUBPIXEL) def test_subpixel_shape(self, input_param, input_shape, expected_shape): net = SubpixelUpsample(**input_param) diff --git a/tests/test_text_encoding.py b/tests/networks/blocks/test_text_encoding.py similarity index 99% rename from tests/test_text_encoding.py rename to tests/networks/blocks/test_text_encoding.py index 83093b151f..f9112d9814 100644 --- a/tests/test_text_encoding.py +++ b/tests/networks/blocks/test_text_encoding.py @@ -18,7 +18,6 @@ class TestTextEncoder(unittest.TestCase): - def test_test_encoding_shape(self): with skip_if_downloading_fails(): # test 2D encoder diff --git a/tests/test_transformerblock.py b/tests/networks/blocks/test_transformerblock.py similarity index 100% rename from tests/test_transformerblock.py rename to tests/networks/blocks/test_transformerblock.py diff --git a/tests/test_unetr_block.py b/tests/networks/blocks/test_unetr_block.py similarity index 99% rename from tests/test_unetr_block.py rename to tests/networks/blocks/test_unetr_block.py index d6cab1b1c4..1396a08193 100644 --- a/tests/test_unetr_block.py +++ b/tests/networks/blocks/test_unetr_block.py @@ -102,7 +102,6 @@ class TestResBasicBlock(unittest.TestCase): - @parameterized.expand(TEST_CASE_UNETR_BASIC_BLOCK) def test_shape(self, input_param, input_shape, expected_shape): for net in [UnetrBasicBlock(**input_param)]: @@ -125,7 +124,6 @@ def test_script(self): class TestUpBlock(unittest.TestCase): - @parameterized.expand(TEST_UP_BLOCK) def test_shape(self, input_param, input_shape, expected_shape, skip_shape): net = UnetrUpBlock(**input_param) @@ -142,7 +140,6 @@ def test_script(self): class TestPrUpBlock(unittest.TestCase): - @parameterized.expand(TEST_PRUP_BLOCK) def test_shape(self, input_param, input_shape, expected_shape): net = UnetrPrUpBlock(**input_param) diff --git a/tests/test_upsample_block.py b/tests/networks/blocks/test_upsample_block.py similarity index 100% rename from tests/test_upsample_block.py rename to tests/networks/blocks/test_upsample_block.py diff --git a/tests/networks/blocks/warp/__init__.py b/tests/networks/blocks/warp/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/networks/blocks/warp/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/tests/test_dvf2ddf.py b/tests/networks/blocks/warp/test_dvf2ddf.py similarity index 100% rename from tests/test_dvf2ddf.py rename to tests/networks/blocks/warp/test_dvf2ddf.py diff --git a/tests/test_warp.py b/tests/networks/blocks/warp/test_warp.py similarity index 98% rename from tests/test_warp.py rename to tests/networks/blocks/warp/test_warp.py index 4d3f5d2c42..452ca5d890 100644 --- a/tests/test_warp.py +++ b/tests/networks/blocks/warp/test_warp.py @@ -10,8 +10,8 @@ # limitations under the License. from __future__ import annotations -import os import unittest +from pathlib import Path import numpy as np import torch @@ -106,7 +106,6 @@ @skip_if_quick class TestWarp(unittest.TestCase): - def setUp(self): config = testing_data_config("images", "Prostate_T2W_AX_1") download_url_or_skip_test( @@ -157,7 +156,8 @@ def test_grad(self): gradcheck(warp_layer, (input_image, ddf), atol=1e-2, eps=1e-2) -FILE_PATH = os.path.join(os.path.dirname(__file__), "testing_data", "temp_" + "mri.nii") +TESTS_PATH = Path(__file__).parents[3] +FILE_PATH = TESTS_PATH / "testing_data" / ("temp_" + "mri.nii") def load_img_and_sample_ddf(): diff --git a/tests/networks/layers/__init__.py b/tests/networks/layers/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/networks/layers/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/networks/layers/filtering/__init__.py b/tests/networks/layers/filtering/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/networks/layers/filtering/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
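Several of the relocated tests above locate shared data via `Path(__file__).parents[N]`; the index has to match the nesting depth under `tests/` (1 for `tests/integration`, 2 for `tests/losses/image_dissimilarity`, 3 for `tests/networks/blocks/warp`) so that every file resolves the same `tests/testing_data` directory. A small sketch of that convention, using file paths taken from the renames above (illustrative only, not part of the patch):

```python
# Illustrative check of the TESTS_PATH convention used by the relocated tests:
# parents[N] must point back at tests/ regardless of how deep the test file sits.
from pathlib import Path

def testing_data_dir(test_file: str, depth: int) -> Path:
    """Resolve tests/testing_data for a test file nested `depth` levels below tests/."""
    return Path(test_file).parents[depth] / "testing_data"

examples = [
    ("tests/integration/test_meta_affine.py", 1),
    ("tests/losses/image_dissimilarity/test_global_mutual_information_loss.py", 2),
    ("tests/networks/blocks/warp/test_warp.py", 3),
]
for path, depth in examples:
    assert testing_data_dir(path, depth) == Path("tests/testing_data")
print("all TESTS_PATH depths resolve to tests/testing_data")
```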
diff --git a/tests/test_bilateral_approx_cpu.py b/tests/networks/layers/filtering/test_bilateral_approx_cpu.py similarity index 99% rename from tests/test_bilateral_approx_cpu.py rename to tests/networks/layers/filtering/test_bilateral_approx_cpu.py index 24f35990dc..51d0f56f82 100644 --- a/tests/test_bilateral_approx_cpu.py +++ b/tests/networks/layers/filtering/test_bilateral_approx_cpu.py @@ -365,7 +365,6 @@ @skip_if_no_cpp_extension class BilateralFilterTestCaseCpuApprox(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_cpu_approx(self, test_case_description, sigmas, input, expected): # Params to determine the implementation to test diff --git a/tests/test_bilateral_approx_cuda.py b/tests/networks/layers/filtering/test_bilateral_approx_cuda.py similarity index 99% rename from tests/test_bilateral_approx_cuda.py rename to tests/networks/layers/filtering/test_bilateral_approx_cuda.py index fddf7f002e..b98279f28e 100644 --- a/tests/test_bilateral_approx_cuda.py +++ b/tests/networks/layers/filtering/test_bilateral_approx_cuda.py @@ -366,7 +366,6 @@ @skip_if_no_cuda @skip_if_no_cpp_extension class BilateralFilterTestCaseCudaApprox(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_cuda_approx(self, test_case_description, sigmas, input, expected): # Skip this test diff --git a/tests/test_bilateral_precise.py b/tests/networks/layers/filtering/test_bilateral_precise.py similarity index 99% rename from tests/test_bilateral_precise.py rename to tests/networks/layers/filtering/test_bilateral_precise.py index a917398657..505f0a6303 100644 --- a/tests/test_bilateral_precise.py +++ b/tests/networks/layers/filtering/test_bilateral_precise.py @@ -366,7 +366,6 @@ @skip_if_no_cpp_extension @skip_if_quick class BilateralFilterTestCaseCpuPrecise(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_cpu_precise(self, test_case_description, sigmas, input, expected): # Params to determine the implementation to test @@ -400,7 +399,6 @@ def test_cpu_precise_backwards(self, test_case_description, sigmas, input, expec @skip_if_no_cuda @skip_if_no_cpp_extension class BilateralFilterTestCaseCudaPrecise(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_cuda_precise(self, test_case_description, sigmas, input, expected): # Skip this test diff --git a/tests/test_phl_cpu.py b/tests/networks/layers/filtering/test_phl_cpu.py similarity index 99% rename from tests/test_phl_cpu.py rename to tests/networks/layers/filtering/test_phl_cpu.py index 12b840cabf..f6905283a5 100644 --- a/tests/test_phl_cpu.py +++ b/tests/networks/layers/filtering/test_phl_cpu.py @@ -242,7 +242,6 @@ @skip_if_no_cpp_extension class PHLFilterTestCaseCpu(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_cpu(self, test_case_description, sigmas, input, features, expected): # Create input tensors diff --git a/tests/test_phl_cuda.py b/tests/networks/layers/filtering/test_phl_cuda.py similarity index 99% rename from tests/test_phl_cuda.py rename to tests/networks/layers/filtering/test_phl_cuda.py index 046b06e71e..e823639ffc 100644 --- a/tests/test_phl_cuda.py +++ b/tests/networks/layers/filtering/test_phl_cuda.py @@ -150,7 +150,6 @@ @skip_if_no_cuda @skip_if_no_cpp_extension class PHLFilterTestCaseCuda(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_cuda(self, test_case_description, sigmas, input, features, expected): # Create input tensors diff --git a/tests/test_trainable_bilateral.py b/tests/networks/layers/filtering/test_trainable_bilateral.py similarity index 99% 
rename from tests/test_trainable_bilateral.py rename to tests/networks/layers/filtering/test_trainable_bilateral.py index ea8cb8a9dc..ed7e5a0a03 100644 --- a/tests/test_trainable_bilateral.py +++ b/tests/networks/layers/filtering/test_trainable_bilateral.py @@ -273,7 +273,6 @@ @skip_if_no_cpp_extension class BilateralFilterTestCaseCpuPrecise(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_cpu_precise(self, test_case_description, sigmas, input, expected): # Params to determine the implementation to test @@ -372,7 +371,6 @@ def test_cpu_precise_backwards(self, test_case_description, sigmas, input, expec @skip_if_no_cuda @skip_if_no_cpp_extension class BilateralFilterTestCaseCudaPrecise(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_cuda_precise(self, test_case_description, sigmas, input, expected): # Skip this test diff --git a/tests/test_trainable_joint_bilateral.py b/tests/networks/layers/filtering/test_trainable_joint_bilateral.py similarity index 99% rename from tests/test_trainable_joint_bilateral.py rename to tests/networks/layers/filtering/test_trainable_joint_bilateral.py index a21596945b..fa7509b376 100644 --- a/tests/test_trainable_joint_bilateral.py +++ b/tests/networks/layers/filtering/test_trainable_joint_bilateral.py @@ -358,7 +358,6 @@ @skip_if_no_cpp_extension @skip_if_quick class JointBilateralFilterTestCaseCpuPrecise(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_cpu_precise(self, test_case_description, sigmas, input, guide, expected): # Params to determine the implementation to test @@ -482,7 +481,6 @@ def test_cpu_precise_backwards(self, test_case_description, sigmas, input, guide @skip_if_no_cuda @skip_if_no_cpp_extension class JointBilateralFilterTestCaseCudaPrecise(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_cuda_precise(self, test_case_description, sigmas, input, guide, expected): # Skip this test diff --git a/tests/test_affine_transform.py b/tests/networks/layers/test_affine_transform.py similarity index 99% rename from tests/test_affine_transform.py rename to tests/networks/layers/test_affine_transform.py index 7410de9803..627a4cb1b9 100644 --- a/tests/test_affine_transform.py +++ b/tests/networks/layers/test_affine_transform.py @@ -83,7 +83,6 @@ class TestNormTransform(unittest.TestCase): - @parameterized.expand(TEST_NORM_CASES) def test_norm_xform(self, input_shape, align_corners, expected, zero_centered=False): norm = normalize_transform( @@ -108,7 +107,6 @@ def test_norm_xform(self, input_shape, align_corners, expected, zero_centered=Fa class TestToNormAffine(unittest.TestCase): - @parameterized.expand(TEST_TO_NORM_AFFINE_CASES) def test_to_norm_affine(self, affine, src_size, dst_size, align_corners, expected, zero_centered=False): affine = torch.as_tensor(affine, device=torch.device("cpu:0"), dtype=torch.float32) @@ -132,7 +130,6 @@ def test_to_norm_affine_ill(self, affine, src_size, dst_size, align_corners): class TestAffineTransform(unittest.TestCase): - @parameterized.expand( [ (torch.as_tensor([[1.0, 0.0, 0.0], [0.0, 1.0, -1.0]]), [[[[0, 4, 1, 3], [0, 7, 6, 8], [0, 3, 5, 3]]]]), diff --git a/tests/test_apply_filter.py b/tests/networks/layers/test_apply_filter.py similarity index 100% rename from tests/test_apply_filter.py rename to tests/networks/layers/test_apply_filter.py diff --git a/tests/test_channel_pad.py b/tests/networks/layers/test_channel_pad.py similarity index 100% rename from tests/test_channel_pad.py rename to tests/networks/layers/test_channel_pad.py diff --git 
a/tests/test_conjugate_gradient.py b/tests/networks/layers/test_conjugate_gradient.py similarity index 100% rename from tests/test_conjugate_gradient.py rename to tests/networks/layers/test_conjugate_gradient.py diff --git a/tests/test_drop_path.py b/tests/networks/layers/test_drop_path.py similarity index 100% rename from tests/test_drop_path.py rename to tests/networks/layers/test_drop_path.py diff --git a/tests/test_gaussian.py b/tests/networks/layers/test_gaussian.py similarity index 100% rename from tests/test_gaussian.py rename to tests/networks/layers/test_gaussian.py diff --git a/tests/test_gaussian_filter.py b/tests/networks/layers/test_gaussian_filter.py similarity index 99% rename from tests/test_gaussian_filter.py rename to tests/networks/layers/test_gaussian_filter.py index 2d5c935f90..7ed99c3ed7 100644 --- a/tests/test_gaussian_filter.py +++ b/tests/networks/layers/test_gaussian_filter.py @@ -36,7 +36,6 @@ @SkipIfAtLeastPyTorchVersion((2, 2, 0)) # https://github.com/Project-MONAI/MONAI/issues/7445 class TestGaussianFilterBackprop(unittest.TestCase): - def code_to_run(self, input_args): input_dims = input_args.get("dims", (2, 3, 8)) device = ( @@ -97,7 +96,6 @@ def test_train_slow(self, input_args): @SkipIfAtLeastPyTorchVersion((2, 2, 0)) # https://github.com/Project-MONAI/MONAI/issues/7445 class GaussianFilterTestCase(unittest.TestCase): - def test_1d(self): a = torch.ones(1, 8, 10) g = GaussianFilter(1, 3, 3).to(torch.device("cpu:0")) diff --git a/tests/test_get_layers.py b/tests/networks/layers/test_get_layers.py similarity index 100% rename from tests/test_get_layers.py rename to tests/networks/layers/test_get_layers.py diff --git a/tests/test_gmm.py b/tests/networks/layers/test_gmm.py similarity index 99% rename from tests/test_gmm.py rename to tests/networks/layers/test_gmm.py index e582f7668c..c4e9f3c3f5 100644 --- a/tests/test_gmm.py +++ b/tests/networks/layers/test_gmm.py @@ -261,7 +261,6 @@ @skip_if_quick class GMMTestCase(unittest.TestCase): - def setUp(self): self._var = os.environ.get("TORCH_EXTENSIONS_DIR") self.tempdir = tempfile.mkdtemp() diff --git a/tests/test_grid_pull.py b/tests/networks/layers/test_grid_pull.py similarity index 99% rename from tests/test_grid_pull.py rename to tests/networks/layers/test_grid_pull.py index 79f18f2b60..99b9b40791 100644 --- a/tests/test_grid_pull.py +++ b/tests/networks/layers/test_grid_pull.py @@ -63,7 +63,6 @@ def make_grid(shape, dtype=None, device=None, requires_grad=True): @skip_if_no_cpp_extension class TestGridPull(unittest.TestCase): - @parameterized.expand(TEST_1D_GP, skip_on_empty=True) def test_grid_pull(self, input_param, expected): result = grid_pull(**input_param) diff --git a/tests/test_hilbert_transform.py b/tests/networks/layers/test_hilbert_transform.py similarity index 99% rename from tests/test_hilbert_transform.py rename to tests/networks/layers/test_hilbert_transform.py index d484e230dd..c12136aecf 100644 --- a/tests/test_hilbert_transform.py +++ b/tests/networks/layers/test_hilbert_transform.py @@ -114,7 +114,6 @@ def create_expected_numpy_output(input_datum, **kwargs): @SkipIfNoModule("torch.fft") class TestHilbertTransformCPU(unittest.TestCase): - @parameterized.expand(TEST_CASES_CPU + TEST_CASES_GPU) def test_value(self, arguments, image, expected_data, atol): result = HilbertTransform(**arguments)(image) @@ -124,7 +123,6 @@ def test_value(self, arguments, image, expected_data, atol): @SkipIfModule("torch.fft") class TestHilbertTransformNoFFTMod(unittest.TestCase): - def 
test_no_fft_module_error(self): self.assertRaises(OptionalImportError, HilbertTransform(), torch.randn(1, 1, 10)) diff --git a/tests/test_lltm.py b/tests/networks/layers/test_lltm.py similarity index 99% rename from tests/test_lltm.py rename to tests/networks/layers/test_lltm.py index 0b72e35146..c54488af85 100644 --- a/tests/test_lltm.py +++ b/tests/networks/layers/test_lltm.py @@ -29,7 +29,6 @@ class TestLLTM(unittest.TestCase): - @parameterized.expand([TEST_CASE_1]) @SkipIfNoModule("monai._C") def test_value(self, input_param, expected_h, expected_c): diff --git a/tests/test_median_filter.py b/tests/networks/layers/test_median_filter.py similarity index 100% rename from tests/test_median_filter.py rename to tests/networks/layers/test_median_filter.py diff --git a/tests/test_polyval.py b/tests/networks/layers/test_polyval.py similarity index 100% rename from tests/test_polyval.py rename to tests/networks/layers/test_polyval.py diff --git a/tests/test_preset_filters.py b/tests/networks/layers/test_preset_filters.py similarity index 100% rename from tests/test_preset_filters.py rename to tests/networks/layers/test_preset_filters.py diff --git a/tests/test_savitzky_golay_filter.py b/tests/networks/layers/test_savitzky_golay_filter.py similarity index 99% rename from tests/test_savitzky_golay_filter.py rename to tests/networks/layers/test_savitzky_golay_filter.py index caa1b5c0af..ea4b97c80c 100644 --- a/tests/test_savitzky_golay_filter.py +++ b/tests/networks/layers/test_savitzky_golay_filter.py @@ -100,7 +100,6 @@ class TestSavitzkyGolayCPU(unittest.TestCase): - @parameterized.expand( [TEST_CASE_SINGLE_VALUE, TEST_CASE_1D, TEST_CASE_2D_AXIS_2, TEST_CASE_2D_AXIS_3, TEST_CASE_SINE_SMOOTH] ) @@ -110,7 +109,6 @@ def test_value(self, arguments, image, expected_data, atol, rtol=1e-5): class TestSavitzkyGolayCPUREP(unittest.TestCase): - @parameterized.expand( [TEST_CASE_SINGLE_VALUE_REP, TEST_CASE_1D_REP, TEST_CASE_2D_AXIS_2_REP, TEST_CASE_2D_AXIS_3_REP] ) @@ -121,7 +119,6 @@ def test_value(self, arguments, image, expected_data, atol, rtol=1e-5): @skip_if_no_cuda class TestSavitzkyGolayGPU(unittest.TestCase): - @parameterized.expand( [TEST_CASE_SINGLE_VALUE, TEST_CASE_1D, TEST_CASE_2D_AXIS_2, TEST_CASE_2D_AXIS_3, TEST_CASE_SINE_SMOOTH] ) @@ -132,7 +129,6 @@ def test_value(self, arguments, image, expected_data, atol, rtol=1e-5): @skip_if_no_cuda class TestSavitzkyGolayGPUREP(unittest.TestCase): - @parameterized.expand( [TEST_CASE_SINGLE_VALUE_REP, TEST_CASE_1D_REP, TEST_CASE_2D_AXIS_2_REP, TEST_CASE_2D_AXIS_3_REP] ) diff --git a/tests/test_separable_filter.py b/tests/networks/layers/test_separable_filter.py similarity index 100% rename from tests/test_separable_filter.py rename to tests/networks/layers/test_separable_filter.py diff --git a/tests/test_skip_connection.py b/tests/networks/layers/test_skip_connection.py similarity index 100% rename from tests/test_skip_connection.py rename to tests/networks/layers/test_skip_connection.py diff --git a/tests/test_vector_quantizer.py b/tests/networks/layers/test_vector_quantizer.py similarity index 100% rename from tests/test_vector_quantizer.py rename to tests/networks/layers/test_vector_quantizer.py diff --git a/tests/test_weight_init.py b/tests/networks/layers/test_weight_init.py similarity index 100% rename from tests/test_weight_init.py rename to tests/networks/layers/test_weight_init.py diff --git a/tests/networks/nets/__init__.py b/tests/networks/nets/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ 
b/tests/networks/nets/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/networks/nets/dints/__init__.py b/tests/networks/nets/dints/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/networks/nets/dints/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/test_dints_cell.py b/tests/networks/nets/dints/test_dints_cell.py similarity index 100% rename from tests/test_dints_cell.py rename to tests/networks/nets/dints/test_dints_cell.py diff --git a/tests/test_dints_mixop.py b/tests/networks/nets/dints/test_dints_mixop.py similarity index 99% rename from tests/test_dints_mixop.py rename to tests/networks/nets/dints/test_dints_mixop.py index ea22b06f8b..ea78514fa5 100644 --- a/tests/test_dints_mixop.py +++ b/tests/networks/nets/dints/test_dints_mixop.py @@ -61,7 +61,6 @@ class TestMixOP(unittest.TestCase): - @parameterized.expand(TEST_CASES_3D) def test_mixop_3d(self, input_param, ops, weight, input_shape, expected_shape): net = MixedOp(ops=Cell.OPS3D, **input_param) diff --git a/tests/networks/nets/regunet/__init__.py b/tests/networks/nets/regunet/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/networks/nets/regunet/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/tests/test_localnet.py b/tests/networks/nets/regunet/test_localnet.py similarity index 99% rename from tests/test_localnet.py rename to tests/networks/nets/regunet/test_localnet.py index ee920436ff..d255934495 100644 --- a/tests/test_localnet.py +++ b/tests/networks/nets/regunet/test_localnet.py @@ -62,7 +62,6 @@ class TestLocalNet(unittest.TestCase): - @parameterized.expand(TEST_CASE_LOCALNET_2D + TEST_CASE_LOCALNET_3D) def test_shape(self, input_param, input_shape, expected_shape): net = LocalNet(**input_param).to(device) diff --git a/tests/test_regunet.py b/tests/networks/nets/regunet/test_regunet.py similarity index 99% rename from tests/test_regunet.py rename to tests/networks/nets/regunet/test_regunet.py index 1fcab5e554..152a05fc62 100644 --- a/tests/test_regunet.py +++ b/tests/networks/nets/regunet/test_regunet.py @@ -63,7 +63,6 @@ class TestREGUNET(unittest.TestCase): - @parameterized.expand(TEST_CASE_REGUNET_2D + TEST_CASE_REGUNET_3D) def test_shape(self, input_param, input_shape, expected_shape): net = RegUNet(**input_param).to(device) diff --git a/tests/test_ahnet.py b/tests/networks/nets/test_ahnet.py similarity index 99% rename from tests/test_ahnet.py rename to tests/networks/nets/test_ahnet.py index 4dd90e8d84..7facf9af24 100644 --- a/tests/test_ahnet.py +++ b/tests/networks/nets/test_ahnet.py @@ -126,7 +126,6 @@ class TestFCN(unittest.TestCase): - @parameterized.expand([TEST_CASE_FCN_1, TEST_CASE_FCN_2, TEST_CASE_FCN_3]) @skip_if_quick def test_fcn_shape(self, input_param, input_shape, expected_shape): @@ -137,7 +136,6 @@ def test_fcn_shape(self, input_param, input_shape, expected_shape): class TestFCNWithPretrain(unittest.TestCase): - @parameterized.expand([TEST_CASE_FCN_WITH_PRETRAIN_1, TEST_CASE_FCN_WITH_PRETRAIN_2]) @skip_if_quick def test_fcn_shape(self, input_param, input_shape, expected_shape): @@ -148,7 +146,6 @@ def test_fcn_shape(self, input_param, input_shape, expected_shape): class TestMCFCN(unittest.TestCase): - @parameterized.expand([TEST_CASE_MCFCN_1, TEST_CASE_MCFCN_2, TEST_CASE_MCFCN_3]) def test_mcfcn_shape(self, input_param, input_shape, expected_shape): net = MCFCN(**input_param).to(device) @@ -158,7 +155,6 @@ def test_mcfcn_shape(self, input_param, input_shape, expected_shape): class TestMCFCNWithPretrain(unittest.TestCase): - @parameterized.expand([TEST_CASE_MCFCN_WITH_PRETRAIN_1, TEST_CASE_MCFCN_WITH_PRETRAIN_2]) def test_mcfcn_shape(self, input_param, input_shape, expected_shape): net = test_pretrained_networks(MCFCN, input_param, device) @@ -168,7 +164,6 @@ def test_mcfcn_shape(self, input_param, input_shape, expected_shape): class TestAHNET(unittest.TestCase): - @parameterized.expand([TEST_CASE_AHNET_2D_1, TEST_CASE_AHNET_2D_2, TEST_CASE_AHNET_2D_3]) def test_ahnet_shape_2d(self, input_param, input_shape, expected_shape): net = AHNet(**input_param).to(device) @@ -197,7 +192,6 @@ def test_script(self): class TestAHNETWithPretrain(unittest.TestCase): - @parameterized.expand( [TEST_CASE_AHNET_3D_WITH_PRETRAIN_1, TEST_CASE_AHNET_3D_WITH_PRETRAIN_2, TEST_CASE_AHNET_3D_WITH_PRETRAIN_3] ) diff --git a/tests/test_attentionunet.py b/tests/networks/nets/test_attentionunet.py similarity index 99% rename from tests/test_attentionunet.py rename to tests/networks/nets/test_attentionunet.py index bb14ef0222..30b9a76aad 100644 --- a/tests/test_attentionunet.py +++ b/tests/networks/nets/test_attentionunet.py @@ -26,7 +26,6 @@ def get_net_parameters(net: nn.Module) -> int: class TestAttentionUnet(unittest.TestCase): - def test_attention_block(self): 
for dims in [2, 3]: block = att.AttentionBlock(dims, f_int=2, f_g=6, f_l=6) diff --git a/tests/test_autoencoder.py b/tests/networks/nets/test_autoencoder.py similarity index 99% rename from tests/test_autoencoder.py rename to tests/networks/nets/test_autoencoder.py index 3f2f131900..dcf90b809a 100644 --- a/tests/test_autoencoder.py +++ b/tests/networks/nets/test_autoencoder.py @@ -74,7 +74,6 @@ class TestAutoEncoder(unittest.TestCase): - @parameterized.expand(CASES) def test_shape(self, input_param, input_shape, expected_shape): net = AutoEncoder(**input_param).to(device) diff --git a/tests/test_autoencoderkl.py b/tests/networks/nets/test_autoencoderkl.py similarity index 100% rename from tests/test_autoencoderkl.py rename to tests/networks/nets/test_autoencoderkl.py diff --git a/tests/test_basic_unet.py b/tests/networks/nets/test_basic_unet.py similarity index 99% rename from tests/test_basic_unet.py rename to tests/networks/nets/test_basic_unet.py index 976846d53d..edd7b599d4 100644 --- a/tests/test_basic_unet.py +++ b/tests/networks/nets/test_basic_unet.py @@ -83,7 +83,6 @@ class TestBasicUNET(unittest.TestCase): - @parameterized.expand(CASES_1D + CASES_2D + CASES_3D) def test_shape(self, input_param, input_shape, expected_shape): device = "cuda" if torch.cuda.is_available() else "cpu" diff --git a/tests/test_basic_unetplusplus.py b/tests/networks/nets/test_basic_unetplusplus.py similarity index 99% rename from tests/test_basic_unetplusplus.py rename to tests/networks/nets/test_basic_unetplusplus.py index 11ac95bf48..37a2813f2e 100644 --- a/tests/test_basic_unetplusplus.py +++ b/tests/networks/nets/test_basic_unetplusplus.py @@ -83,7 +83,6 @@ class TestBasicUNETPlusPlus(unittest.TestCase): - @parameterized.expand(CASES_1D + CASES_2D + CASES_3D) def test_shape(self, input_param, input_shape, expected_shape): device = "cuda" if torch.cuda.is_available() else "cpu" diff --git a/tests/test_bundle_init_bundle.py b/tests/networks/nets/test_bundle_init_bundle.py similarity index 99% rename from tests/test_bundle_init_bundle.py rename to tests/networks/nets/test_bundle_init_bundle.py index 90d02cdafa..13212091cb 100644 --- a/tests/test_bundle_init_bundle.py +++ b/tests/networks/nets/test_bundle_init_bundle.py @@ -23,7 +23,6 @@ @skip_if_windows class TestBundleInit(unittest.TestCase): - def test_bundle(self): with tempfile.TemporaryDirectory() as tempdir: net = UNet(2, 1, 1, [4, 8], [2]) diff --git a/tests/test_cell_sam_wrapper.py b/tests/networks/nets/test_cell_sam_wrapper.py similarity index 100% rename from tests/test_cell_sam_wrapper.py rename to tests/networks/nets/test_cell_sam_wrapper.py diff --git a/tests/test_controlnet.py b/tests/networks/nets/test_controlnet.py similarity index 100% rename from tests/test_controlnet.py rename to tests/networks/nets/test_controlnet.py diff --git a/tests/test_daf3d.py b/tests/networks/nets/test_daf3d.py similarity index 99% rename from tests/test_daf3d.py rename to tests/networks/nets/test_daf3d.py index cbd150f439..e707cfb272 100644 --- a/tests/test_daf3d.py +++ b/tests/networks/nets/test_daf3d.py @@ -42,7 +42,6 @@ @unittest.skipUnless(has_tv, "torchvision not installed") class TestDAF3D(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_shape(self, input_param, input_shape, expected_shape): device = "cuda" if torch.cuda.is_available() else "cpu" diff --git a/tests/test_densenet.py b/tests/networks/nets/test_densenet.py similarity index 99% rename from tests/test_densenet.py rename to tests/networks/nets/test_densenet.py index 
e28528195d..b0f70a9bde 100644 --- a/tests/test_densenet.py +++ b/tests/networks/nets/test_densenet.py @@ -79,7 +79,6 @@ class TestPretrainedDENSENET(unittest.TestCase): - @parameterized.expand([TEST_PRETRAINED_2D_CASE_1, TEST_PRETRAINED_2D_CASE_2]) @skip_if_quick def test_121_2d_shape_pretrain(self, model, input_param, input_shape, expected_shape): @@ -104,7 +103,6 @@ def test_pretrain_consistency(self, model, input_param, input_shape): class TestDENSENET(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_densenet_shape(self, model, input_param, input_shape, expected_shape): net = model(**input_param).to(device) diff --git a/tests/test_diffusion_model_unet.py b/tests/networks/nets/test_diffusion_model_unet.py similarity index 100% rename from tests/test_diffusion_model_unet.py rename to tests/networks/nets/test_diffusion_model_unet.py diff --git a/tests/test_dints_network.py b/tests/networks/nets/test_dints_network.py similarity index 99% rename from tests/test_dints_network.py rename to tests/networks/nets/test_dints_network.py index 178b87a3dc..449d5045fe 100644 --- a/tests/test_dints_network.py +++ b/tests/networks/nets/test_dints_network.py @@ -115,7 +115,6 @@ @skip_if_quick class TestDints(unittest.TestCase): - @parameterized.expand(TEST_CASES_3D + TEST_CASES_2D) def test_dints_inference(self, dints_grid_params, dints_params, input_shape, expected_shape): grid = TopologySearch(**dints_grid_params) @@ -156,7 +155,6 @@ def test_dints_search(self, dints_grid_params, dints_params, input_shape, expect @SkipIfBeforePyTorchVersion((1, 9)) class TestDintsTS(unittest.TestCase): - @parameterized.expand(TEST_CASES_3D + TEST_CASES_2D) def test_script(self, dints_grid_params, dints_params, input_shape, _): grid = TopologyInstance(**dints_grid_params) diff --git a/tests/test_discriminator.py b/tests/networks/nets/test_discriminator.py similarity index 99% rename from tests/test_discriminator.py rename to tests/networks/nets/test_discriminator.py index b13c825284..8f460a2450 100644 --- a/tests/test_discriminator.py +++ b/tests/networks/nets/test_discriminator.py @@ -42,7 +42,6 @@ class TestDiscriminator(unittest.TestCase): - @parameterized.expand(CASES) def test_shape(self, input_param, input_data, expected_shape): net = Discriminator(**input_param) diff --git a/tests/test_dynunet.py b/tests/networks/nets/test_dynunet.py similarity index 99% rename from tests/test_dynunet.py rename to tests/networks/nets/test_dynunet.py index f7096b8970..a9dd588e13 100644 --- a/tests/test_dynunet.py +++ b/tests/networks/nets/test_dynunet.py @@ -117,7 +117,6 @@ class TestDynUNet(unittest.TestCase): - @parameterized.expand(TEST_CASE_DYNUNET_3D) def test_shape(self, input_param, input_shape, expected_shape): net = DynUNet(**input_param).to(device) @@ -137,7 +136,6 @@ def test_script(self): @skip_if_no_cuda @skip_if_windows class TestDynUNetWithInstanceNorm3dNVFuser(unittest.TestCase): - def setUp(self): try: layer = InstanceNorm3dNVFuser(num_features=1, affine=False).to("cuda:0") @@ -171,7 +169,6 @@ def test_consistency(self, input_param, input_shape, _): class TestDynUNetDeepSupervision(unittest.TestCase): - @parameterized.expand(TEST_CASE_DEEP_SUPERVISION) def test_shape(self, input_param, input_shape, expected_shape): net = DynUNet(**input_param).to(device) diff --git a/tests/test_efficientnet.py b/tests/networks/nets/test_efficientnet.py similarity index 97% rename from tests/test_efficientnet.py rename to tests/networks/nets/test_efficientnet.py index 92c7c667c3..e76d5a6d5a 100644 --- 
a/tests/test_efficientnet.py +++ b/tests/networks/nets/test_efficientnet.py @@ -13,6 +13,7 @@ import os import unittest +from pathlib import Path from typing import TYPE_CHECKING from unittest import skipUnless @@ -30,6 +31,8 @@ from monai.utils import optional_import from tests.test_utils import skip_if_downloading_fails, skip_if_quick, test_pretrained_networks, test_script_save +TESTS_PATH = Path(__file__).parents[2] + if TYPE_CHECKING: import torchvision @@ -162,7 +165,7 @@ def make_shape_cases( "norm": ("batch", {"eps": 1e-3, "momentum": 0.01}), "adv_prop": False, }, - os.path.join(os.path.dirname(__file__), "testing_data", "kitty_test.jpg"), + os.path.join(TESTS_PATH, "testing_data", "kitty_test.jpg"), 282, # ~ tiger cat ), ( @@ -174,7 +177,7 @@ def make_shape_cases( "in_channels": 3, "num_classes": 1000, }, - os.path.join(os.path.dirname(__file__), "testing_data", "kitty_test.jpg"), + os.path.join(TESTS_PATH, "testing_data", "kitty_test.jpg"), 282, # ~ tiger cat ), ( @@ -186,7 +189,7 @@ def make_shape_cases( "in_channels": 3, "num_classes": 1000, }, - os.path.join(os.path.dirname(__file__), "testing_data", "kitty_test.jpg"), + os.path.join(TESTS_PATH, "testing_data", "kitty_test.jpg"), 282, # ~ tiger cat ), ] @@ -248,7 +251,6 @@ def make_shape_cases( class TestEFFICIENTNET(unittest.TestCase): - @parameterized.expand(CASES_1D + CASES_2D + CASES_3D + CASES_VARIATIONS) def test_shape(self, input_param, input_shape, expected_shape): device = "cuda" if torch.cuda.is_available() else "cpu" @@ -377,7 +379,6 @@ def test_script(self): class TestExtractFeatures(unittest.TestCase): - @parameterized.expand(CASE_EXTRACT_FEATURES) def test_shape(self, input_param, input_shape, expected_shapes): device = "cuda" if torch.cuda.is_available() else "cpu" diff --git a/tests/test_flexible_unet.py b/tests/networks/nets/test_flexible_unet.py similarity index 99% rename from tests/test_flexible_unet.py rename to tests/networks/nets/test_flexible_unet.py index f834d5d45f..5efe436c27 100644 --- a/tests/test_flexible_unet.py +++ b/tests/networks/nets/test_flexible_unet.py @@ -34,7 +34,6 @@ class DummyEncoder(BaseEncoder): - @classmethod def get_encoder_parameters(cls): basic_dict = {"spatial_dims": 2, "in_channels": 3, "pretrained": False} @@ -282,7 +281,6 @@ def make_error_case(): @SkipIfNoModule("hf_hub_download") @skip_if_quick class TestFLEXIBLEUNET(unittest.TestCase): - @parameterized.expand(CASES_2D + CASES_3D + CASES_VARIATIONS) def test_shape(self, input_param, input_shape, expected_shape): device = "cuda" if torch.cuda.is_available() else "cpu" @@ -323,7 +321,6 @@ def test_error_raise(self, input_param): class TestFlexUNetEncoderRegister(unittest.TestCase): - @parameterized.expand(CASE_REGISTER_ENCODER) def test_regist(self, encoder): tmp_backbone = FlexUNetEncoderRegister() diff --git a/tests/test_fullyconnectednet.py b/tests/networks/nets/test_fullyconnectednet.py similarity index 100% rename from tests/test_fullyconnectednet.py rename to tests/networks/nets/test_fullyconnectednet.py diff --git a/tests/test_generator.py b/tests/networks/nets/test_generator.py similarity index 99% rename from tests/test_generator.py rename to tests/networks/nets/test_generator.py index 9c8bc33494..9ec08194e9 100644 --- a/tests/test_generator.py +++ b/tests/networks/nets/test_generator.py @@ -42,7 +42,6 @@ class TestGenerator(unittest.TestCase): - @parameterized.expand(CASES) def test_shape(self, input_param, input_data, expected_shape): net = Generator(**input_param) diff --git a/tests/test_globalnet.py 
b/tests/networks/nets/test_globalnet.py similarity index 99% rename from tests/test_globalnet.py rename to tests/networks/nets/test_globalnet.py index aa8bd77312..ecb0243a1b 100644 --- a/tests/test_globalnet.py +++ b/tests/networks/nets/test_globalnet.py @@ -65,7 +65,6 @@ class TestAffineHead(unittest.TestCase): - @parameterized.expand(TEST_CASES_AFFINE_TRANSFORM) def test_shape(self, input_param, theta, expected_val): layer = AffineHead(**input_param) @@ -79,7 +78,6 @@ def test_shape(self, input_param, theta, expected_val): class TestGlobalNet(unittest.TestCase): - @parameterized.expand(TEST_CASES_GLOBAL_NET) def test_shape(self, input_param, input_shape, expected_shape): net = GlobalNet(**input_param).to(device) diff --git a/tests/test_highresnet.py b/tests/networks/nets/test_highresnet.py similarity index 99% rename from tests/test_highresnet.py rename to tests/networks/nets/test_highresnet.py index bf95f4579a..1384dfaeff 100644 --- a/tests/test_highresnet.py +++ b/tests/networks/nets/test_highresnet.py @@ -48,7 +48,6 @@ class TestHighResNet(DistTestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4]) def test_shape(self, input_param, input_shape, expected_shape): net = HighResNet(**input_param).to(device) diff --git a/tests/test_hovernet.py b/tests/networks/nets/test_hovernet.py similarity index 99% rename from tests/test_hovernet.py rename to tests/networks/nets/test_hovernet.py index a664bfe1a7..58657e6dea 100644 --- a/tests/test_hovernet.py +++ b/tests/networks/nets/test_hovernet.py @@ -154,7 +154,6 @@ def check_kernels(net, mode): class TestHoverNet(unittest.TestCase): - @parameterized.expand(CASES) def test_shape(self, input_param, input_shape, expected_shapes): input_param["decoder_padding"] = False diff --git a/tests/test_mednext.py b/tests/networks/nets/test_mednext.py similarity index 100% rename from tests/test_mednext.py rename to tests/networks/nets/test_mednext.py diff --git a/tests/test_milmodel.py b/tests/networks/nets/test_milmodel.py similarity index 99% rename from tests/test_milmodel.py rename to tests/networks/nets/test_milmodel.py index ee2b969ea2..4e3c9056ef 100644 --- a/tests/test_milmodel.py +++ b/tests/networks/nets/test_milmodel.py @@ -63,7 +63,6 @@ class TestMilModel(unittest.TestCase): - @parameterized.expand(TEST_CASE_MILMODEL) def test_shape(self, input_param, input_shape, expected_shape): with skip_if_downloading_fails(): diff --git a/tests/test_net_adapter.py b/tests/networks/nets/test_net_adapter.py similarity index 99% rename from tests/test_net_adapter.py rename to tests/networks/nets/test_net_adapter.py index c441f7409b..08344900e4 100644 --- a/tests/test_net_adapter.py +++ b/tests/networks/nets/test_net_adapter.py @@ -42,7 +42,6 @@ class TestNetAdapter(unittest.TestCase): - @parameterized.expand([TEST_CASE_0, TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4]) def test_shape(self, input_param, input_shape, expected_shape): spatial_dims = input_param["dim"] diff --git a/tests/test_network_consistency.py b/tests/networks/nets/test_network_consistency.py similarity index 99% rename from tests/test_network_consistency.py rename to tests/networks/nets/test_network_consistency.py index 6b67ba8ab2..e09826de75 100644 --- a/tests/test_network_consistency.py +++ b/tests/networks/nets/test_network_consistency.py @@ -38,7 +38,6 @@ class TestNetworkConsistency(unittest.TestCase): - def setUp(self): set_determinism(0) diff --git a/tests/test_patch_gan_dicriminator.py b/tests/networks/nets/test_patch_gan_dicriminator.py similarity 
index 100% rename from tests/test_patch_gan_dicriminator.py rename to tests/networks/nets/test_patch_gan_dicriminator.py diff --git a/tests/test_quicknat.py b/tests/networks/nets/test_quicknat.py similarity index 99% rename from tests/test_quicknat.py rename to tests/networks/nets/test_quicknat.py index 918e4c6e28..6653965c08 100644 --- a/tests/test_quicknat.py +++ b/tests/networks/nets/test_quicknat.py @@ -38,7 +38,6 @@ @unittest.skipUnless(has_se, "squeeze_and_excitation not installed") class TestQuicknat(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_shape(self, input_param, input_shape, expected_shape): device = "cuda" if torch.cuda.is_available() else "cpu" diff --git a/tests/test_resnet.py b/tests/networks/nets/test_resnet.py similarity index 99% rename from tests/test_resnet.py rename to tests/networks/nets/test_resnet.py index ad5ee322e4..371ec89682 100644 --- a/tests/test_resnet.py +++ b/tests/networks/nets/test_resnet.py @@ -242,7 +242,6 @@ class TestResNet(unittest.TestCase): - def setUp(self): self.tmp_ckpt_filename = os.path.join("tests", "monai_unittest_tmp_ckpt.pth") @@ -320,7 +319,6 @@ def test_script(self, model, input_param, input_shape, expected_shape): @SkipIfNoModule("hf_hub_download") class TestExtractFeatures(unittest.TestCase): - @parameterized.expand(CASE_EXTRACT_FEATURES) def test_shape(self, input_param, input_shape, expected_shapes): device = "cuda" if torch.cuda.is_available() else "cpu" diff --git a/tests/test_segresnet.py b/tests/networks/nets/test_segresnet.py similarity index 99% rename from tests/test_segresnet.py rename to tests/networks/nets/test_segresnet.py index 82f530cb8d..b3b3d1051a 100644 --- a/tests/test_segresnet.py +++ b/tests/networks/nets/test_segresnet.py @@ -83,7 +83,6 @@ class TestResNet(unittest.TestCase): - @parameterized.expand(TEST_CASE_SEGRESNET + TEST_CASE_SEGRESNET_2) def test_shape(self, input_param, input_shape, expected_shape): net = SegResNet(**input_param).to(device) @@ -103,7 +102,6 @@ def test_script(self): class TestResNetVAE(unittest.TestCase): - @parameterized.expand(TEST_CASE_SEGRESNET_VAE) def test_vae_shape(self, input_param, input_shape, expected_shape): net = SegResNetVAE(**input_param).to(device) diff --git a/tests/test_segresnet_ds.py b/tests/networks/nets/test_segresnet_ds.py similarity index 99% rename from tests/test_segresnet_ds.py rename to tests/networks/nets/test_segresnet_ds.py index 858d958f1c..064b2ba06c 100644 --- a/tests/test_segresnet_ds.py +++ b/tests/networks/nets/test_segresnet_ds.py @@ -72,7 +72,6 @@ class TestSegResNetDS(unittest.TestCase): - @parameterized.expand(TEST_CASE_SEGRESNET_DS) def test_shape(self, input_param, input_shape, expected_shape): net = SegResNetDS(**input_param).to(device) diff --git a/tests/test_senet.py b/tests/networks/nets/test_senet.py similarity index 98% rename from tests/test_senet.py rename to tests/networks/nets/test_senet.py index a1dc11e4cc..90d711d0d9 100644 --- a/tests/test_senet.py +++ b/tests/networks/nets/test_senet.py @@ -13,6 +13,7 @@ import os import unittest +from pathlib import Path from typing import TYPE_CHECKING from unittest import skipUnless @@ -58,7 +59,6 @@ class TestSENET(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_6, TEST_CASE_7]) def test_senet_shape(self, net, net_args): input_data = torch.randn(2, 2, 64, 64, 64).to(device) @@ -76,7 +76,6 @@ def test_script(self, net, net_args): class TestPretrainedSENET(unittest.TestCase): - def setUp(self): 
self.original_urls = se_mod.SE_NET_MODELS.copy() replace_url = test_is_quick() @@ -88,7 +87,7 @@ def setUp(self): if "certificate" in str(rt_e): # [SSL: CERTIFICATE_VERIFY_FAILED] replace_url = True if replace_url: - testing_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testing_data") + testing_dir = Path(__file__).parents[2] / "testing_data" testing_data_urls = { "senet154": { "url": testing_data_config("models", "senet154-c7b49a05", "url"), diff --git a/tests/test_spade_autoencoderkl.py b/tests/networks/nets/test_spade_autoencoderkl.py similarity index 100% rename from tests/test_spade_autoencoderkl.py rename to tests/networks/nets/test_spade_autoencoderkl.py diff --git a/tests/test_spade_diffusion_model_unet.py b/tests/networks/nets/test_spade_diffusion_model_unet.py similarity index 100% rename from tests/test_spade_diffusion_model_unet.py rename to tests/networks/nets/test_spade_diffusion_model_unet.py diff --git a/tests/test_spade_vaegan.py b/tests/networks/nets/test_spade_vaegan.py similarity index 100% rename from tests/test_spade_vaegan.py rename to tests/networks/nets/test_spade_vaegan.py diff --git a/tests/test_swin_unetr.py b/tests/networks/nets/test_swin_unetr.py similarity index 99% rename from tests/test_swin_unetr.py rename to tests/networks/nets/test_swin_unetr.py index 08dee959bb..4c506a2861 100644 --- a/tests/test_swin_unetr.py +++ b/tests/networks/nets/test_swin_unetr.py @@ -76,7 +76,6 @@ class TestSWINUNETR(unittest.TestCase): - @parameterized.expand(TEST_CASE_SWIN_UNETR) @skipUnless(has_einops, "Requires einops") def test_shape(self, input_param, input_shape, expected_shape): diff --git a/tests/test_torchvision_fc_model.py b/tests/networks/nets/test_torchvision_fc_model.py similarity index 99% rename from tests/test_torchvision_fc_model.py rename to tests/networks/nets/test_torchvision_fc_model.py index e6bf3f1e7a..7e03b4ebb9 100644 --- a/tests/test_torchvision_fc_model.py +++ b/tests/networks/nets/test_torchvision_fc_model.py @@ -153,7 +153,6 @@ class TestTorchVisionFCModel(unittest.TestCase): - @parameterized.expand( [TEST_CASE_0, TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_6, TEST_CASE_7] + ([TEST_CASE_8] if has_enum else []) @@ -188,7 +187,6 @@ def test_with_pretrained(self, input_param, input_shape, expected_shape, expecte class TestLookup(unittest.TestCase): - def test_get_module(self): net = UNet(spatial_dims=2, in_channels=1, out_channels=1, channels=(4, 8, 16, 32, 64), strides=(2, 2, 2, 2)) self.assertEqual(look_up_named_module("", net), net) diff --git a/tests/test_transchex.py b/tests/networks/nets/test_transchex.py similarity index 99% rename from tests/test_transchex.py rename to tests/networks/nets/test_transchex.py index 0940cf62ab..f10c914f0d 100644 --- a/tests/test_transchex.py +++ b/tests/networks/nets/test_transchex.py @@ -47,7 +47,6 @@ @skip_if_quick class TestTranschex(unittest.TestCase): - @parameterized.expand(TEST_CASE_TRANSCHEX) def test_shape(self, input_param, expected_shape): net = Transchex(**input_param) diff --git a/tests/test_transformer.py b/tests/networks/nets/test_transformer.py similarity index 99% rename from tests/test_transformer.py rename to tests/networks/nets/test_transformer.py index fea5d023bf..f9264ba153 100644 --- a/tests/test_transformer.py +++ b/tests/networks/nets/test_transformer.py @@ -68,7 +68,6 @@ def test_attention_dim_not_multiple_of_heads(self): @skipUnless(has_einops, "Requires einops") def test_dropout_rate_negative(self): - with self.assertRaises(ValueError): 
DecoderOnlyTransformer( num_tokens=10, diff --git a/tests/test_unet.py b/tests/networks/nets/test_unet.py similarity index 99% rename from tests/test_unet.py rename to tests/networks/nets/test_unet.py index 41310eca2b..7a6d0e98bb 100644 --- a/tests/test_unet.py +++ b/tests/networks/nets/test_unet.py @@ -165,7 +165,6 @@ class TestUNET(unittest.TestCase): - @parameterized.expand(CASES) def test_shape(self, input_param, input_shape, expected_shape): net = UNet(**input_param).to(device) diff --git a/tests/test_unetr.py b/tests/networks/nets/test_unetr.py similarity index 99% rename from tests/test_unetr.py rename to tests/networks/nets/test_unetr.py index 7aef81c184..9e37750b4a 100644 --- a/tests/test_unetr.py +++ b/tests/networks/nets/test_unetr.py @@ -57,7 +57,6 @@ @skip_if_quick class TestUNETR(unittest.TestCase): - @parameterized.expand(TEST_CASE_UNETR) def test_shape(self, input_param, input_shape, expected_shape): net = UNETR(**input_param) diff --git a/tests/test_varautoencoder.py b/tests/networks/nets/test_varautoencoder.py similarity index 99% rename from tests/test_varautoencoder.py rename to tests/networks/nets/test_varautoencoder.py index aaaa11886c..459c537c55 100644 --- a/tests/test_varautoencoder.py +++ b/tests/networks/nets/test_varautoencoder.py @@ -108,7 +108,6 @@ class TestVarAutoEncoder(unittest.TestCase): - @parameterized.expand(CASES) def test_shape(self, input_param, input_shape, expected_shape): net = VarAutoEncoder(**input_param).to(device) diff --git a/tests/test_vista3d.py b/tests/networks/nets/test_vista3d.py similarity index 99% rename from tests/test_vista3d.py rename to tests/networks/nets/test_vista3d.py index 05b40b5beb..bdf424f69e 100644 --- a/tests/test_vista3d.py +++ b/tests/networks/nets/test_vista3d.py @@ -57,7 +57,6 @@ @SkipIfBeforePyTorchVersion((1, 11)) @skip_if_quick class TestVista3d(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_vista3d_shape(self, args, input_params, input_shape, expected_shape): segresnet = SegResNetDS2( diff --git a/tests/test_vit.py b/tests/networks/nets/test_vit.py similarity index 99% rename from tests/test_vit.py rename to tests/networks/nets/test_vit.py index ba1659f01a..56b7807449 100644 --- a/tests/test_vit.py +++ b/tests/networks/nets/test_vit.py @@ -61,7 +61,6 @@ @skip_if_quick class TestViT(unittest.TestCase): - @parameterized.expand(TEST_CASE_Vit) def test_shape(self, input_param, input_shape, expected_shape): net = ViT(**input_param) diff --git a/tests/test_vitautoenc.py b/tests/networks/nets/test_vitautoenc.py similarity index 99% rename from tests/test_vitautoenc.py rename to tests/networks/nets/test_vitautoenc.py index 00eb3e12e7..97f144aa2d 100644 --- a/tests/test_vitautoenc.py +++ b/tests/networks/nets/test_vitautoenc.py @@ -66,7 +66,6 @@ @skip_if_quick class TestVitAutoenc(unittest.TestCase): - def setUp(self): self.threads = torch.get_num_threads() torch.set_num_threads(4) diff --git a/tests/test_vnet.py b/tests/networks/nets/test_vnet.py similarity index 99% rename from tests/test_vnet.py rename to tests/networks/nets/test_vnet.py index f0d8989813..6c93893480 100644 --- a/tests/test_vnet.py +++ b/tests/networks/nets/test_vnet.py @@ -55,7 +55,6 @@ class TestVNet(unittest.TestCase): - @parameterized.expand( [ TEST_CASE_VNET_2D_1, diff --git a/tests/test_voxelmorph.py b/tests/networks/nets/test_voxelmorph.py similarity index 99% rename from tests/test_voxelmorph.py rename to tests/networks/nets/test_voxelmorph.py index fc302df071..1a04bab568 100644 --- a/tests/test_voxelmorph.py +++ 
b/tests/networks/nets/test_voxelmorph.py @@ -245,7 +245,6 @@ class TestVOXELMORPH(unittest.TestCase): - @parameterized.expand(CASES) def test_shape(self, input_param, input_shape, expected_shape): net = VoxelMorphUNet(**input_param).to(device) diff --git a/tests/test_vqvae.py b/tests/networks/nets/test_vqvae.py similarity index 100% rename from tests/test_vqvae.py rename to tests/networks/nets/test_vqvae.py diff --git a/tests/test_vqvaetransformer_inferer.py b/tests/networks/nets/test_vqvaetransformer_inferer.py similarity index 100% rename from tests/test_vqvaetransformer_inferer.py rename to tests/networks/nets/test_vqvaetransformer_inferer.py diff --git a/tests/networks/schedulers/__init__.py b/tests/networks/schedulers/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/networks/schedulers/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/test_scheduler_ddim.py b/tests/networks/schedulers/test_scheduler_ddim.py similarity index 100% rename from tests/test_scheduler_ddim.py rename to tests/networks/schedulers/test_scheduler_ddim.py diff --git a/tests/test_scheduler_ddpm.py b/tests/networks/schedulers/test_scheduler_ddpm.py similarity index 100% rename from tests/test_scheduler_ddpm.py rename to tests/networks/schedulers/test_scheduler_ddpm.py diff --git a/tests/test_scheduler_pndm.py b/tests/networks/schedulers/test_scheduler_pndm.py similarity index 100% rename from tests/test_scheduler_pndm.py rename to tests/networks/schedulers/test_scheduler_pndm.py diff --git a/tests/test_bundle_onnx_export.py b/tests/networks/test_bundle_onnx_export.py similarity index 91% rename from tests/test_bundle_onnx_export.py rename to tests/networks/test_bundle_onnx_export.py index 6453f47fd5..85f908c185 100644 --- a/tests/test_bundle_onnx_export.py +++ b/tests/networks/test_bundle_onnx_export.py @@ -14,6 +14,7 @@ import os import tempfile import unittest +from pathlib import Path from parameterized import parameterized @@ -29,7 +30,6 @@ @SkipIfNoModule("onnx") @SkipIfBeforePyTorchVersion((1, 10)) class TestONNXExport(unittest.TestCase): - def setUp(self): self.device = os.environ.get("CUDA_VISIBLE_DEVICES") if not self.device: @@ -43,8 +43,9 @@ def tearDown(self): @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) def test_onnx_export(self, use_trace): - meta_file = os.path.join(os.path.dirname(__file__), "testing_data", "metadata.json") - config_file = os.path.join(os.path.dirname(__file__), "testing_data", "inference.json") + tests_path = Path(__file__).parents[1] + meta_file = os.path.join(tests_path, "testing_data", "metadata.json") + config_file = os.path.join(tests_path, "testing_data", "inference.json") with tempfile.TemporaryDirectory() as tempdir: def_args = {"meta_file": "will be replaced by `meta_file` arg"} def_args_file = os.path.join(tempdir, "def_args.yaml") diff --git a/tests/test_convert_to_onnx.py b/tests/networks/test_convert_to_onnx.py similarity index 99% rename from 
tests/test_convert_to_onnx.py rename to tests/networks/test_convert_to_onnx.py index 23e5951b85..743630c67d 100644 --- a/tests/test_convert_to_onnx.py +++ b/tests/networks/test_convert_to_onnx.py @@ -43,7 +43,6 @@ @SkipIfBeforePyTorchVersion((1, 9)) @skip_if_quick class TestConvertToOnnx(unittest.TestCase): - @parameterized.expand(TESTS) def test_unet(self, device, use_trace, use_ort): if use_ort: diff --git a/tests/test_convert_to_torchscript.py b/tests/networks/test_convert_to_torchscript.py similarity index 100% rename from tests/test_convert_to_torchscript.py rename to tests/networks/test_convert_to_torchscript.py diff --git a/tests/test_convert_to_trt.py b/tests/networks/test_convert_to_trt.py similarity index 99% rename from tests/test_convert_to_trt.py rename to tests/networks/test_convert_to_trt.py index 18f2e6d13c..8d3dd9e648 100644 --- a/tests/test_convert_to_trt.py +++ b/tests/networks/test_convert_to_trt.py @@ -40,7 +40,6 @@ @skip_if_quick @SkipIfBeforeComputeCapabilityVersion((7, 5)) class TestConvertToTRT(unittest.TestCase): - def setUp(self): self.gpu_device = torch.cuda.current_device() diff --git a/tests/test_save_state.py b/tests/networks/test_save_state.py similarity index 100% rename from tests/test_save_state.py rename to tests/networks/test_save_state.py diff --git a/tests/test_to_onehot.py b/tests/networks/test_to_onehot.py similarity index 100% rename from tests/test_to_onehot.py rename to tests/networks/test_to_onehot.py diff --git a/tests/test_varnet.py b/tests/networks/test_varnet.py similarity index 99% rename from tests/test_varnet.py rename to tests/networks/test_varnet.py index b1f38dd30c..23e2ff5a36 100644 --- a/tests/test_varnet.py +++ b/tests/networks/test_varnet.py @@ -32,7 +32,6 @@ class TestVarNet(unittest.TestCase): - @parameterized.expand(TESTS) def test_shape(self, coil_sens_model, refinement_model, num_cascades, input_shape, expected_shape): net = VariationalNetworkModel(coil_sens_model, refinement_model, num_cascades).to(device) diff --git a/tests/networks/utils/__init__.py b/tests/networks/utils/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/networks/utils/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
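The hunks above for `test_efficientnet.py`, `test_senet.py` and `test_bundle_onnx_export.py` replace `os.path.dirname(__file__)` lookups with `Path(__file__).parents[N]`, because the relocated tests now sit one to three package levels below `tests/` while the shared fixtures remain in `tests/testing_data`. A minimal sketch of the pattern, assuming a hypothetical module at `tests/networks/nets/test_example.py` (not a file in this patch):

```
from pathlib import Path

# Hypothetical module: tests/networks/nets/test_example.py (illustration only).
# parents[0] -> tests/networks/nets, parents[1] -> tests/networks, parents[2] -> tests
TESTS_PATH = Path(__file__).parents[2]

# The shared fixtures stay at tests/testing_data, so lookups are anchored on the
# tests/ root rather than on the (now deeper) directory of the test module itself.
kitty_image = TESTS_PATH / "testing_data" / "kitty_test.jpg"
print(kitty_image)
```

Modules that moved only one level down, such as `tests/networks/test_bundle_onnx_export.py` above, use `parents[1]` for the same reason.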
diff --git a/tests/test_copy_model_state.py b/tests/networks/utils/test_copy_model_state.py similarity index 100% rename from tests/test_copy_model_state.py rename to tests/networks/utils/test_copy_model_state.py diff --git a/tests/test_eval_mode.py b/tests/networks/utils/test_eval_mode.py similarity index 100% rename from tests/test_eval_mode.py rename to tests/networks/utils/test_eval_mode.py diff --git a/tests/test_freeze_layers.py b/tests/networks/utils/test_freeze_layers.py similarity index 95% rename from tests/test_freeze_layers.py rename to tests/networks/utils/test_freeze_layers.py index 7be8e576bf..46b2e461f8 100644 --- a/tests/test_freeze_layers.py +++ b/tests/networks/utils/test_freeze_layers.py @@ -18,7 +18,7 @@ from monai.networks.utils import freeze_layers from monai.utils import set_determinism -from tests.test_copy_model_state import _TestModelOne, _TestModelTwo +from tests.networks.utils.test_copy_model_state import _TestModelOne, _TestModelTwo TEST_CASES = [] __devices = ("cpu", "cuda") if torch.cuda.is_available() else ("cpu",) @@ -27,7 +27,6 @@ class TestModuleState(unittest.TestCase): - def tearDown(self): set_determinism(None) diff --git a/tests/test_replace_module.py b/tests/networks/utils/test_replace_module.py similarity index 99% rename from tests/test_replace_module.py rename to tests/networks/utils/test_replace_module.py index d3fc105292..9c0752d1c5 100644 --- a/tests/test_replace_module.py +++ b/tests/networks/utils/test_replace_module.py @@ -32,7 +32,6 @@ class TestReplaceModule(unittest.TestCase): - def setUp(self): self.net = DenseNet121(spatial_dims=2, in_channels=1, out_channels=3) self.num_relus = self.get_num_modules(torch.nn.ReLU) diff --git a/tests/test_train_mode.py b/tests/networks/utils/test_train_mode.py similarity index 100% rename from tests/test_train_mode.py rename to tests/networks/utils/test_train_mode.py diff --git a/tests/ngc_bundle_download.py b/tests/ngc_bundle_download.py index ee34451d75..7953f6201b 100644 --- a/tests/ngc_bundle_download.py +++ b/tests/ngc_bundle_download.py @@ -70,7 +70,6 @@ @skip_if_windows class TestNgcBundleDownload(unittest.TestCase): - @parameterized.expand([TEST_CASE_NGC_1, TEST_CASE_NGC_2]) @skip_if_quick def test_ngc_download_bundle(self, bundle_name, version, remove_prefix, download_name, file_path, hash_val): @@ -102,7 +101,6 @@ def test_ngc_download_bundle(self, bundle_name, version, remove_prefix, download @unittest.skip("deprecating mmar tests") class TestAllDownloadingMMAR(unittest.TestCase): - def setUp(self): print_debug_info() self.test_dir = "./" diff --git a/tests/optimizers/__init__.py b/tests/optimizers/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/optimizers/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
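The `test_freeze_layers.py` hunk above now imports its helper models through the package path `tests.networks.utils.test_copy_model_state`, which only resolves because every directory introduced in this series carries an `__init__.py`. A small sketch of how such a dotted test-module path resolves, assuming the repository root (the directory containing `tests/`) is on `sys.path`:

```
import importlib

# Assumes the suite is launched from the repository root, so "tests" and its
# subdirectories are importable as packages via the new __init__.py files.
mod = importlib.import_module("tests.networks.utils.test_copy_model_state")
print(mod.__file__)  # .../tests/networks/utils/test_copy_model_state.py

# The helpers referenced by test_freeze_layers.py are then plain attributes:
_TestModelOne = mod._TestModelOne
_TestModelTwo = mod._TestModelTwo
```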
diff --git a/tests/test_generate_param_groups.py b/tests/optimizers/test_generate_param_groups.py similarity index 99% rename from tests/test_generate_param_groups.py rename to tests/optimizers/test_generate_param_groups.py index 8c49a432b2..58e743a7f6 100644 --- a/tests/test_generate_param_groups.py +++ b/tests/optimizers/test_generate_param_groups.py @@ -68,7 +68,6 @@ class TestGenerateParamGroups(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_6]) def test_lr_values(self, input_param, expected_values, expected_groups): device = "cuda" if torch.cuda.is_available() else "cpu" diff --git a/tests/test_lr_finder.py b/tests/optimizers/test_lr_finder.py similarity index 96% rename from tests/test_lr_finder.py rename to tests/optimizers/test_lr_finder.py index e53539f6fd..15edeeb367 100644 --- a/tests/test_lr_finder.py +++ b/tests/optimizers/test_lr_finder.py @@ -11,11 +11,11 @@ from __future__ import annotations -import os import pickle import random import sys import unittest +from pathlib import Path from typing import TYPE_CHECKING import torch @@ -48,11 +48,10 @@ @unittest.skipUnless(sys.platform == "linux", "requires linux") @unittest.skipUnless(has_pil, "requires PIL") class TestLRFinder(unittest.TestCase): - def setUp(self): self.root_dir = MONAIEnvVars.data_dir() if not self.root_dir: - self.root_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testing_data") + self.root_dir = Path(__file__).parents[1] / "testing_data" self.transforms = Compose( [ diff --git a/tests/test_lr_scheduler.py b/tests/optimizers/test_lr_scheduler.py similarity index 100% rename from tests/test_lr_scheduler.py rename to tests/optimizers/test_lr_scheduler.py diff --git a/tests/test_optim_novograd.py b/tests/optimizers/test_optim_novograd.py similarity index 100% rename from tests/test_optim_novograd.py rename to tests/optimizers/test_optim_novograd.py diff --git a/tests/padders.py b/tests/padders.py index 94f3fa76bc..67ae9cef08 100644 --- a/tests/padders.py +++ b/tests/padders.py @@ -51,7 +51,6 @@ class PadTest(unittest.TestCase): - @staticmethod def get_arr(shape): return np.random.randint(100, size=shape).astype(float) diff --git a/tests/profile_subclass/__init__.py b/tests/profile_subclass/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/profile_subclass/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/tests/runner.py b/tests/runner.py
index 385ca4256f..8079a26091 100644
--- a/tests/runner.py
+++ b/tests/runner.py
@@ -12,13 +12,13 @@
 from __future__ import annotations
 import argparse
-import glob
 import inspect
 import os
 import re
 import sys
 import time
 import unittest
+from pathlib import Path
 from monai.utils import PerfContext
@@ -45,7 +45,7 @@ def stopTest(self, test): # noqa: N802
         name = self.getDescription(test)
         self.stream.write(f"Finished test: {name} ({elapsed:.03}s)\n")
         if name in results:
-            raise AssertionError("expected all keys to be unique")
+            raise AssertionError(f"expected all keys to be unique, but {name} is duplicated")
         results[name] = elapsed
         super().stopTest(test)
@@ -124,13 +124,22 @@ def get_default_pattern(loader):
     # Get all test names (optionally from some path with some pattern)
     with PerfContext() as pc:
         # the files are searched from `tests/` folder, starting with `test_`
-        files = glob.glob(os.path.join(os.path.dirname(__file__), "test_*.py"))
+        tests_path = Path(__file__).parent / args.path
+        files = {
+            file.relative_to(tests_path).as_posix()
+            for file in tests_path.rglob("test_*py")
+            if re.search(args.pattern, file.name[:-3])
+        }
+        print(files)
         cases = []
-        for test_module in {os.path.basename(f)[:-3] for f in files}:
-            if re.match(args.pattern, test_module):
-                cases.append(f"tests.{test_module}")
+        for test_module in tests_path.rglob("test_*py"):
+            test_file = str(test_module.relative_to(tests_path).as_posix())
+            case_str = test_file.replace("/", ".")[:-3]
+            case_str = f"tests.{case_str}"
+            if test_file in files:
+                cases.append(case_str)
             else:
-                print(f"monai test runner: excluding tests.{test_module}")
+                print(f"monai test runner: excluding {test_module.name}")
     print(cases)
     tests = unittest.TestLoader().loadTestsFromNames(cases)
     discovery_time = pc.total_time
@@ -139,7 +148,6 @@ def get_default_pattern(loader):
     test_runner = unittest.runner.TextTestRunner(
         resultclass=TimeLoggingTestResult, verbosity=args.verbosity, failfast=args.failfast
     )
-    # Use try catches to print the current results if encountering exception or keyboard interruption
     try:
         test_result = test_runner.run(tests)
diff --git a/tests/test_auto3dseg.py b/tests/test_auto3dseg.py
index beeaece760..6c0d8123d7 100644
--- a/tests/test_auto3dseg.py
+++ b/tests/test_auto3dseg.py
@@ -171,7 +171,6 @@ def __call__(self, data):
 class TestDataAnalyzer(unittest.TestCase):
-
     def setUp(self):
         self.test_dir = tempfile.TemporaryDirectory()
         work_dir = self.test_dir.name
diff --git a/tests/test_call_dist.py b/tests/test_call_dist.py
index 63ab5982c7..4d547906a4 100644
--- a/tests/test_call_dist.py
+++ b/tests/test_call_dist.py
@@ -17,7 +17,6 @@
 class DistributedCallTest(DistTestCase):
-
     def test_constructor(self):
         with self.assertRaises(ValueError):
             DistCall(nnodes=1, nproc_per_node=0)
diff --git a/tests/test_masked_autoencoder_vit.py b/tests/test_masked_autoencoder_vit.py
index 973fbab662..b649c1266c 100644
--- a/tests/test_masked_autoencoder_vit.py
+++ b/tests/test_masked_autoencoder_vit.py
@@ -81,7 +81,6 @@
 @skip_if_quick
 class TestMaskedAutoencoderViT(unittest.TestCase):
-
     @parameterized.expand(TEST_CASE_MaskedAutoEncoderViT)
     def test_shape(self, input_param, input_shape, expected_shape):
         net = MaskedAutoEncoderViT(**input_param)
diff --git a/tests/test_query_memory.py b/tests/test_query_memory.py
index fd703c4013..97e618e015 100644
--- a/tests/test_query_memory.py
+++ b/tests/test_query_memory.py
@@ -17,7 +17,6 @@
 class TestQueryMemory(unittest.TestCase):
-
     def test_output_str(self):
self.assertTrue(isinstance(query_memory(2), str)) all_device = query_memory(-1) diff --git a/tests/test_rand_torchiod.py b/tests/test_rand_torchiod.py index 041dec8e08..97282eb6ba 100644 --- a/tests/test_rand_torchiod.py +++ b/tests/test_rand_torchiod.py @@ -31,7 +31,6 @@ @skipUnless(has_torchio, "Requires torchio") class TestRandTorchIOd(unittest.TestCase): - @parameterized.expand(TEST_PARAMS) def test_random_transform(self, input_param, input_data): set_determinism(seed=0) diff --git a/tests/test_set_visible_devices.py b/tests/test_set_visible_devices.py index 077a382962..f02f3f690f 100644 --- a/tests/test_set_visible_devices.py +++ b/tests/test_set_visible_devices.py @@ -18,7 +18,6 @@ class TestVisibleDevices(unittest.TestCase): - @staticmethod def run_process_and_get_exit_code(code_to_execute): value = os.system(code_to_execute) diff --git a/tests/test_timedcall_dist.py b/tests/test_timedcall_dist.py index 6c2cc7a653..28b4ab9306 100644 --- a/tests/test_timedcall_dist.py +++ b/tests/test_timedcall_dist.py @@ -50,7 +50,6 @@ def case_1_seconds_bad(arg=None): class TestTimedCall(unittest.TestCase): - def test_good_call(self): output = case_1_seconds() self.assertEqual(output, "good") diff --git a/tests/test_torchiod.py b/tests/test_torchiod.py index b4edc763d2..fadde6848c 100644 --- a/tests/test_torchiod.py +++ b/tests/test_torchiod.py @@ -36,7 +36,6 @@ @skipUnless(has_torchio, "Requires torchio") class TestTorchIOd(unittest.TestCase): - @parameterized.expand(TEST_PARAMS) def test_value(self, input_param, input_data, expected_value): result = TorchIOd(**input_param)(input_data) diff --git a/tests/transforms/__init__.py b/tests/transforms/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/transforms/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/transforms/compose/__init__.py b/tests/transforms/compose/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/transforms/compose/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
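The `tests/runner.py` hunk above replaces the flat `glob` over `tests/test_*.py` with a recursive walk that converts each discovered file path into a dotted module name before handing it to `unittest`. A simplified standalone sketch of that discovery step (the helper name `discover_cases` and the `sorted()` call are illustrative, not part of the patch):

```
import re
import unittest
from pathlib import Path


def discover_cases(tests_path: Path, pattern: str = "test_") -> list[str]:
    """Collect dotted module names for every test_*.py under tests_path (simplified sketch)."""
    cases = []
    for test_file in sorted(tests_path.rglob("test_*.py")):
        if not re.search(pattern, test_file.name[:-3]):
            continue  # mirror the runner's filter on the module name
        rel = test_file.relative_to(tests_path).as_posix()   # e.g. "networks/nets/test_unet.py"
        cases.append("tests." + rel[:-3].replace("/", "."))   # -> "tests.networks.nets.test_unet"
    return cases


if __name__ == "__main__":
    # Assumes the current working directory is the repository root containing tests/.
    names = discover_cases(Path("tests"))
    suite = unittest.TestLoader().loadTestsFromNames(names)
    print(len(names), "test modules discovered")
```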
diff --git a/tests/test_compose.py b/tests/transforms/compose/test_compose.py similarity index 100% rename from tests/test_compose.py rename to tests/transforms/compose/test_compose.py diff --git a/tests/test_some_of.py b/tests/transforms/compose/test_some_of.py similarity index 98% rename from tests/test_some_of.py rename to tests/transforms/compose/test_some_of.py index 3723732d51..7eda815d37 100644 --- a/tests/test_some_of.py +++ b/tests/transforms/compose/test_some_of.py @@ -26,8 +26,8 @@ from monai.transforms.compose import Compose, SomeOf from monai.utils import set_determinism from monai.utils.enums import TraceKeys -from tests.test_one_of import NonInv -from tests.test_random_order import InvC, InvD +from tests.integration.test_one_of import NonInv +from tests.transforms.test_random_order import InvC, InvD class A(Transform): diff --git a/tests/transforms/croppad/__init__.py b/tests/transforms/croppad/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/transforms/croppad/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/test_rand_weighted_crop.py b/tests/transforms/croppad/test_rand_weighted_crop.py similarity index 100% rename from tests/test_rand_weighted_crop.py rename to tests/transforms/croppad/test_rand_weighted_crop.py diff --git a/tests/test_rand_weighted_cropd.py b/tests/transforms/croppad/test_rand_weighted_cropd.py similarity index 99% rename from tests/test_rand_weighted_cropd.py rename to tests/transforms/croppad/test_rand_weighted_cropd.py index 5c432f8605..4fe0e1140a 100644 --- a/tests/test_rand_weighted_cropd.py +++ b/tests/transforms/croppad/test_rand_weighted_cropd.py @@ -148,7 +148,6 @@ def get_data(ndim): class TestRandWeightedCrop(unittest.TestCase): - @parameterized.expand(TESTS) def test_rand_weighted_cropd(self, _, init_params, input_data, expected_shape, expected_centers): crop = RandWeightedCropd(**init_params) diff --git a/tests/transforms/functional/__init__.py b/tests/transforms/functional/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/transforms/functional/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/tests/test_apply.py b/tests/transforms/functional/test_apply.py similarity index 99% rename from tests/test_apply.py rename to tests/transforms/functional/test_apply.py index 62300bb494..c7a0678af6 100644 --- a/tests/test_apply.py +++ b/tests/transforms/functional/test_apply.py @@ -39,7 +39,6 @@ def single_2d_transform_cases(): class TestApply(unittest.TestCase): - def _test_apply_impl(self, tensor, pending_transforms, expected_shape): result = apply_pending(tensor, pending_transforms) self.assertListEqual(result[1], pending_transforms) diff --git a/tests/test_resample.py b/tests/transforms/functional/test_resample.py similarity index 99% rename from tests/test_resample.py rename to tests/transforms/functional/test_resample.py index 3c5742d14e..40d264598d 100644 --- a/tests/test_resample.py +++ b/tests/transforms/functional/test_resample.py @@ -35,7 +35,6 @@ def rotate_90_2d(): class TestResampleFunction(unittest.TestCase): - @parameterized.expand(RESAMPLE_FUNCTION_CASES) def test_resample_function_impl(self, img, matrix, expected): out = resample(convert_to_tensor(img), matrix, {"lazy_shape": img.shape[1:], "lazy_padding_mode": "border"}) diff --git a/tests/transforms/intensity/__init__.py b/tests/transforms/intensity/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/transforms/intensity/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/tests/test_compute_ho_ver_maps.py b/tests/transforms/intensity/test_compute_ho_ver_maps.py similarity index 99% rename from tests/test_compute_ho_ver_maps.py rename to tests/transforms/intensity/test_compute_ho_ver_maps.py index b1e949b0be..046299b63d 100644 --- a/tests/test_compute_ho_ver_maps.py +++ b/tests/transforms/intensity/test_compute_ho_ver_maps.py @@ -62,7 +62,6 @@ @unittest.skipUnless(has_skimage, "Requires scikit-image library.") class ComputeHoVerMapsTests(unittest.TestCase): - @parameterized.expand(TESTS) def test_horizontal_certical_maps(self, in_type, arguments, mask, hv_mask): input_image = in_type(mask) diff --git a/tests/test_compute_ho_ver_maps_d.py b/tests/transforms/intensity/test_compute_ho_ver_maps_d.py similarity index 99% rename from tests/test_compute_ho_ver_maps_d.py rename to tests/transforms/intensity/test_compute_ho_ver_maps_d.py index 7d57492250..b35a574c28 100644 --- a/tests/test_compute_ho_ver_maps_d.py +++ b/tests/transforms/intensity/test_compute_ho_ver_maps_d.py @@ -63,7 +63,6 @@ @unittest.skipUnless(has_skimage, "Requires scikit-image library.") class ComputeHoVerMapsDictTests(unittest.TestCase): - @parameterized.expand(TESTS) def test_horizontal_certical_maps(self, in_type, arguments, mask, hv_mask): hv_key = list(hv_mask.keys())[0] diff --git a/tests/test_foreground_mask.py b/tests/transforms/intensity/test_foreground_mask.py similarity index 99% rename from tests/test_foreground_mask.py rename to tests/transforms/intensity/test_foreground_mask.py index b6c7d3a56c..67543c832e 100644 --- a/tests/test_foreground_mask.py +++ b/tests/transforms/intensity/test_foreground_mask.py @@ -81,7 +81,6 @@ @unittest.skipUnless(has_skimage, "Requires sci-kit image") class TestForegroundMask(unittest.TestCase): - @parameterized.expand(TESTS) def test_foreground_mask(self, in_type, arguments, image, mask): input_image = in_type(image) diff --git a/tests/test_foreground_maskd.py b/tests/transforms/intensity/test_foreground_maskd.py similarity index 99% rename from tests/test_foreground_maskd.py rename to tests/transforms/intensity/test_foreground_maskd.py index 48ef68e7c0..888dd55b05 100644 --- a/tests/test_foreground_maskd.py +++ b/tests/transforms/intensity/test_foreground_maskd.py @@ -89,7 +89,6 @@ @unittest.skipUnless(has_skimage, "Requires sci-kit image") class TestForegroundMaskd(unittest.TestCase): - @parameterized.expand(TESTS) def test_foreground_mask(self, in_type, arguments, data_dict, mask): data_dict[arguments["keys"]] = in_type(data_dict[arguments["keys"]]) diff --git a/tests/test_rand_histogram_shiftd.py b/tests/transforms/intensity/test_rand_histogram_shiftd.py similarity index 99% rename from tests/test_rand_histogram_shiftd.py rename to tests/transforms/intensity/test_rand_histogram_shiftd.py index 5e971a1827..75aafa7f08 100644 --- a/tests/test_rand_histogram_shiftd.py +++ b/tests/transforms/intensity/test_rand_histogram_shiftd.py @@ -61,7 +61,6 @@ class TestRandHistogramShiftD(unittest.TestCase): - @parameterized.expand(TESTS) def test_rand_histogram_shiftd(self, input_param, input_data, expected_val): g = RandHistogramShiftd(**input_param) diff --git a/tests/test_scale_intensity_range_percentiles.py b/tests/transforms/intensity/test_scale_intensity_range_percentiles.py similarity index 99% rename from tests/test_scale_intensity_range_percentiles.py rename to tests/transforms/intensity/test_scale_intensity_range_percentiles.py index bd26497f3e..20fe59246b 100644 --- a/tests/test_scale_intensity_range_percentiles.py +++ 
b/tests/transforms/intensity/test_scale_intensity_range_percentiles.py @@ -21,7 +21,6 @@ class TestScaleIntensityRangePercentiles(NumpyImageTestCase2D): - def test_scaling(self): img = self.imt[0] lower = 10 diff --git a/tests/test_scale_intensity_range_percentilesd.py b/tests/transforms/intensity/test_scale_intensity_range_percentilesd.py similarity index 99% rename from tests/test_scale_intensity_range_percentilesd.py rename to tests/transforms/intensity/test_scale_intensity_range_percentilesd.py index 2dd1642cff..8e152d62ed 100644 --- a/tests/test_scale_intensity_range_percentilesd.py +++ b/tests/transforms/intensity/test_scale_intensity_range_percentilesd.py @@ -20,7 +20,6 @@ class TestScaleIntensityRangePercentilesd(NumpyImageTestCase2D): - def test_scaling(self): img = self.imt lower = 10 diff --git a/tests/transforms/inverse/__init__.py b/tests/transforms/inverse/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/transforms/inverse/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/test_inverse_array.py b/tests/transforms/inverse/test_inverse_array.py similarity index 99% rename from tests/test_inverse_array.py rename to tests/transforms/inverse/test_inverse_array.py index 140f03c110..3c5539bb96 100644 --- a/tests/test_inverse_array.py +++ b/tests/transforms/inverse/test_inverse_array.py @@ -33,7 +33,6 @@ @unittest.skipUnless(has_nib, "Requires nibabel") class TestInverseArray(unittest.TestCase): - @staticmethod def get_image(dtype, device) -> MetaTensor: affine = torch.tensor([[0, 0, 1, 0], [-1, 0, 0, 0], [0, 10, 0, 0], [0, 0, 0, 1]]).to(dtype).to(device) diff --git a/tests/test_traceable_transform.py b/tests/transforms/inverse/test_traceable_transform.py similarity index 100% rename from tests/test_traceable_transform.py rename to tests/transforms/inverse/test_traceable_transform.py diff --git a/tests/transforms/post/__init__.py b/tests/transforms/post/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/transforms/post/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/tests/test_label_filterd.py b/tests/transforms/post/test_label_filterd.py similarity index 99% rename from tests/test_label_filterd.py rename to tests/transforms/post/test_label_filterd.py index cff540567c..b2b575f9d2 100644 --- a/tests/test_label_filterd.py +++ b/tests/transforms/post/test_label_filterd.py @@ -58,7 +58,6 @@ class TestLabelFilter(unittest.TestCase): - @parameterized.expand(VALID_TESTS) def test_correct_results(self, _, args, input_image, expected): converter = LabelFilterd(keys="image", **args) diff --git a/tests/test_probnms.py b/tests/transforms/post/test_probnms.py similarity index 99% rename from tests/test_probnms.py rename to tests/transforms/post/test_probnms.py index 4cba908b39..4e7cebe0e8 100644 --- a/tests/test_probnms.py +++ b/tests/transforms/post/test_probnms.py @@ -61,7 +61,6 @@ class TestProbNMS(unittest.TestCase): - @parameterized.expand(TESTS) def test_output(self, class_args, probs_map, expected): nms = ProbNMS(**class_args) diff --git a/tests/test_probnmsd.py b/tests/transforms/post/test_probnmsd.py similarity index 99% rename from tests/test_probnmsd.py rename to tests/transforms/post/test_probnmsd.py index b4c8a37c95..8f18ad9788 100644 --- a/tests/test_probnmsd.py +++ b/tests/transforms/post/test_probnmsd.py @@ -68,7 +68,6 @@ class TestProbNMS(unittest.TestCase): - @parameterized.expand(TESTS) def test_output(self, class_args, probs_map, expected): nms = ProbNMSD(keys="prob_map", **class_args) diff --git a/tests/test_remove_small_objects.py b/tests/transforms/post/test_remove_small_objects.py similarity index 99% rename from tests/test_remove_small_objects.py rename to tests/transforms/post/test_remove_small_objects.py index 1324fc55f6..c0d8f59be7 100644 --- a/tests/test_remove_small_objects.py +++ b/tests/transforms/post/test_remove_small_objects.py @@ -55,7 +55,6 @@ @SkipIfNoModule("skimage.morphology") class TestRemoveSmallObjects(unittest.TestCase): - @parameterized.expand(TESTS) def test_remove_small_objects(self, dtype, im_type, lbl, expected, params=None): params = params or {} diff --git a/tests/transforms/spatial/__init__.py b/tests/transforms/spatial/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/transforms/spatial/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
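Because these relocated tests now sit one or two directories below `tests/`, shared fixtures under `tests/testing_data` can no longer be resolved from `os.path.dirname(__file__)` alone; the `test_load_spacing_orientation.py` hunk later in this patch introduces `TESTS_PATH = Path(__file__).parents[1]` for exactly that reason. A small illustrative sketch of the same idea for a hypothetical module placed under `tests/transforms/spatial/` (the `parents` index is an assumption that depends on the file's depth):

```
from pathlib import Path

# parents[0] is the containing directory (tests/transforms/spatial),
# parents[1] is tests/transforms, and parents[2] is the tests/ root
# that holds the shared testing_data/ folder.
TESTS_PATH = Path(__file__).parents[2]
ANATOMICAL_NII = TESTS_PATH / "testing_data" / "anatomical.nii"
```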
diff --git a/tests/test_convert_box_points.py b/tests/transforms/spatial/test_convert_box_points.py similarity index 99% rename from tests/test_convert_box_points.py rename to tests/transforms/spatial/test_convert_box_points.py index 1a21050b2c..a3ef68962b 100644 --- a/tests/test_convert_box_points.py +++ b/tests/transforms/spatial/test_convert_box_points.py @@ -99,7 +99,6 @@ class TestConvertBoxToPoints(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_convert_box_to_points(self, boxes, mode, expected_points): transform = ConvertBoxToPoints(mode=mode) @@ -108,7 +107,6 @@ def test_convert_box_to_points(self, boxes, mode, expected_points): class TestConvertPointsToBoxes(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_convert_box_to_points(self, boxes, mode, points): transform = ConvertPointsToBoxes() diff --git a/tests/test_grid_patch.py b/tests/transforms/spatial/test_grid_patch.py similarity index 99% rename from tests/test_grid_patch.py rename to tests/transforms/spatial/test_grid_patch.py index ce2f6b6b92..f6edea4d54 100644 --- a/tests/test_grid_patch.py +++ b/tests/transforms/spatial/test_grid_patch.py @@ -97,7 +97,6 @@ class TestGridPatch(unittest.TestCase): - @parameterized.expand(TEST_CASES) @SkipIfBeforePyTorchVersion((1, 11, 1)) def test_grid_patch(self, in_type, input_parameters, image, expected): diff --git a/tests/test_grid_patchd.py b/tests/transforms/spatial/test_grid_patchd.py similarity index 99% rename from tests/test_grid_patchd.py rename to tests/transforms/spatial/test_grid_patchd.py index 26b340297d..859a5a3890 100644 --- a/tests/test_grid_patchd.py +++ b/tests/transforms/spatial/test_grid_patchd.py @@ -77,7 +77,6 @@ class TestGridPatchd(unittest.TestCase): - @parameterized.expand(TEST_SINGLE) @SkipIfBeforePyTorchVersion((1, 11, 1)) def test_grid_patchd(self, in_type, input_parameters, image_dict, expected): diff --git a/tests/test_rand_grid_patch.py b/tests/transforms/spatial/test_rand_grid_patch.py similarity index 99% rename from tests/test_rand_grid_patch.py rename to tests/transforms/spatial/test_rand_grid_patch.py index efa4491375..b148f46c97 100644 --- a/tests/test_rand_grid_patch.py +++ b/tests/transforms/spatial/test_rand_grid_patch.py @@ -105,7 +105,6 @@ class TestRandGridPatch(unittest.TestCase): - def setUp(self): set_determinism(seed=1234) diff --git a/tests/test_rand_grid_patchd.py b/tests/transforms/spatial/test_rand_grid_patchd.py similarity index 99% rename from tests/test_rand_grid_patchd.py rename to tests/transforms/spatial/test_rand_grid_patchd.py index bc763b27b0..b9e9589e6d 100644 --- a/tests/test_rand_grid_patchd.py +++ b/tests/transforms/spatial/test_rand_grid_patchd.py @@ -85,7 +85,6 @@ class TestRandGridPatchd(unittest.TestCase): - def setUp(self): set_determinism(seed=1234) diff --git a/tests/test_spatial_resampled.py b/tests/transforms/spatial/test_spatial_resampled.py similarity index 99% rename from tests/test_spatial_resampled.py rename to tests/transforms/spatial/test_spatial_resampled.py index 0576b3a826..12d54cabfc 100644 --- a/tests/test_spatial_resampled.py +++ b/tests/transforms/spatial/test_spatial_resampled.py @@ -94,7 +94,6 @@ class TestSpatialResample(unittest.TestCase): - @parameterized.expand(TESTS) def test_flips_inverse(self, img, device, dst_affine, kwargs, expected_output): img = MetaTensor(img, affine=torch.eye(4)).to(device) diff --git a/tests/test_activations.py b/tests/transforms/test_activations.py similarity index 99% rename from tests/test_activations.py rename to 
tests/transforms/test_activations.py index 3f0f17f063..1a7740e77a 100644 --- a/tests/test_activations.py +++ b/tests/transforms/test_activations.py @@ -94,7 +94,6 @@ class TestActivations(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_value_shape(self, input_param, img, out, expected_shape): result = Activations(**input_param)(img) diff --git a/tests/test_activationsd.py b/tests/transforms/test_activationsd.py similarity index 99% rename from tests/test_activationsd.py rename to tests/transforms/test_activationsd.py index 42bd653f8a..b272f5f4ce 100644 --- a/tests/test_activationsd.py +++ b/tests/transforms/test_activationsd.py @@ -50,7 +50,6 @@ class TestActivationsd(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_value_shape(self, input_param, test_input, output, expected_shape): result = Activationsd(**input_param)(test_input) diff --git a/tests/test_adaptors.py b/tests/transforms/test_adaptors.py similarity index 100% rename from tests/test_adaptors.py rename to tests/transforms/test_adaptors.py diff --git a/tests/test_add_coordinate_channels.py b/tests/transforms/test_add_coordinate_channels.py similarity index 99% rename from tests/test_add_coordinate_channels.py rename to tests/transforms/test_add_coordinate_channels.py index b3c9130057..9a0e0dcd35 100644 --- a/tests/test_add_coordinate_channels.py +++ b/tests/transforms/test_add_coordinate_channels.py @@ -29,7 +29,6 @@ class TestAddCoordinateChannels(unittest.TestCase): - @parameterized.expand(TESTS) def test_shape(self, input_param, input, expected_shape): result = AddCoordinateChannels(**input_param)(input) diff --git a/tests/test_add_coordinate_channelsd.py b/tests/transforms/test_add_coordinate_channelsd.py similarity index 99% rename from tests/test_add_coordinate_channelsd.py rename to tests/transforms/test_add_coordinate_channelsd.py index ad5e64680c..4cc35142ef 100644 --- a/tests/test_add_coordinate_channelsd.py +++ b/tests/transforms/test_add_coordinate_channelsd.py @@ -42,7 +42,6 @@ class TestAddCoordinateChannels(unittest.TestCase): - @parameterized.expand(TESTS) def test_shape(self, input_param, input, expected_shape): result = AddCoordinateChannelsd(**input_param)(input)["img"] diff --git a/tests/test_add_extreme_points_channel.py b/tests/transforms/test_add_extreme_points_channel.py similarity index 99% rename from tests/test_add_extreme_points_channel.py rename to tests/transforms/test_add_extreme_points_channel.py index d395e07143..5a405efd63 100644 --- a/tests/test_add_extreme_points_channel.py +++ b/tests/transforms/test_add_extreme_points_channel.py @@ -69,7 +69,6 @@ class TestAddExtremePointsChannel(unittest.TestCase): - @parameterized.expand(TESTS) def test_correct_results(self, input_data, expected): add_extreme_points_channel = AddExtremePointsChannel() diff --git a/tests/test_add_extreme_points_channeld.py b/tests/transforms/test_add_extreme_points_channeld.py similarity index 99% rename from tests/test_add_extreme_points_channeld.py rename to tests/transforms/test_add_extreme_points_channeld.py index 775766400d..b11498385c 100644 --- a/tests/test_add_extreme_points_channeld.py +++ b/tests/transforms/test_add_extreme_points_channeld.py @@ -64,7 +64,6 @@ class TestAddExtremePointsChanneld(unittest.TestCase): - @parameterized.expand(TESTS) def test_correct_results(self, input_data, expected): add_extreme_points_channel = AddExtremePointsChanneld( diff --git a/tests/test_adjust_contrast.py b/tests/transforms/test_adjust_contrast.py similarity index 99% rename from 
tests/test_adjust_contrast.py rename to tests/transforms/test_adjust_contrast.py index b99edc75c4..3ea7074206 100644 --- a/tests/test_adjust_contrast.py +++ b/tests/transforms/test_adjust_contrast.py @@ -30,7 +30,6 @@ class TestAdjustContrast(NumpyImageTestCase2D): - @parameterized.expand(TESTS) def test_correct_results(self, gamma, invert_image, retain_stats): adjuster = AdjustContrast(gamma=gamma, invert_image=invert_image, retain_stats=retain_stats) diff --git a/tests/test_adjust_contrastd.py b/tests/transforms/test_adjust_contrastd.py similarity index 99% rename from tests/test_adjust_contrastd.py rename to tests/transforms/test_adjust_contrastd.py index 1eb88260ef..16317b3ec3 100644 --- a/tests/test_adjust_contrastd.py +++ b/tests/transforms/test_adjust_contrastd.py @@ -30,7 +30,6 @@ class TestAdjustContrastd(NumpyImageTestCase2D): - @parameterized.expand(TESTS) def test_correct_results(self, gamma, invert_image, retain_stats): adjuster = AdjustContrastd("img", gamma=gamma, invert_image=invert_image, retain_stats=retain_stats) diff --git a/tests/test_affine.py b/tests/transforms/test_affine.py similarity index 99% rename from tests/test_affine.py rename to tests/transforms/test_affine.py index d81f7d0836..90fb77e0ef 100644 --- a/tests/test_affine.py +++ b/tests/transforms/test_affine.py @@ -167,7 +167,6 @@ class TestAffine(unittest.TestCase): - @parameterized.expand(TESTS) def test_affine(self, input_param, input_data, expected_val): input_copy = deepcopy(input_data["img"]) @@ -200,7 +199,6 @@ def test_affine(self, input_param, input_data, expected_val): @unittest.skipUnless(optional_import("scipy")[1], "Requires scipy library.") class TestAffineConsistency(unittest.TestCase): - @parameterized.expand([[7], [8], [9]]) def test_affine_resize(self, s): """s""" diff --git a/tests/test_affine_grid.py b/tests/transforms/test_affine_grid.py similarity index 99% rename from tests/test_affine_grid.py rename to tests/transforms/test_affine_grid.py index 64155606b2..0cb8e35d1f 100644 --- a/tests/test_affine_grid.py +++ b/tests/transforms/test_affine_grid.py @@ -135,7 +135,6 @@ class TestAffineGrid(unittest.TestCase): - @parameterized.expand(TESTS) def test_affine_grid(self, input_param, input_data, expected_val): g = AffineGrid(**input_param) diff --git a/tests/test_affined.py b/tests/transforms/test_affined.py similarity index 99% rename from tests/test_affined.py rename to tests/transforms/test_affined.py index e9a928e7e7..05f918c728 100644 --- a/tests/test_affined.py +++ b/tests/transforms/test_affined.py @@ -168,7 +168,6 @@ class TestAffined(unittest.TestCase): - @parameterized.expand(TESTS) def test_affine(self, input_param, input_data, expected_val): input_copy = deepcopy(input_data) diff --git a/tests/test_as_channel_last.py b/tests/transforms/test_as_channel_last.py similarity index 99% rename from tests/test_as_channel_last.py rename to tests/transforms/test_as_channel_last.py index 991c7f0fac..11bd688dae 100644 --- a/tests/test_as_channel_last.py +++ b/tests/transforms/test_as_channel_last.py @@ -27,7 +27,6 @@ class TestAsChannelLast(unittest.TestCase): - @parameterized.expand(TESTS) def test_shape(self, in_type, input_param, expected_shape): test_data = in_type(np.random.randint(0, 2, size=[1, 2, 3, 4])) diff --git a/tests/test_as_channel_lastd.py b/tests/transforms/test_as_channel_lastd.py similarity index 99% rename from tests/test_as_channel_lastd.py rename to tests/transforms/test_as_channel_lastd.py index 42076a2a97..c3e5b31fb5 100644 --- a/tests/test_as_channel_lastd.py +++ 
b/tests/transforms/test_as_channel_lastd.py @@ -27,7 +27,6 @@ class TestAsChannelLastd(unittest.TestCase): - @parameterized.expand(TESTS) def test_shape(self, in_type, input_param, expected_shape): test_data = { diff --git a/tests/test_as_discrete.py b/tests/transforms/test_as_discrete.py similarity index 99% rename from tests/test_as_discrete.py rename to tests/transforms/test_as_discrete.py index e7c4c4a782..a83870e514 100644 --- a/tests/test_as_discrete.py +++ b/tests/transforms/test_as_discrete.py @@ -65,7 +65,6 @@ class TestAsDiscrete(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_value_shape(self, input_param, img, out, expected_shape): result = AsDiscrete(**input_param)(img) diff --git a/tests/test_as_discreted.py b/tests/transforms/test_as_discreted.py similarity index 99% rename from tests/test_as_discreted.py rename to tests/transforms/test_as_discreted.py index 8fc3c1fabf..3c29e820d0 100644 --- a/tests/test_as_discreted.py +++ b/tests/transforms/test_as_discreted.py @@ -68,7 +68,6 @@ class TestAsDiscreted(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_value_shape(self, input_param, test_input, output, expected_shape): result = AsDiscreted(**input_param)(test_input) diff --git a/tests/test_border_pad.py b/tests/transforms/test_border_pad.py similarity index 100% rename from tests/test_border_pad.py rename to tests/transforms/test_border_pad.py diff --git a/tests/test_border_padd.py b/tests/transforms/test_border_padd.py similarity index 100% rename from tests/test_border_padd.py rename to tests/transforms/test_border_padd.py diff --git a/tests/test_bounding_rect.py b/tests/transforms/test_bounding_rect.py similarity index 99% rename from tests/test_bounding_rect.py rename to tests/transforms/test_bounding_rect.py index 30fe66a8ce..4443f3dd1d 100644 --- a/tests/test_bounding_rect.py +++ b/tests/transforms/test_bounding_rect.py @@ -28,7 +28,6 @@ class TestBoundingRect(unittest.TestCase): - def setUp(self): monai.utils.set_determinism(1) diff --git a/tests/test_bounding_rectd.py b/tests/transforms/test_bounding_rectd.py similarity index 99% rename from tests/test_bounding_rectd.py rename to tests/transforms/test_bounding_rectd.py index 4e46805e76..c28ce9d941 100644 --- a/tests/test_bounding_rectd.py +++ b/tests/transforms/test_bounding_rectd.py @@ -28,7 +28,6 @@ class TestBoundingRectD(unittest.TestCase): - def setUp(self): monai.utils.set_determinism(1) diff --git a/tests/test_cast_to_type.py b/tests/transforms/test_cast_to_type.py similarity index 99% rename from tests/test_cast_to_type.py rename to tests/transforms/test_cast_to_type.py index 9e3c2d331c..b4fba0939d 100644 --- a/tests/test_cast_to_type.py +++ b/tests/transforms/test_cast_to_type.py @@ -37,7 +37,6 @@ class TestCastToType(unittest.TestCase): - @parameterized.expand(TESTS) def test_type(self, out_dtype, input_data, expected_type): result = CastToType(dtype=out_dtype)(input_data) diff --git a/tests/test_cast_to_typed.py b/tests/transforms/test_cast_to_typed.py similarity index 99% rename from tests/test_cast_to_typed.py rename to tests/transforms/test_cast_to_typed.py index 5be6dd2b9f..89eed795df 100644 --- a/tests/test_cast_to_typed.py +++ b/tests/transforms/test_cast_to_typed.py @@ -53,7 +53,6 @@ class TestCastToTyped(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) def test_type(self, input_param, input_data, expected_type): result = CastToTyped(**input_param)(input_data) diff --git a/tests/test_center_scale_crop.py 
b/tests/transforms/test_center_scale_crop.py similarity index 100% rename from tests/test_center_scale_crop.py rename to tests/transforms/test_center_scale_crop.py diff --git a/tests/test_center_scale_cropd.py b/tests/transforms/test_center_scale_cropd.py similarity index 100% rename from tests/test_center_scale_cropd.py rename to tests/transforms/test_center_scale_cropd.py diff --git a/tests/test_center_spatial_crop.py b/tests/transforms/test_center_spatial_crop.py similarity index 100% rename from tests/test_center_spatial_crop.py rename to tests/transforms/test_center_spatial_crop.py diff --git a/tests/test_center_spatial_cropd.py b/tests/transforms/test_center_spatial_cropd.py similarity index 100% rename from tests/test_center_spatial_cropd.py rename to tests/transforms/test_center_spatial_cropd.py diff --git a/tests/test_classes_to_indices.py b/tests/transforms/test_classes_to_indices.py similarity index 99% rename from tests/test_classes_to_indices.py rename to tests/transforms/test_classes_to_indices.py index df7e367c73..6a1d5bf8f7 100644 --- a/tests/test_classes_to_indices.py +++ b/tests/transforms/test_classes_to_indices.py @@ -82,7 +82,6 @@ class TestClassesToIndices(unittest.TestCase): - @parameterized.expand(TESTS_CASES) def test_value(self, input_args, label, image, expected_indices): indices = ClassesToIndices(**input_args)(label, image) diff --git a/tests/test_classes_to_indicesd.py b/tests/transforms/test_classes_to_indicesd.py similarity index 99% rename from tests/test_classes_to_indicesd.py rename to tests/transforms/test_classes_to_indicesd.py index 829f31b594..1dca89ace8 100644 --- a/tests/test_classes_to_indicesd.py +++ b/tests/transforms/test_classes_to_indicesd.py @@ -97,7 +97,6 @@ class TestClassesToIndicesd(unittest.TestCase): - @parameterized.expand(TESTS_CASES) def test_value(self, input_args, input_data, expected_indices): result = ClassesToIndicesd(**input_args)(input_data) diff --git a/tests/test_clip_intensity_percentiles.py b/tests/transforms/test_clip_intensity_percentiles.py similarity index 99% rename from tests/test_clip_intensity_percentiles.py rename to tests/transforms/test_clip_intensity_percentiles.py index 2f71d2e894..18ed47dbaa 100644 --- a/tests/test_clip_intensity_percentiles.py +++ b/tests/transforms/test_clip_intensity_percentiles.py @@ -45,7 +45,6 @@ def test_soft_clip_func(im, lower, upper): class TestClipIntensityPercentiles2D(NumpyImageTestCase2D): - @parameterized.expand([[p] for p in TEST_NDARRAYS]) def test_hard_clipping_two_sided(self, p): hard_clipper = ClipIntensityPercentiles(upper=95, lower=5) @@ -130,7 +129,6 @@ def test_ill_both_none(self): class TestClipIntensityPercentiles3D(NumpyImageTestCase3D): - @parameterized.expand([[p] for p in TEST_NDARRAYS]) def test_hard_clipping_two_sided(self, p): hard_clipper = ClipIntensityPercentiles(upper=95, lower=5) diff --git a/tests/test_clip_intensity_percentilesd.py b/tests/transforms/test_clip_intensity_percentilesd.py similarity index 98% rename from tests/test_clip_intensity_percentilesd.py rename to tests/transforms/test_clip_intensity_percentilesd.py index d151a6065b..ddf4d1a581 100644 --- a/tests/test_clip_intensity_percentilesd.py +++ b/tests/transforms/test_clip_intensity_percentilesd.py @@ -19,12 +19,10 @@ from monai.transforms.utils_pytorch_numpy_unification import clip, percentile from monai.utils.type_conversion import convert_to_tensor from tests.test_utils import TEST_NDARRAYS, NumpyImageTestCase2D, NumpyImageTestCase3D, assert_allclose - -from 
.test_clip_intensity_percentiles import test_hard_clip_func, test_soft_clip_func +from tests.transforms.test_clip_intensity_percentiles import test_hard_clip_func, test_soft_clip_func class TestClipIntensityPercentilesd2D(NumpyImageTestCase2D): - @parameterized.expand([[p] for p in TEST_NDARRAYS]) def test_hard_clipping_two_sided(self, p): key = "img" @@ -121,7 +119,6 @@ def test_ill_both_none(self): class TestClipIntensityPercentilesd3D(NumpyImageTestCase3D): - @parameterized.expand([[p] for p in TEST_NDARRAYS]) def test_hard_clipping_two_sided(self, p): key = "img" diff --git a/tests/test_compose_get_number_conversions.py b/tests/transforms/test_compose_get_number_conversions.py similarity index 100% rename from tests/test_compose_get_number_conversions.py rename to tests/transforms/test_compose_get_number_conversions.py diff --git a/tests/test_concat_itemsd.py b/tests/transforms/test_concat_itemsd.py similarity index 99% rename from tests/test_concat_itemsd.py rename to tests/transforms/test_concat_itemsd.py index b1d461cac8..823c6283e7 100644 --- a/tests/test_concat_itemsd.py +++ b/tests/transforms/test_concat_itemsd.py @@ -22,7 +22,6 @@ class TestConcatItemsd(unittest.TestCase): - def test_tensor_values(self): device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu:0") input_data = { diff --git a/tests/test_convert_to_multi_channel.py b/tests/transforms/test_convert_to_multi_channel.py similarity index 99% rename from tests/test_convert_to_multi_channel.py rename to tests/transforms/test_convert_to_multi_channel.py index ff08fe1145..dfa324b6b9 100644 --- a/tests/test_convert_to_multi_channel.py +++ b/tests/transforms/test_convert_to_multi_channel.py @@ -48,7 +48,6 @@ class TestConvertToMultiChannel(unittest.TestCase): - @parameterized.expand(TESTS) def test_type_shape(self, data, expected_result): result = ConvertToMultiChannelBasedOnBratsClasses()(data) diff --git a/tests/test_convert_to_multi_channeld.py b/tests/transforms/test_convert_to_multi_channeld.py similarity index 100% rename from tests/test_convert_to_multi_channeld.py rename to tests/transforms/test_convert_to_multi_channeld.py diff --git a/tests/test_copy_itemsd.py b/tests/transforms/test_copy_itemsd.py similarity index 99% rename from tests/test_copy_itemsd.py rename to tests/transforms/test_copy_itemsd.py index 31069a8b90..584f54b438 100644 --- a/tests/test_copy_itemsd.py +++ b/tests/transforms/test_copy_itemsd.py @@ -32,7 +32,6 @@ class TestCopyItemsd(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4]) def test_numpy_values(self, keys, times, names): input_data = {"img": np.array([[0, 1], [1, 2]]), "seg": np.array([[3, 4], [4, 5]])} diff --git a/tests/test_create_grid_and_affine.py b/tests/transforms/test_create_grid_and_affine.py similarity index 99% rename from tests/test_create_grid_and_affine.py rename to tests/transforms/test_create_grid_and_affine.py index a0aca3bbc3..f4793cabe0 100644 --- a/tests/test_create_grid_and_affine.py +++ b/tests/transforms/test_create_grid_and_affine.py @@ -28,7 +28,6 @@ class TestCreateGrid(unittest.TestCase): - def test_create_grid(self): with self.assertRaisesRegex(TypeError, ""): create_grid(None) @@ -169,7 +168,6 @@ def test_assert(func, params, expected): class TestCreateAffine(unittest.TestCase): - def test_create_rotate(self): with self.assertRaisesRegex(TypeError, ""): create_rotate(2, None) diff --git a/tests/test_crop_foreground.py b/tests/transforms/test_crop_foreground.py similarity index 99% rename 
from tests/test_crop_foreground.py rename to tests/transforms/test_crop_foreground.py index d8f3c54d58..c533e46ee4 100644 --- a/tests/test_crop_foreground.py +++ b/tests/transforms/test_crop_foreground.py @@ -99,7 +99,6 @@ class TestCropForeground(unittest.TestCase): - @parameterized.expand(TEST_COORDS + TESTS) def test_value(self, arguments, image, expected_data, _): cropper = CropForeground(**arguments) diff --git a/tests/test_crop_foregroundd.py b/tests/transforms/test_crop_foregroundd.py similarity index 99% rename from tests/test_crop_foregroundd.py rename to tests/transforms/test_crop_foregroundd.py index 63601ecc29..83d7a8e07c 100644 --- a/tests/test_crop_foregroundd.py +++ b/tests/transforms/test_crop_foregroundd.py @@ -158,7 +158,6 @@ class TestCropForegroundd(unittest.TestCase): - @parameterized.expand(TEST_POSITION + TESTS) def test_value(self, arguments, input_data, expected_data, _): cropper = CropForegroundd(**arguments) diff --git a/tests/test_cucim_dict_transform.py b/tests/transforms/test_cucim_dict_transform.py similarity index 99% rename from tests/test_cucim_dict_transform.py rename to tests/transforms/test_cucim_dict_transform.py index 1b7f3dafec..1febf7d2b4 100644 --- a/tests/test_cucim_dict_transform.py +++ b/tests/transforms/test_cucim_dict_transform.py @@ -66,7 +66,6 @@ @unittest.skipUnless(HAS_CUPY, "CuPy is required.") @unittest.skipUnless(has_cut, "cuCIM transforms are required.") class TestCuCIMDict(unittest.TestCase): - @parameterized.expand( [ TEST_CASE_COLOR_JITTER_1, diff --git a/tests/test_cucim_transform.py b/tests/transforms/test_cucim_transform.py similarity index 99% rename from tests/test_cucim_transform.py rename to tests/transforms/test_cucim_transform.py index 264451444d..97c1e94703 100644 --- a/tests/test_cucim_transform.py +++ b/tests/transforms/test_cucim_transform.py @@ -66,7 +66,6 @@ @unittest.skipUnless(HAS_CUPY, "CuPy is required.") @unittest.skipUnless(has_cut, "cuCIM transforms are required.") class TestCuCIM(unittest.TestCase): - @parameterized.expand( [ TEST_CASE_COLOR_JITTER_1, diff --git a/tests/test_data_stats.py b/tests/transforms/test_data_stats.py similarity index 100% rename from tests/test_data_stats.py rename to tests/transforms/test_data_stats.py diff --git a/tests/test_data_statsd.py b/tests/transforms/test_data_statsd.py similarity index 100% rename from tests/test_data_statsd.py rename to tests/transforms/test_data_statsd.py diff --git a/tests/test_delete_itemsd.py b/tests/transforms/test_delete_itemsd.py similarity index 100% rename from tests/test_delete_itemsd.py rename to tests/transforms/test_delete_itemsd.py diff --git a/tests/test_detect_envelope.py b/tests/transforms/test_detect_envelope.py similarity index 99% rename from tests/test_detect_envelope.py rename to tests/transforms/test_detect_envelope.py index ff8367aa6e..821c6ad947 100644 --- a/tests/test_detect_envelope.py +++ b/tests/transforms/test_detect_envelope.py @@ -116,7 +116,6 @@ @SkipIfNoModule("torch.fft") class TestDetectEnvelope(unittest.TestCase): - @parameterized.expand( [ TEST_CASE_1D_SINE, @@ -152,7 +151,6 @@ def test_value_error(self, arguments, image, method): @SkipIfModule("torch.fft") class TestHilbertTransformNoFFTMod(unittest.TestCase): - def test_no_fft_module_error(self): self.assertRaises(OptionalImportError, DetectEnvelope(), np.random.rand(1, 10)) diff --git a/tests/test_distance_transform_edt.py b/tests/transforms/test_distance_transform_edt.py similarity index 99% rename from tests/test_distance_transform_edt.py rename to 
tests/transforms/test_distance_transform_edt.py index 3e17eaabd2..ad87512ff8 100644 --- a/tests/test_distance_transform_edt.py +++ b/tests/transforms/test_distance_transform_edt.py @@ -146,7 +146,6 @@ class TestDistanceTransformEDT(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_scipy_transform(self, input, expected_output): transform = DistanceTransformEDT() diff --git a/tests/test_divisible_pad.py b/tests/transforms/test_divisible_pad.py similarity index 100% rename from tests/test_divisible_pad.py rename to tests/transforms/test_divisible_pad.py diff --git a/tests/test_divisible_padd.py b/tests/transforms/test_divisible_padd.py similarity index 100% rename from tests/test_divisible_padd.py rename to tests/transforms/test_divisible_padd.py diff --git a/tests/test_ensure_channel_first.py b/tests/transforms/test_ensure_channel_first.py similarity index 100% rename from tests/test_ensure_channel_first.py rename to tests/transforms/test_ensure_channel_first.py diff --git a/tests/test_ensure_channel_firstd.py b/tests/transforms/test_ensure_channel_firstd.py similarity index 100% rename from tests/test_ensure_channel_firstd.py rename to tests/transforms/test_ensure_channel_firstd.py diff --git a/tests/test_ensure_type.py b/tests/transforms/test_ensure_type.py similarity index 99% rename from tests/test_ensure_type.py rename to tests/transforms/test_ensure_type.py index 61c258b2b8..9e008f9811 100644 --- a/tests/test_ensure_type.py +++ b/tests/transforms/test_ensure_type.py @@ -22,7 +22,6 @@ class TestEnsureType(unittest.TestCase): - def test_array_input(self): test_datas = [np.array([[1, 2], [3, 4]]), torch.as_tensor([[1, 2], [3, 4]])] if torch.cuda.is_available(): diff --git a/tests/test_ensure_typed.py b/tests/transforms/test_ensure_typed.py similarity index 99% rename from tests/test_ensure_typed.py rename to tests/transforms/test_ensure_typed.py index 03ee33be7b..0d2a5610ba 100644 --- a/tests/test_ensure_typed.py +++ b/tests/transforms/test_ensure_typed.py @@ -22,7 +22,6 @@ class TestEnsureTyped(unittest.TestCase): - def test_array_input(self): test_datas = [np.array([[1, 2], [3, 4]]), torch.as_tensor([[1, 2], [3, 4]])] if torch.cuda.is_available(): diff --git a/tests/test_fg_bg_to_indices.py b/tests/transforms/test_fg_bg_to_indices.py similarity index 99% rename from tests/test_fg_bg_to_indices.py rename to tests/transforms/test_fg_bg_to_indices.py index 05dfd45c7a..3de25905ce 100644 --- a/tests/test_fg_bg_to_indices.py +++ b/tests/transforms/test_fg_bg_to_indices.py @@ -72,7 +72,6 @@ class TestFgBgToIndices(unittest.TestCase): - @parameterized.expand(TESTS_CASES) def test_type_shape(self, input_data, label, image, expected_fg, expected_bg): fg_indices, bg_indices = FgBgToIndices(**input_data)(label, image) diff --git a/tests/test_fg_bg_to_indicesd.py b/tests/transforms/test_fg_bg_to_indicesd.py similarity index 99% rename from tests/test_fg_bg_to_indicesd.py rename to tests/transforms/test_fg_bg_to_indicesd.py index 5034c987a3..7a03b386ac 100644 --- a/tests/test_fg_bg_to_indicesd.py +++ b/tests/transforms/test_fg_bg_to_indicesd.py @@ -67,7 +67,6 @@ class TestFgBgToIndicesd(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_type_shape(self, input_data, data, expected_fg, expected_bg): result = FgBgToIndicesd(**input_data)(data) diff --git a/tests/test_fill_holes.py b/tests/transforms/test_fill_holes.py similarity index 99% rename from tests/test_fill_holes.py rename to tests/transforms/test_fill_holes.py index 7b36e63f60..4a90b0c429 100644 --- 
a/tests/test_fill_holes.py +++ b/tests/transforms/test_fill_holes.py @@ -195,7 +195,6 @@ class TestFillHoles(unittest.TestCase): - @parameterized.expand(VALID_CASES) def test_correct_results(self, _, args, input_image, expected): converter = FillHoles(**args) diff --git a/tests/test_fill_holesd.py b/tests/transforms/test_fill_holesd.py similarity index 99% rename from tests/test_fill_holesd.py rename to tests/transforms/test_fill_holesd.py index 08e7c3e78f..f4b44d328f 100644 --- a/tests/test_fill_holesd.py +++ b/tests/transforms/test_fill_holesd.py @@ -196,7 +196,6 @@ class TestFillHoles(unittest.TestCase): - @parameterized.expand(VALID_CASES) def test_correct_results(self, _, args, input_image, expected): key = CommonKeys.IMAGE diff --git a/tests/test_flatten_sub_keysd.py b/tests/transforms/test_flatten_sub_keysd.py similarity index 100% rename from tests/test_flatten_sub_keysd.py rename to tests/transforms/test_flatten_sub_keysd.py diff --git a/tests/test_flip.py b/tests/transforms/test_flip.py similarity index 99% rename from tests/test_flip.py rename to tests/transforms/test_flip.py index b5b8d5494f..3d18b7eb89 100644 --- a/tests/test_flip.py +++ b/tests/transforms/test_flip.py @@ -40,7 +40,6 @@ class TestFlip(NumpyImageTestCase2D): - @parameterized.expand(INVALID_CASES) def test_invalid_inputs(self, _, spatial_axis, raises): with self.assertRaises(raises): diff --git a/tests/test_flipd.py b/tests/transforms/test_flipd.py similarity index 99% rename from tests/test_flipd.py rename to tests/transforms/test_flipd.py index 95a453b865..1a2af4b11f 100644 --- a/tests/test_flipd.py +++ b/tests/transforms/test_flipd.py @@ -41,7 +41,6 @@ class TestFlipd(NumpyImageTestCase2D): - @parameterized.expand(INVALID_CASES) def test_invalid_cases(self, _, spatial_axis, raises): with self.assertRaises(raises): diff --git a/tests/test_fourier.py b/tests/transforms/test_fourier.py similarity index 99% rename from tests/test_fourier.py rename to tests/transforms/test_fourier.py index 73fea2cdb1..f9685030d4 100644 --- a/tests/test_fourier.py +++ b/tests/transforms/test_fourier.py @@ -28,7 +28,6 @@ @SkipIfBeforePyTorchVersion((1, 8)) @SkipIfNoModule("torch.fft") class TestFourier(unittest.TestCase): - def setUp(self): set_determinism(0) super().setUp() diff --git a/tests/test_gaussian_sharpen.py b/tests/transforms/test_gaussian_sharpen.py similarity index 99% rename from tests/test_gaussian_sharpen.py rename to tests/transforms/test_gaussian_sharpen.py index 553038181f..596935d6dc 100644 --- a/tests/test_gaussian_sharpen.py +++ b/tests/transforms/test_gaussian_sharpen.py @@ -82,7 +82,6 @@ class TestGaussianSharpen(unittest.TestCase): - @parameterized.expand(TESTS) def test_value(self, arguments, image, expected_data): result = GaussianSharpen(**arguments)(image) diff --git a/tests/test_gaussian_sharpend.py b/tests/transforms/test_gaussian_sharpend.py similarity index 99% rename from tests/test_gaussian_sharpend.py rename to tests/transforms/test_gaussian_sharpend.py index 38149dd25a..625d13addc 100644 --- a/tests/test_gaussian_sharpend.py +++ b/tests/transforms/test_gaussian_sharpend.py @@ -82,7 +82,6 @@ class TestGaussianSharpend(unittest.TestCase): - @parameterized.expand(TESTS) def test_value(self, arguments, image, expected_data): result = GaussianSharpend(**arguments)(image) diff --git a/tests/test_gaussian_smooth.py b/tests/transforms/test_gaussian_smooth.py similarity index 99% rename from tests/test_gaussian_smooth.py rename to tests/transforms/test_gaussian_smooth.py index e3a9e46e76..a97124fadd 
100644 --- a/tests/test_gaussian_smooth.py +++ b/tests/transforms/test_gaussian_smooth.py @@ -86,7 +86,6 @@ class TestGaussianSmooth(unittest.TestCase): - @parameterized.expand(TESTS) def test_value(self, arguments, image, expected_data): result = GaussianSmooth(**arguments)(image) diff --git a/tests/test_gaussian_smoothd.py b/tests/transforms/test_gaussian_smoothd.py similarity index 99% rename from tests/test_gaussian_smoothd.py rename to tests/transforms/test_gaussian_smoothd.py index 4471d2fe94..eedec62dd2 100644 --- a/tests/test_gaussian_smoothd.py +++ b/tests/transforms/test_gaussian_smoothd.py @@ -86,7 +86,6 @@ class TestGaussianSmoothd(unittest.TestCase): - @parameterized.expand(TESTS) def test_value(self, arguments, image, expected_data): result = GaussianSmoothd(**arguments)(image) diff --git a/tests/test_generate_label_classes_crop_centers.py b/tests/transforms/test_generate_label_classes_crop_centers.py similarity index 99% rename from tests/test_generate_label_classes_crop_centers.py rename to tests/transforms/test_generate_label_classes_crop_centers.py index bfe65465e2..61b9b40096 100644 --- a/tests/test_generate_label_classes_crop_centers.py +++ b/tests/transforms/test_generate_label_classes_crop_centers.py @@ -48,7 +48,6 @@ class TestGenerateLabelClassesCropCenters(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) def test_type_shape(self, input_data, expected_type, expected_count, expected_shape): results = [] diff --git a/tests/test_generate_pos_neg_label_crop_centers.py b/tests/transforms/test_generate_pos_neg_label_crop_centers.py similarity index 99% rename from tests/test_generate_pos_neg_label_crop_centers.py rename to tests/transforms/test_generate_pos_neg_label_crop_centers.py index 80c179ffaf..112350072d 100644 --- a/tests/test_generate_pos_neg_label_crop_centers.py +++ b/tests/transforms/test_generate_pos_neg_label_crop_centers.py @@ -51,7 +51,6 @@ class TestGeneratePosNegLabelCropCenters(unittest.TestCase): - @parameterized.expand(TESTS) def test_type_shape(self, input_data, expected_type, expected_count, expected_shape): results = [] diff --git a/tests/test_generate_spatial_bounding_box.py b/tests/transforms/test_generate_spatial_bounding_box.py similarity index 99% rename from tests/test_generate_spatial_bounding_box.py rename to tests/transforms/test_generate_spatial_bounding_box.py index 94cf1a58d7..1f63b2c1ef 100644 --- a/tests/test_generate_spatial_bounding_box.py +++ b/tests/transforms/test_generate_spatial_bounding_box.py @@ -104,7 +104,6 @@ class TestGenerateSpatialBoundingBox(unittest.TestCase): - @parameterized.expand(TESTS) def test_value(self, input_data, expected_box): result = generate_spatial_bounding_box(**input_data) diff --git a/tests/test_get_extreme_points.py b/tests/transforms/test_get_extreme_points.py similarity index 99% rename from tests/test_get_extreme_points.py rename to tests/transforms/test_get_extreme_points.py index 0a062d5214..370a11f0c7 100644 --- a/tests/test_get_extreme_points.py +++ b/tests/transforms/test_get_extreme_points.py @@ -47,7 +47,6 @@ class TestGetExtremePoints(unittest.TestCase): - @parameterized.expand(TESTS) def test_type_shape(self, input_data, expected): result = get_extreme_points(**input_data) diff --git a/tests/test_gibbs_noise.py b/tests/transforms/test_gibbs_noise.py similarity index 99% rename from tests/test_gibbs_noise.py rename to tests/transforms/test_gibbs_noise.py index 145a1d10ac..2aa2a44d10 100644 --- a/tests/test_gibbs_noise.py +++ b/tests/transforms/test_gibbs_noise.py @@ 
-32,7 +32,6 @@ class TestGibbsNoise(unittest.TestCase): - def setUp(self): set_determinism(0) super().setUp() diff --git a/tests/test_gibbs_noised.py b/tests/transforms/test_gibbs_noised.py similarity index 99% rename from tests/test_gibbs_noised.py rename to tests/transforms/test_gibbs_noised.py index 8c8cca513c..f951e6dd3b 100644 --- a/tests/test_gibbs_noised.py +++ b/tests/transforms/test_gibbs_noised.py @@ -33,7 +33,6 @@ class TestGibbsNoised(unittest.TestCase): - def setUp(self): set_determinism(0) super().setUp() diff --git a/tests/test_grid_distortion.py b/tests/transforms/test_grid_distortion.py similarity index 99% rename from tests/test_grid_distortion.py rename to tests/transforms/test_grid_distortion.py index e923d828f6..5b2d014132 100644 --- a/tests/test_grid_distortion.py +++ b/tests/transforms/test_grid_distortion.py @@ -99,7 +99,6 @@ class TestGridDistortion(unittest.TestCase): - @parameterized.expand(TESTS) def test_grid_distortion(self, input_param, input_data, expected_val): g = GridDistortion(**input_param) diff --git a/tests/test_grid_distortiond.py b/tests/transforms/test_grid_distortiond.py similarity index 99% rename from tests/test_grid_distortiond.py rename to tests/transforms/test_grid_distortiond.py index 495403885c..c56450ad3e 100644 --- a/tests/test_grid_distortiond.py +++ b/tests/transforms/test_grid_distortiond.py @@ -75,7 +75,6 @@ class TestGridDistortiond(unittest.TestCase): - @parameterized.expand(TESTS) def test_grid_distortiond(self, input_param, input_data, expected_val_img, expected_val_mask): g = GridDistortiond(**input_param) diff --git a/tests/test_grid_split.py b/tests/transforms/test_grid_split.py similarity index 99% rename from tests/test_grid_split.py rename to tests/transforms/test_grid_split.py index e4a8571b47..1e9ac8e5f2 100644 --- a/tests/test_grid_split.py +++ b/tests/transforms/test_grid_split.py @@ -66,7 +66,6 @@ class TestGridSplit(unittest.TestCase): - @parameterized.expand(TEST_SINGLE) def test_split_patch_single_call(self, in_type, input_parameters, image, expected): input_image = in_type(image) diff --git a/tests/test_grid_splitd.py b/tests/transforms/test_grid_splitd.py similarity index 99% rename from tests/test_grid_splitd.py rename to tests/transforms/test_grid_splitd.py index 2c39acdee0..b0daf3fece 100644 --- a/tests/test_grid_splitd.py +++ b/tests/transforms/test_grid_splitd.py @@ -70,7 +70,6 @@ class TestGridSplitd(unittest.TestCase): - @parameterized.expand(TEST_SINGLE) def test_split_patch_single_call(self, in_type, input_parameters, img_dict, expected): input_dict = {} diff --git a/tests/test_histogram_normalize.py b/tests/transforms/test_histogram_normalize.py similarity index 99% rename from tests/test_histogram_normalize.py rename to tests/transforms/test_histogram_normalize.py index 7f3abf63f5..7378400c5e 100644 --- a/tests/test_histogram_normalize.py +++ b/tests/transforms/test_histogram_normalize.py @@ -48,7 +48,6 @@ class TestHistogramNormalize(unittest.TestCase): - @parameterized.expand(TESTS) def test_value(self, arguments, image, expected_data): result = HistogramNormalize(**arguments)(image) diff --git a/tests/test_histogram_normalized.py b/tests/transforms/test_histogram_normalized.py similarity index 99% rename from tests/test_histogram_normalized.py rename to tests/transforms/test_histogram_normalized.py index ceadb66b74..a7b2d8488f 100644 --- a/tests/test_histogram_normalized.py +++ b/tests/transforms/test_histogram_normalized.py @@ -48,7 +48,6 @@ class TestHistogramNormalized(unittest.TestCase): - 
@parameterized.expand(TESTS) def test_value(self, arguments, image, expected_data): result = HistogramNormalized(**arguments)(image)["img"] diff --git a/tests/test_image_filter.py b/tests/transforms/test_image_filter.py similarity index 100% rename from tests/test_image_filter.py rename to tests/transforms/test_image_filter.py diff --git a/tests/test_intensity_stats.py b/tests/transforms/test_intensity_stats.py similarity index 99% rename from tests/test_intensity_stats.py rename to tests/transforms/test_intensity_stats.py index ca3a440cb6..2338cca805 100644 --- a/tests/test_intensity_stats.py +++ b/tests/transforms/test_intensity_stats.py @@ -53,7 +53,6 @@ class TestIntensityStats(unittest.TestCase): - @parameterized.expand(TESTS) def test_value(self, input_param, img, meta_dict, expected): _, meta_dict = IntensityStats(**input_param)(img, meta_dict) diff --git a/tests/test_intensity_statsd.py b/tests/transforms/test_intensity_statsd.py similarity index 100% rename from tests/test_intensity_statsd.py rename to tests/transforms/test_intensity_statsd.py diff --git a/tests/test_inverse.py b/tests/transforms/test_inverse.py similarity index 100% rename from tests/test_inverse.py rename to tests/transforms/test_inverse.py diff --git a/tests/test_inverse_collation.py b/tests/transforms/test_inverse_collation.py similarity index 100% rename from tests/test_inverse_collation.py rename to tests/transforms/test_inverse_collation.py diff --git a/tests/test_invert.py b/tests/transforms/test_invert.py similarity index 99% rename from tests/test_invert.py rename to tests/transforms/test_invert.py index 521207948e..a7391f5fb2 100644 --- a/tests/test_invert.py +++ b/tests/transforms/test_invert.py @@ -41,7 +41,6 @@ class TestInvert(unittest.TestCase): - def test_invert(self): set_determinism(seed=0) im_fname = make_nifti_image(create_test_image_3d(101, 100, 107, noise_max=100)[1]) # label image, discrete diff --git a/tests/test_invertd.py b/tests/transforms/test_invertd.py similarity index 99% rename from tests/test_invertd.py rename to tests/transforms/test_invertd.py index af6bffb696..2b5e9da85d 100644 --- a/tests/test_invertd.py +++ b/tests/transforms/test_invertd.py @@ -43,7 +43,6 @@ class TestInvertd(unittest.TestCase): - def test_invert(self): set_determinism(seed=0) im_fname, seg_fname = (make_nifti_image(i) for i in create_test_image_3d(101, 100, 107, noise_max=100)) diff --git a/tests/test_k_space_spike_noise.py b/tests/transforms/test_k_space_spike_noise.py similarity index 99% rename from tests/test_k_space_spike_noise.py rename to tests/transforms/test_k_space_spike_noise.py index 3101d5c1c0..20ce785517 100644 --- a/tests/test_k_space_spike_noise.py +++ b/tests/transforms/test_k_space_spike_noise.py @@ -32,7 +32,6 @@ class TestKSpaceSpikeNoise(unittest.TestCase): - def setUp(self): set_determinism(0) super().setUp() diff --git a/tests/test_k_space_spike_noised.py b/tests/transforms/test_k_space_spike_noised.py similarity index 99% rename from tests/test_k_space_spike_noised.py rename to tests/transforms/test_k_space_spike_noised.py index aa52217ac2..8dbb8c2f49 100644 --- a/tests/test_k_space_spike_noised.py +++ b/tests/transforms/test_k_space_spike_noised.py @@ -33,7 +33,6 @@ class TestKSpaceSpikeNoised(unittest.TestCase): - def setUp(self): set_determinism(0) super().setUp() diff --git a/tests/test_keep_largest_connected_component.py b/tests/transforms/test_keep_largest_connected_component.py similarity index 99% rename from tests/test_keep_largest_connected_component.py rename to 
tests/transforms/test_keep_largest_connected_component.py index 7b2d81a88b..639cdf9936 100644 --- a/tests/test_keep_largest_connected_component.py +++ b/tests/transforms/test_keep_largest_connected_component.py @@ -381,7 +381,6 @@ def to_onehot(x): class TestKeepLargestConnectedComponent(unittest.TestCase): - @parameterized.expand(TESTS) def test_correct_results(self, _, args, input_image, expected): converter = KeepLargestConnectedComponent(**args) diff --git a/tests/test_keep_largest_connected_componentd.py b/tests/transforms/test_keep_largest_connected_componentd.py similarity index 99% rename from tests/test_keep_largest_connected_componentd.py rename to tests/transforms/test_keep_largest_connected_componentd.py index 22f289768c..3df87e99f1 100644 --- a/tests/test_keep_largest_connected_componentd.py +++ b/tests/transforms/test_keep_largest_connected_componentd.py @@ -337,7 +337,6 @@ class TestKeepLargestConnectedComponentd(unittest.TestCase): - @parameterized.expand(VALID_CASES) def test_correct_results(self, _, args, input_dict, expected): converter = KeepLargestConnectedComponentd(**args) diff --git a/tests/test_label_filter.py b/tests/transforms/test_label_filter.py similarity index 99% rename from tests/test_label_filter.py rename to tests/transforms/test_label_filter.py index 036219b42d..414dfc5e7e 100644 --- a/tests/test_label_filter.py +++ b/tests/transforms/test_label_filter.py @@ -58,7 +58,6 @@ class TestLabelFilter(unittest.TestCase): - @parameterized.expand(VALID_TESTS) def test_correct_results(self, _, args, input_image, expected): converter = LabelFilter(**args) diff --git a/tests/test_label_to_contour.py b/tests/transforms/test_label_to_contour.py similarity index 99% rename from tests/test_label_to_contour.py rename to tests/transforms/test_label_to_contour.py index 07f600e2f8..3ec94e1d13 100644 --- a/tests/test_label_to_contour.py +++ b/tests/transforms/test_label_to_contour.py @@ -142,7 +142,6 @@ def gen_fixed_img(array_type): class TestContour(unittest.TestCase): - def test_contour(self): input_param = {"kernel_type": "Laplace"} diff --git a/tests/test_label_to_contourd.py b/tests/transforms/test_label_to_contourd.py similarity index 99% rename from tests/test_label_to_contourd.py rename to tests/transforms/test_label_to_contourd.py index 157bbadbbd..1d31ec8a38 100644 --- a/tests/test_label_to_contourd.py +++ b/tests/transforms/test_label_to_contourd.py @@ -143,7 +143,6 @@ def gen_fixed_img(array_type): class TestContourd(unittest.TestCase): - def test_contour(self): input_param = {"keys": "img", "kernel_type": "Laplace"} diff --git a/tests/test_label_to_mask.py b/tests/transforms/test_label_to_mask.py similarity index 99% rename from tests/test_label_to_mask.py rename to tests/transforms/test_label_to_mask.py index f31bd71158..66b3f09294 100644 --- a/tests/test_label_to_mask.py +++ b/tests/transforms/test_label_to_mask.py @@ -59,7 +59,6 @@ class TestLabelToMask(unittest.TestCase): - @parameterized.expand(TESTS) def test_value(self, arguments, image, expected_data): result = LabelToMask(**arguments)(image) diff --git a/tests/test_label_to_maskd.py b/tests/transforms/test_label_to_maskd.py similarity index 99% rename from tests/test_label_to_maskd.py rename to tests/transforms/test_label_to_maskd.py index 521853116e..051806e3eb 100644 --- a/tests/test_label_to_maskd.py +++ b/tests/transforms/test_label_to_maskd.py @@ -59,7 +59,6 @@ class TestLabelToMaskd(unittest.TestCase): - @parameterized.expand(TESTS) def test_value(self, arguments, input_data, expected_data): 
result = LabelToMaskd(**arguments)(input_data) diff --git a/tests/test_load_image.py b/tests/transforms/test_load_image.py similarity index 99% rename from tests/test_load_image.py rename to tests/transforms/test_load_image.py index 2bd9d64078..930a18f2ee 100644 --- a/tests/test_load_image.py +++ b/tests/transforms/test_load_image.py @@ -186,7 +186,6 @@ def get_data(self, _obj): @unittest.skipUnless(has_itk, "itk not installed") class TestLoadImage(unittest.TestCase): - @classmethod def setUpClass(cls): super(__class__, cls).setUpClass() @@ -470,7 +469,6 @@ def test_channel_dim(self, input_param, filename, expected_shape): @unittest.skipUnless(has_itk, "itk not installed") class TestLoadImageMeta(unittest.TestCase): - @classmethod def setUpClass(cls): super(__class__, cls).setUpClass() diff --git a/tests/test_load_imaged.py b/tests/transforms/test_load_imaged.py similarity index 99% rename from tests/test_load_imaged.py rename to tests/transforms/test_load_imaged.py index 62663fa1b3..27ed993022 100644 --- a/tests/test_load_imaged.py +++ b/tests/transforms/test_load_imaged.py @@ -46,7 +46,6 @@ @unittest.skipUnless(has_itk, "itk not installed") class TestLoadImaged(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) def test_shape(self, input_param, expected_shape): test_image = nib.Nifti1Image(np.random.rand(128, 128, 128), np.eye(4)) @@ -95,7 +94,6 @@ def test_no_file(self): @unittest.skipUnless(has_itk, "itk not installed") class TestConsistency(unittest.TestCase): - def _cmp(self, filename, ch_shape, reader_1, reader_2, outname, ext): data_dict = {"img": filename} keys = data_dict.keys() @@ -157,7 +155,6 @@ def test_png(self): @unittest.skipUnless(has_itk, "itk not installed") class TestLoadImagedMeta(unittest.TestCase): - @classmethod def setUpClass(cls): super(__class__, cls).setUpClass() diff --git a/tests/test_load_spacing_orientation.py b/tests/transforms/test_load_spacing_orientation.py similarity index 97% rename from tests/test_load_spacing_orientation.py rename to tests/transforms/test_load_spacing_orientation.py index cbc730e1bb..8ba3c09789 100644 --- a/tests/test_load_spacing_orientation.py +++ b/tests/transforms/test_load_spacing_orientation.py @@ -14,6 +14,7 @@ import os import time import unittest +from pathlib import Path import nibabel import numpy as np @@ -23,14 +24,13 @@ from monai.transforms import Compose, EnsureChannelFirstd, LoadImaged, Orientationd, Spacingd +TESTS_PATH = Path(__file__).parents[1] FILES = tuple( - os.path.join(os.path.dirname(__file__), "testing_data", filename) - for filename in ("anatomical.nii", "reoriented_anat_moved.nii") + os.path.join(TESTS_PATH, "testing_data", filename) for filename in ("anatomical.nii", "reoriented_anat_moved.nii") ) class TestLoadSpacingOrientation(unittest.TestCase): - @staticmethod def load_image(filename): data = {"image": filename} diff --git a/tests/test_map_and_generate_sampling_centers.py b/tests/transforms/test_map_and_generate_sampling_centers.py similarity index 99% rename from tests/test_map_and_generate_sampling_centers.py rename to tests/transforms/test_map_and_generate_sampling_centers.py index 5868597ad6..ac16b04be9 100644 --- a/tests/test_map_and_generate_sampling_centers.py +++ b/tests/transforms/test_map_and_generate_sampling_centers.py @@ -63,7 +63,6 @@ class TestMapAndGenerateSamplingCenters(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) def test_map_and_generate_sampling_centers(self, input_data, expected_type, expected_count, expected_shape): results = 
[] diff --git a/tests/test_map_binary_to_indices.py b/tests/transforms/test_map_binary_to_indices.py similarity index 99% rename from tests/test_map_binary_to_indices.py rename to tests/transforms/test_map_binary_to_indices.py index e37adbc26c..7ba9883700 100644 --- a/tests/test_map_binary_to_indices.py +++ b/tests/transforms/test_map_binary_to_indices.py @@ -64,7 +64,6 @@ class TestMapBinaryToIndices(unittest.TestCase): - @parameterized.expand(TESTS) def test_type_shape(self, input_data, expected_fg, expected_bg): fg_indices, bg_indices = map_binary_to_indices(**input_data) diff --git a/tests/test_map_classes_to_indices.py b/tests/transforms/test_map_classes_to_indices.py similarity index 99% rename from tests/test_map_classes_to_indices.py rename to tests/transforms/test_map_classes_to_indices.py index c7b5c5bea0..f626705195 100644 --- a/tests/test_map_classes_to_indices.py +++ b/tests/transforms/test_map_classes_to_indices.py @@ -124,7 +124,6 @@ class TestMapClassesToIndices(unittest.TestCase): - @parameterized.expand(TESTS) def test_value(self, input_data, expected_indices): indices = map_classes_to_indices(**input_data) diff --git a/tests/test_map_label_value.py b/tests/transforms/test_map_label_value.py similarity index 99% rename from tests/test_map_label_value.py rename to tests/transforms/test_map_label_value.py index 4e64dc5272..ea22e960c0 100644 --- a/tests/test_map_label_value.py +++ b/tests/transforms/test_map_label_value.py @@ -75,7 +75,6 @@ class TestMapLabelValue(unittest.TestCase): - @parameterized.expand(TESTS) def test_shape(self, input_param, input_data, expected_value): result = MapLabelValue(**input_param)(input_data) diff --git a/tests/test_map_label_valued.py b/tests/transforms/test_map_label_valued.py similarity index 99% rename from tests/test_map_label_valued.py rename to tests/transforms/test_map_label_valued.py index afc71ab21d..521c2a1872 100644 --- a/tests/test_map_label_valued.py +++ b/tests/transforms/test_map_label_valued.py @@ -69,7 +69,6 @@ class TestMapLabelValued(unittest.TestCase): - @parameterized.expand( [TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_5_1, TEST_CASE_6, TEST_CASE_7] ) diff --git a/tests/test_map_transform.py b/tests/transforms/test_map_transform.py similarity index 100% rename from tests/test_map_transform.py rename to tests/transforms/test_map_transform.py diff --git a/tests/test_mask_intensity.py b/tests/transforms/test_mask_intensity.py similarity index 99% rename from tests/test_mask_intensity.py rename to tests/transforms/test_mask_intensity.py index 3c788029f5..7ab3994550 100644 --- a/tests/test_mask_intensity.py +++ b/tests/transforms/test_mask_intensity.py @@ -55,7 +55,6 @@ class TestMaskIntensity(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5]) def test_value(self, arguments, image, expected_data): for p in TEST_NDARRAYS: diff --git a/tests/test_mask_intensityd.py b/tests/transforms/test_mask_intensityd.py similarity index 100% rename from tests/test_mask_intensityd.py rename to tests/transforms/test_mask_intensityd.py diff --git a/tests/test_mean_ensemble.py b/tests/transforms/test_mean_ensemble.py similarity index 99% rename from tests/test_mean_ensemble.py rename to tests/transforms/test_mean_ensemble.py index eddfe47281..55bd6b1422 100644 --- a/tests/test_mean_ensemble.py +++ b/tests/transforms/test_mean_ensemble.py @@ -58,7 +58,6 @@ class TestMeanEnsemble(unittest.TestCase): - @parameterized.expand(TESTS) def test_value(self, 
input_param, img, expected_value): result = MeanEnsemble(**input_param)(img) diff --git a/tests/test_mean_ensembled.py b/tests/transforms/test_mean_ensembled.py similarity index 99% rename from tests/test_mean_ensembled.py rename to tests/transforms/test_mean_ensembled.py index 631b0d3a92..ed6280e8b9 100644 --- a/tests/test_mean_ensembled.py +++ b/tests/transforms/test_mean_ensembled.py @@ -72,7 +72,6 @@ class TestMeanEnsembled(unittest.TestCase): - @parameterized.expand(TESTS) def test_value(self, input_param, data, expected_value): result = MeanEnsembled(**input_param)(data) diff --git a/tests/test_median_smooth.py b/tests/transforms/test_median_smooth.py similarity index 99% rename from tests/test_median_smooth.py rename to tests/transforms/test_median_smooth.py index 96f273b4cb..97618a0b43 100644 --- a/tests/test_median_smooth.py +++ b/tests/transforms/test_median_smooth.py @@ -31,7 +31,6 @@ class TestMedianSmooth(unittest.TestCase): - @parameterized.expand(TESTS) def test_value(self, arguments, image, expected_data): result = MedianSmooth(**arguments)(image) diff --git a/tests/test_median_smoothd.py b/tests/transforms/test_median_smoothd.py similarity index 99% rename from tests/test_median_smoothd.py rename to tests/transforms/test_median_smoothd.py index 0ca282991a..24e325d107 100644 --- a/tests/test_median_smoothd.py +++ b/tests/transforms/test_median_smoothd.py @@ -55,7 +55,6 @@ class TestMedianSmoothd(unittest.TestCase): - @parameterized.expand(TESTS) def test_value(self, arguments, image, expected_data): result = MedianSmoothd(**arguments)(image) diff --git a/tests/test_morphological_ops.py b/tests/transforms/test_morphological_ops.py similarity index 99% rename from tests/test_morphological_ops.py rename to tests/transforms/test_morphological_ops.py index b43f382753..1999bdc99a 100644 --- a/tests/test_morphological_ops.py +++ b/tests/transforms/test_morphological_ops.py @@ -77,7 +77,6 @@ class TestMorph(unittest.TestCase): - @parameterized.expand(TESTS_SHAPE) def test_shape(self, input_data, expected_result): result1 = erode(input_data["mask"], input_data["filter_size"]) diff --git a/tests/test_nifti_endianness.py b/tests/transforms/test_nifti_endianness.py similarity index 100% rename from tests/test_nifti_endianness.py rename to tests/transforms/test_nifti_endianness.py diff --git a/tests/test_normalize_intensity.py b/tests/transforms/test_normalize_intensity.py similarity index 99% rename from tests/test_normalize_intensity.py rename to tests/transforms/test_normalize_intensity.py index b427264b0f..c58bc587f2 100644 --- a/tests/test_normalize_intensity.py +++ b/tests/transforms/test_normalize_intensity.py @@ -83,7 +83,6 @@ class TestNormalizeIntensity(NumpyImageTestCase2D): - @parameterized.expand([[p] for p in TEST_NDARRAYS]) def test_default(self, im_type): im = im_type(self.imt.copy()) diff --git a/tests/test_normalize_intensityd.py b/tests/transforms/test_normalize_intensityd.py similarity index 99% rename from tests/test_normalize_intensityd.py rename to tests/transforms/test_normalize_intensityd.py index d9bc14d95a..b8e4c7bca8 100644 --- a/tests/test_normalize_intensityd.py +++ b/tests/transforms/test_normalize_intensityd.py @@ -51,7 +51,6 @@ class TestNormalizeIntensityd(NumpyImageTestCase2D): - @parameterized.expand([[p] for p in TEST_NDARRAYS]) def test_image_normalize_intensityd(self, im_type): key = "img" diff --git a/tests/test_nvtx_decorator.py b/tests/transforms/test_nvtx_decorator.py similarity index 99% rename from tests/test_nvtx_decorator.py rename to 
tests/transforms/test_nvtx_decorator.py index 70da469a65..74fdd5fa28 100644 --- a/tests/test_nvtx_decorator.py +++ b/tests/transforms/test_nvtx_decorator.py @@ -72,7 +72,6 @@ @unittest.skipUnless(has_nvtx, "Required torch._C._nvtx for NVTX Range!") class TestNVTXRangeDecorator(unittest.TestCase): - @parameterized.expand([TEST_CASE_ARRAY_0, TEST_CASE_ARRAY_1]) def test_tranform_array(self, input): transforms = Compose([Range("random flip")(Flip()), Range()(ToTensor())]) diff --git a/tests/test_nvtx_transform.py b/tests/transforms/test_nvtx_transform.py similarity index 100% rename from tests/test_nvtx_transform.py rename to tests/transforms/test_nvtx_transform.py diff --git a/tests/test_orientation.py b/tests/transforms/test_orientation.py similarity index 99% rename from tests/test_orientation.py rename to tests/transforms/test_orientation.py index 17482cd41d..fee287dd5b 100644 --- a/tests/test_orientation.py +++ b/tests/transforms/test_orientation.py @@ -177,7 +177,6 @@ class TestOrientationCase(unittest.TestCase): - @parameterized.expand(TESTS) def test_ornt_meta( self, diff --git a/tests/test_orientationd.py b/tests/transforms/test_orientationd.py similarity index 99% rename from tests/test_orientationd.py rename to tests/transforms/test_orientationd.py index 24c1644557..3fe52b0b8a 100644 --- a/tests/test_orientationd.py +++ b/tests/transforms/test_orientationd.py @@ -65,7 +65,6 @@ class TestOrientationdCase(unittest.TestCase): - @parameterized.expand(TESTS) def test_orntd( self, init_param, img: torch.Tensor, affine: torch.Tensor | None, expected_shape, expected_code, device diff --git a/tests/test_rand_adjust_contrast.py b/tests/transforms/test_rand_adjust_contrast.py similarity index 99% rename from tests/test_rand_adjust_contrast.py rename to tests/transforms/test_rand_adjust_contrast.py index 777f14bcfe..36d0724e83 100644 --- a/tests/test_rand_adjust_contrast.py +++ b/tests/transforms/test_rand_adjust_contrast.py @@ -25,7 +25,6 @@ class TestRandAdjustContrast(NumpyImageTestCase2D): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) def test_correct_results(self, gamma): adjuster = RandAdjustContrast(prob=1.0, gamma=gamma) diff --git a/tests/test_rand_adjust_contrastd.py b/tests/transforms/test_rand_adjust_contrastd.py similarity index 99% rename from tests/test_rand_adjust_contrastd.py rename to tests/transforms/test_rand_adjust_contrastd.py index d18782580e..ab5ae5ef9b 100644 --- a/tests/test_rand_adjust_contrastd.py +++ b/tests/transforms/test_rand_adjust_contrastd.py @@ -25,7 +25,6 @@ class TestRandAdjustContrastd(NumpyImageTestCase2D): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) def test_correct_results(self, gamma): adjuster = RandAdjustContrastd("img", prob=1.0, gamma=gamma) diff --git a/tests/test_rand_affine.py b/tests/transforms/test_rand_affine.py similarity index 99% rename from tests/test_rand_affine.py rename to tests/transforms/test_rand_affine.py index 6b544d2be2..7b07d5f09d 100644 --- a/tests/test_rand_affine.py +++ b/tests/transforms/test_rand_affine.py @@ -140,7 +140,6 @@ class TestRandAffine(unittest.TestCase): - @parameterized.expand(TESTS) def test_rand_affine(self, input_param, input_data, expected_val): g = RandAffine(**input_param) diff --git a/tests/test_rand_affine_grid.py b/tests/transforms/test_rand_affine_grid.py similarity index 99% rename from tests/test_rand_affine_grid.py rename to tests/transforms/test_rand_affine_grid.py index 0912abc297..c27b44baa9 100644 --- a/tests/test_rand_affine_grid.py +++ 
b/tests/transforms/test_rand_affine_grid.py @@ -198,7 +198,6 @@ class TestRandAffineGrid(unittest.TestCase): - @parameterized.expand(TESTS) def test_rand_affine_grid(self, input_param, input_data, expected_val): g = RandAffineGrid(**input_param) diff --git a/tests/test_rand_affined.py b/tests/transforms/test_rand_affined.py similarity index 99% rename from tests/test_rand_affined.py rename to tests/transforms/test_rand_affined.py index 83848e7482..1c55a936d8 100644 --- a/tests/test_rand_affined.py +++ b/tests/transforms/test_rand_affined.py @@ -216,7 +216,6 @@ class TestRandAffined(unittest.TestCase): - @parameterized.expand(x + [y] for x, y in itertools.product(TESTS, (False, True))) def test_rand_affined(self, input_param, input_data, expected_val, track_meta): set_track_meta(track_meta) diff --git a/tests/test_rand_axis_flip.py b/tests/transforms/test_rand_axis_flip.py similarity index 99% rename from tests/test_rand_axis_flip.py rename to tests/transforms/test_rand_axis_flip.py index 476cfeca16..6904f77462 100644 --- a/tests/test_rand_axis_flip.py +++ b/tests/transforms/test_rand_axis_flip.py @@ -23,7 +23,6 @@ class TestRandAxisFlip(NumpyImageTestCase2D): - def test_correct_results(self): for p in TEST_NDARRAYS_ALL: flip = RandAxisFlip(prob=1.0) diff --git a/tests/test_rand_axis_flipd.py b/tests/transforms/test_rand_axis_flipd.py similarity index 99% rename from tests/test_rand_axis_flipd.py rename to tests/transforms/test_rand_axis_flipd.py index e0ae28cf37..59da54c5f1 100644 --- a/tests/test_rand_axis_flipd.py +++ b/tests/transforms/test_rand_axis_flipd.py @@ -23,7 +23,6 @@ class TestRandAxisFlip(NumpyImageTestCase3D): - def test_correct_results(self): for p in TEST_NDARRAYS_ALL: flip = RandAxisFlipd(keys="img", prob=1.0) diff --git a/tests/test_rand_bias_field.py b/tests/transforms/test_rand_bias_field.py similarity index 99% rename from tests/test_rand_bias_field.py rename to tests/transforms/test_rand_bias_field.py index 682e6a008a..2789338cd8 100644 --- a/tests/test_rand_bias_field.py +++ b/tests/transforms/test_rand_bias_field.py @@ -30,7 +30,6 @@ class TestRandBiasField(unittest.TestCase): - @parameterized.expand([TEST_CASES_2D, TEST_CASES_3D]) def test_output_shape(self, class_args, img_shape): for p in TEST_NDARRAYS: diff --git a/tests/test_rand_bias_fieldd.py b/tests/transforms/test_rand_bias_fieldd.py similarity index 100% rename from tests/test_rand_bias_fieldd.py rename to tests/transforms/test_rand_bias_fieldd.py diff --git a/tests/test_rand_coarse_dropout.py b/tests/transforms/test_rand_coarse_dropout.py similarity index 99% rename from tests/test_rand_coarse_dropout.py rename to tests/transforms/test_rand_coarse_dropout.py index 8df823d236..5d4173d163 100644 --- a/tests/test_rand_coarse_dropout.py +++ b/tests/transforms/test_rand_coarse_dropout.py @@ -63,7 +63,6 @@ class TestRandCoarseDropout(unittest.TestCase): - @parameterized.expand( [TEST_CASE_0, TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_6, TEST_CASE_7] ) diff --git a/tests/test_rand_coarse_dropoutd.py b/tests/transforms/test_rand_coarse_dropoutd.py similarity index 100% rename from tests/test_rand_coarse_dropoutd.py rename to tests/transforms/test_rand_coarse_dropoutd.py diff --git a/tests/test_rand_coarse_shuffle.py b/tests/transforms/test_rand_coarse_shuffle.py similarity index 100% rename from tests/test_rand_coarse_shuffle.py rename to tests/transforms/test_rand_coarse_shuffle.py diff --git a/tests/test_rand_coarse_shuffled.py b/tests/transforms/test_rand_coarse_shuffled.py 
similarity index 100% rename from tests/test_rand_coarse_shuffled.py rename to tests/transforms/test_rand_coarse_shuffled.py diff --git a/tests/test_rand_crop_by_label_classes.py b/tests/transforms/test_rand_crop_by_label_classes.py similarity index 99% rename from tests/test_rand_crop_by_label_classes.py rename to tests/transforms/test_rand_crop_by_label_classes.py index 12b235ea7f..84c75858e9 100644 --- a/tests/test_rand_crop_by_label_classes.py +++ b/tests/transforms/test_rand_crop_by_label_classes.py @@ -127,7 +127,6 @@ class TestRandCropByLabelClasses(unittest.TestCase): - @parameterized.expand(TESTS_INDICES + TESTS_SHAPE) def test_type_shape(self, input_param, input_data, expected_type, expected_shape): result = RandCropByLabelClasses(**input_param)(**input_data) diff --git a/tests/test_rand_crop_by_label_classesd.py b/tests/transforms/test_rand_crop_by_label_classesd.py similarity index 99% rename from tests/test_rand_crop_by_label_classesd.py rename to tests/transforms/test_rand_crop_by_label_classesd.py index 4fd415ec59..290165fbd4 100644 --- a/tests/test_rand_crop_by_label_classesd.py +++ b/tests/transforms/test_rand_crop_by_label_classesd.py @@ -120,7 +120,6 @@ class TestRandCropByLabelClassesd(unittest.TestCase): - @parameterized.expand(TESTS) def test_type_shape(self, input_param, input_data, expected_type, expected_shape): result = RandCropByLabelClassesd(**input_param)(input_data) diff --git a/tests/test_rand_crop_by_pos_neg_label.py b/tests/transforms/test_rand_crop_by_pos_neg_label.py similarity index 99% rename from tests/test_rand_crop_by_pos_neg_label.py rename to tests/transforms/test_rand_crop_by_pos_neg_label.py index ef7ae44987..85b6d0c65e 100644 --- a/tests/test_rand_crop_by_pos_neg_label.py +++ b/tests/transforms/test_rand_crop_by_pos_neg_label.py @@ -96,7 +96,6 @@ class TestRandCropByPosNegLabel(unittest.TestCase): - @staticmethod def convert_data_type(im_type, d, keys=("img", "image", "label")): out = deepcopy(d) diff --git a/tests/test_rand_crop_by_pos_neg_labeld.py b/tests/transforms/test_rand_crop_by_pos_neg_labeld.py similarity index 99% rename from tests/test_rand_crop_by_pos_neg_labeld.py rename to tests/transforms/test_rand_crop_by_pos_neg_labeld.py index 4a1b152d95..0724397163 100644 --- a/tests/test_rand_crop_by_pos_neg_labeld.py +++ b/tests/transforms/test_rand_crop_by_pos_neg_labeld.py @@ -107,7 +107,6 @@ class TestRandCropByPosNegLabeld(unittest.TestCase): - @staticmethod def convert_data_type(im_type, d, keys=("img", "image", "label")): out = deepcopy(d) diff --git a/tests/test_rand_cucim_dict_transform.py b/tests/transforms/test_rand_cucim_dict_transform.py similarity index 99% rename from tests/test_rand_cucim_dict_transform.py rename to tests/transforms/test_rand_cucim_dict_transform.py index d5cb1ad1c6..2bb2009c23 100644 --- a/tests/test_rand_cucim_dict_transform.py +++ b/tests/transforms/test_rand_cucim_dict_transform.py @@ -78,7 +78,6 @@ @unittest.skipUnless(HAS_CUPY, "CuPy is required.") @unittest.skipUnless(has_cut, "cuCIM transforms are required.") class TestRandCuCIMDict(unittest.TestCase): - @parameterized.expand( [ TEST_CASE_COLOR_JITTER_1, diff --git a/tests/test_rand_cucim_transform.py b/tests/transforms/test_rand_cucim_transform.py similarity index 99% rename from tests/test_rand_cucim_transform.py rename to tests/transforms/test_rand_cucim_transform.py index a7b4d8aecd..7406e71e41 100644 --- a/tests/test_rand_cucim_transform.py +++ b/tests/transforms/test_rand_cucim_transform.py @@ -78,7 +78,6 @@ @unittest.skipUnless(HAS_CUPY, "CuPy 
is required.") @unittest.skipUnless(has_cut, "cuCIM transforms are required.") class TestRandCuCIM(unittest.TestCase): - @parameterized.expand( [ TEST_CASE_COLOR_JITTER_1, diff --git a/tests/test_rand_deform_grid.py b/tests/transforms/test_rand_deform_grid.py similarity index 99% rename from tests/test_rand_deform_grid.py rename to tests/transforms/test_rand_deform_grid.py index 53a9e1195f..3e2cc4ee76 100644 --- a/tests/test_rand_deform_grid.py +++ b/tests/transforms/test_rand_deform_grid.py @@ -126,7 +126,6 @@ class TestRandDeformGrid(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_rand_deform_grid(self, input_param, input_data, expected_val): g = RandDeformGrid(**input_param) diff --git a/tests/test_rand_elastic_2d.py b/tests/transforms/test_rand_elastic_2d.py similarity index 99% rename from tests/test_rand_elastic_2d.py rename to tests/transforms/test_rand_elastic_2d.py index 7c3eefc389..920bdfab26 100644 --- a/tests/test_rand_elastic_2d.py +++ b/tests/transforms/test_rand_elastic_2d.py @@ -110,7 +110,6 @@ class TestRand2DElastic(unittest.TestCase): - @parameterized.expand(TESTS) def test_rand_2d_elastic(self, input_param, input_data, expected_val): g = Rand2DElastic(**input_param) diff --git a/tests/test_rand_elastic_3d.py b/tests/transforms/test_rand_elastic_3d.py similarity index 99% rename from tests/test_rand_elastic_3d.py rename to tests/transforms/test_rand_elastic_3d.py index df60bae710..b027f431fa 100644 --- a/tests/test_rand_elastic_3d.py +++ b/tests/transforms/test_rand_elastic_3d.py @@ -86,7 +86,6 @@ class TestRand3DElastic(unittest.TestCase): - @parameterized.expand(TESTS) def test_rand_3d_elastic(self, input_param, input_data, expected_val): g = Rand3DElastic(**input_param) diff --git a/tests/test_rand_elasticd_2d.py b/tests/transforms/test_rand_elasticd_2d.py similarity index 99% rename from tests/test_rand_elasticd_2d.py rename to tests/transforms/test_rand_elasticd_2d.py index 8a2b189531..90f171677a 100644 --- a/tests/test_rand_elasticd_2d.py +++ b/tests/transforms/test_rand_elasticd_2d.py @@ -160,7 +160,6 @@ class TestRand2DElasticd(unittest.TestCase): - @parameterized.expand(TESTS) def test_rand_2d_elasticd(self, input_param, input_data, expected_val): g = Rand2DElasticd(**input_param) diff --git a/tests/test_rand_elasticd_3d.py b/tests/transforms/test_rand_elasticd_3d.py similarity index 99% rename from tests/test_rand_elasticd_3d.py rename to tests/transforms/test_rand_elasticd_3d.py index 5d9242373c..ea47fbe80a 100644 --- a/tests/test_rand_elasticd_3d.py +++ b/tests/transforms/test_rand_elasticd_3d.py @@ -139,7 +139,6 @@ class TestRand3DElasticd(unittest.TestCase): - @parameterized.expand(TESTS) def test_rand_3d_elasticd(self, input_param, input_data, expected_val): g = Rand3DElasticd(**input_param) diff --git a/tests/test_rand_flip.py b/tests/transforms/test_rand_flip.py similarity index 99% rename from tests/test_rand_flip.py rename to tests/transforms/test_rand_flip.py index e15cd0b652..863b9f9704 100644 --- a/tests/test_rand_flip.py +++ b/tests/transforms/test_rand_flip.py @@ -28,7 +28,6 @@ class TestRandFlip(NumpyImageTestCase2D): - @parameterized.expand(INVALID_CASES) def test_invalid_inputs(self, _, spatial_axis, raises): with self.assertRaises(raises): diff --git a/tests/test_rand_flipd.py b/tests/transforms/test_rand_flipd.py similarity index 99% rename from tests/test_rand_flipd.py rename to tests/transforms/test_rand_flipd.py index e234eafbf0..3016cd2814 100644 --- a/tests/test_rand_flipd.py +++ b/tests/transforms/test_rand_flipd.py @@ 
-26,7 +26,6 @@ class TestRandFlipd(NumpyImageTestCase2D): - @parameterized.expand(VALID_CASES) def test_correct_results(self, _, spatial_axis): for p in TEST_NDARRAYS_ALL: diff --git a/tests/test_rand_gaussian_noise.py b/tests/transforms/test_rand_gaussian_noise.py similarity index 99% rename from tests/test_rand_gaussian_noise.py rename to tests/transforms/test_rand_gaussian_noise.py index e2f04acb94..e67abb6f37 100644 --- a/tests/test_rand_gaussian_noise.py +++ b/tests/transforms/test_rand_gaussian_noise.py @@ -28,7 +28,6 @@ class TestRandGaussianNoise(NumpyImageTestCase2D): - @parameterized.expand(TESTS) def test_correct_results(self, _, im_type, mean, std, sample_std): seed = 0 diff --git a/tests/test_rand_gaussian_noised.py b/tests/transforms/test_rand_gaussian_noised.py similarity index 99% rename from tests/test_rand_gaussian_noised.py rename to tests/transforms/test_rand_gaussian_noised.py index 2f3d97db25..a30d965915 100644 --- a/tests/test_rand_gaussian_noised.py +++ b/tests/transforms/test_rand_gaussian_noised.py @@ -30,7 +30,6 @@ class TestRandGaussianNoised(NumpyImageTestCase2D): - @parameterized.expand(TESTS) def test_correct_results(self, _, im_type, keys, mean, std, sample_std): gaussian_fn = RandGaussianNoised( diff --git a/tests/test_rand_gaussian_sharpen.py b/tests/transforms/test_rand_gaussian_sharpen.py similarity index 99% rename from tests/test_rand_gaussian_sharpen.py rename to tests/transforms/test_rand_gaussian_sharpen.py index 470be5bc98..83c3f9d0de 100644 --- a/tests/test_rand_gaussian_sharpen.py +++ b/tests/transforms/test_rand_gaussian_sharpen.py @@ -128,7 +128,6 @@ class TestRandGaussianSharpen(unittest.TestCase): - @parameterized.expand(TESTS) def test_value(self, arguments, image, expected_data): converter = RandGaussianSharpen(**arguments) diff --git a/tests/test_rand_gaussian_sharpend.py b/tests/transforms/test_rand_gaussian_sharpend.py similarity index 99% rename from tests/test_rand_gaussian_sharpend.py rename to tests/transforms/test_rand_gaussian_sharpend.py index 564b79bb36..034047b530 100644 --- a/tests/test_rand_gaussian_sharpend.py +++ b/tests/transforms/test_rand_gaussian_sharpend.py @@ -131,7 +131,6 @@ class TestRandGaussianSharpend(unittest.TestCase): - @parameterized.expand(TESTS) def test_value(self, arguments, image, expected_data): converter = RandGaussianSharpend(**arguments) diff --git a/tests/test_rand_gaussian_smooth.py b/tests/transforms/test_rand_gaussian_smooth.py similarity index 99% rename from tests/test_rand_gaussian_smooth.py rename to tests/transforms/test_rand_gaussian_smooth.py index 1edb303bbc..82392746a3 100644 --- a/tests/test_rand_gaussian_smooth.py +++ b/tests/transforms/test_rand_gaussian_smooth.py @@ -86,7 +86,6 @@ class TestRandGaussianSmooth(unittest.TestCase): - @parameterized.expand(TESTS) def test_value(self, arguments, image, expected_data): converter = RandGaussianSmooth(**arguments) diff --git a/tests/test_rand_gaussian_smoothd.py b/tests/transforms/test_rand_gaussian_smoothd.py similarity index 99% rename from tests/test_rand_gaussian_smoothd.py rename to tests/transforms/test_rand_gaussian_smoothd.py index 10f26173db..67bdd6946c 100644 --- a/tests/test_rand_gaussian_smoothd.py +++ b/tests/transforms/test_rand_gaussian_smoothd.py @@ -86,7 +86,6 @@ class TestRandGaussianSmoothd(unittest.TestCase): - @parameterized.expand(TESTS) def test_value(self, arguments, image, expected_data): converter = RandGaussianSmoothd(**arguments) diff --git a/tests/test_rand_gibbs_noise.py 
b/tests/transforms/test_rand_gibbs_noise.py similarity index 99% rename from tests/test_rand_gibbs_noise.py rename to tests/transforms/test_rand_gibbs_noise.py index b779426206..21a17fd688 100644 --- a/tests/test_rand_gibbs_noise.py +++ b/tests/transforms/test_rand_gibbs_noise.py @@ -32,7 +32,6 @@ class TestRandGibbsNoise(unittest.TestCase): - def setUp(self): set_determinism(0) super().setUp() diff --git a/tests/test_rand_gibbs_noised.py b/tests/transforms/test_rand_gibbs_noised.py similarity index 99% rename from tests/test_rand_gibbs_noised.py rename to tests/transforms/test_rand_gibbs_noised.py index 47762fae4d..9cdd35a171 100644 --- a/tests/test_rand_gibbs_noised.py +++ b/tests/transforms/test_rand_gibbs_noised.py @@ -34,7 +34,6 @@ class TestRandGibbsNoised(unittest.TestCase): - def setUp(self): set_determinism(0) super().setUp() diff --git a/tests/test_rand_grid_distortion.py b/tests/transforms/test_rand_grid_distortion.py similarity index 99% rename from tests/test_rand_grid_distortion.py rename to tests/transforms/test_rand_grid_distortion.py index 98b470c468..4e0cda2d19 100644 --- a/tests/test_rand_grid_distortion.py +++ b/tests/transforms/test_rand_grid_distortion.py @@ -84,7 +84,6 @@ class TestRandGridDistortion(unittest.TestCase): - @parameterized.expand(TESTS) def test_rand_grid_distortion(self, input_param, seed, input_data, expected_val): g = RandGridDistortion(**input_param) diff --git a/tests/test_rand_grid_distortiond.py b/tests/transforms/test_rand_grid_distortiond.py similarity index 99% rename from tests/test_rand_grid_distortiond.py rename to tests/transforms/test_rand_grid_distortiond.py index ad03dd4642..8f8de144f6 100644 --- a/tests/test_rand_grid_distortiond.py +++ b/tests/transforms/test_rand_grid_distortiond.py @@ -77,7 +77,6 @@ class TestRandGridDistortiond(unittest.TestCase): - @parameterized.expand(TESTS) def test_rand_grid_distortiond(self, input_param, seed, input_data, expected_val_img, expected_val_mask): g = RandGridDistortiond(**input_param) diff --git a/tests/test_rand_histogram_shift.py b/tests/transforms/test_rand_histogram_shift.py similarity index 99% rename from tests/test_rand_histogram_shift.py rename to tests/transforms/test_rand_histogram_shift.py index fceca8098a..e9dba31bbb 100644 --- a/tests/test_rand_histogram_shift.py +++ b/tests/transforms/test_rand_histogram_shift.py @@ -56,7 +56,6 @@ class TestRandHistogramShift(unittest.TestCase): - @parameterized.expand(TESTS) def test_rand_histogram_shift(self, input_param, input_data, expected_val): g = RandHistogramShift(**input_param) diff --git a/tests/test_rand_k_space_spike_noise.py b/tests/transforms/test_rand_k_space_spike_noise.py similarity index 99% rename from tests/test_rand_k_space_spike_noise.py rename to tests/transforms/test_rand_k_space_spike_noise.py index 3096896ac6..eb0fa154f4 100644 --- a/tests/test_rand_k_space_spike_noise.py +++ b/tests/transforms/test_rand_k_space_spike_noise.py @@ -29,7 +29,6 @@ class TestRandKSpaceSpikeNoise(unittest.TestCase): - def setUp(self): set_determinism(0) super().setUp() diff --git a/tests/test_rand_k_space_spike_noised.py b/tests/transforms/test_rand_k_space_spike_noised.py similarity index 99% rename from tests/test_rand_k_space_spike_noised.py rename to tests/transforms/test_rand_k_space_spike_noised.py index 12ad15f3cc..41fb1fff01 100644 --- a/tests/test_rand_k_space_spike_noised.py +++ b/tests/transforms/test_rand_k_space_spike_noised.py @@ -30,7 +30,6 @@ class TestKSpaceSpikeNoised(unittest.TestCase): - def setUp(self): set_determinism(0) 
super().setUp() diff --git a/tests/test_rand_rician_noise.py b/tests/transforms/test_rand_rician_noise.py similarity index 99% rename from tests/test_rand_rician_noise.py rename to tests/transforms/test_rand_rician_noise.py index 013d76656d..6f461230e8 100644 --- a/tests/test_rand_rician_noise.py +++ b/tests/transforms/test_rand_rician_noise.py @@ -27,7 +27,6 @@ class TestRandRicianNoise(NumpyImageTestCase2D): - @parameterized.expand(TESTS) def test_correct_results(self, _, in_type, mean, std): seed = 0 diff --git a/tests/test_rand_rician_noised.py b/tests/transforms/test_rand_rician_noised.py similarity index 99% rename from tests/test_rand_rician_noised.py rename to tests/transforms/test_rand_rician_noised.py index 9132d191e3..beff228f21 100644 --- a/tests/test_rand_rician_noised.py +++ b/tests/transforms/test_rand_rician_noised.py @@ -29,7 +29,6 @@ class TestRandRicianNoisedNumpy(NumpyImageTestCase2D): - @parameterized.expand(TESTS) def test_correct_results(self, _, in_type, keys, mean, std): rician_fn = RandRicianNoised(keys=keys, prob=1.0, mean=mean, std=std, dtype=np.float64) diff --git a/tests/test_rand_rotate.py b/tests/transforms/test_rand_rotate.py similarity index 99% rename from tests/test_rand_rotate.py rename to tests/transforms/test_rand_rotate.py index 41ac3f8179..d1240e6c6a 100644 --- a/tests/test_rand_rotate.py +++ b/tests/transforms/test_rand_rotate.py @@ -73,7 +73,6 @@ class TestRandRotate2D(NumpyImageTestCase2D): - @parameterized.expand(TEST_CASES_2D) def test_correct_results(self, im_type, degrees, keep_size, mode, padding_mode, align_corners): init_param = { @@ -113,7 +112,6 @@ def test_correct_results(self, im_type, degrees, keep_size, mode, padding_mode, @unittest.skipIf(USE_COMPILED, "unit tests not for compiled version.") class TestRandRotate3D(NumpyImageTestCase3D): - @parameterized.expand(TEST_CASES_3D) def test_correct_results(self, im_type, x, y, z, keep_size, mode, padding_mode, align_corners, expected): init_param = { @@ -148,7 +146,6 @@ def test_correct_results(self, im_type, x, y, z, keep_size, mode, padding_mode, class TestRandRotateDtype(NumpyImageTestCase2D): - @parameterized.expand(TEST_CASES_2D) def test_correct_results(self, im_type, degrees, keep_size, mode, padding_mode, align_corners): rotate_fn = RandRotate( diff --git a/tests/test_rand_rotate90.py b/tests/transforms/test_rand_rotate90.py similarity index 99% rename from tests/test_rand_rotate90.py rename to tests/transforms/test_rand_rotate90.py index 864cc3789d..b5e36f2dde 100644 --- a/tests/test_rand_rotate90.py +++ b/tests/transforms/test_rand_rotate90.py @@ -23,7 +23,6 @@ class TestRandRotate90(NumpyImageTestCase2D): - def test_default(self): rotate = RandRotate90() for p in TEST_NDARRAYS_ALL: diff --git a/tests/test_rand_rotate90d.py b/tests/transforms/test_rand_rotate90d.py similarity index 99% rename from tests/test_rand_rotate90d.py rename to tests/transforms/test_rand_rotate90d.py index c521a36e4c..0434f0b554 100644 --- a/tests/test_rand_rotate90d.py +++ b/tests/transforms/test_rand_rotate90d.py @@ -23,7 +23,6 @@ class TestRandRotate90d(NumpyImageTestCase2D): - def test_default(self): key = "test" rotate = RandRotate90d(keys=key) diff --git a/tests/test_rand_rotated.py b/tests/transforms/test_rand_rotated.py similarity index 99% rename from tests/test_rand_rotated.py rename to tests/transforms/test_rand_rotated.py index 1849cf0b00..46026b6fde 100644 --- a/tests/test_rand_rotated.py +++ b/tests/transforms/test_rand_rotated.py @@ -109,7 +109,6 @@ class 
TestRandRotated2D(NumpyImageTestCase2D): - @parameterized.expand(TEST_CASES_2D) def test_correct_results(self, im_type, degrees, keep_size, mode, padding_mode, align_corners): init_param = { @@ -154,7 +153,6 @@ def test_correct_results(self, im_type, degrees, keep_size, mode, padding_mode, @unittest.skipIf(USE_COMPILED, "unit tests not for compiled version.") class TestRandRotated3D(NumpyImageTestCase3D): - @parameterized.expand(TEST_CASES_3D) def test_correct_shapes(self, im_type, x, y, z, keep_size, mode, padding_mode, align_corners, expected): init_param = { diff --git a/tests/test_rand_scale_crop.py b/tests/transforms/test_rand_scale_crop.py similarity index 100% rename from tests/test_rand_scale_crop.py rename to tests/transforms/test_rand_scale_crop.py diff --git a/tests/test_rand_scale_cropd.py b/tests/transforms/test_rand_scale_cropd.py similarity index 100% rename from tests/test_rand_scale_cropd.py rename to tests/transforms/test_rand_scale_cropd.py diff --git a/tests/test_rand_scale_intensity.py b/tests/transforms/test_rand_scale_intensity.py similarity index 99% rename from tests/test_rand_scale_intensity.py rename to tests/transforms/test_rand_scale_intensity.py index febbe0058a..1b6ec56fdb 100644 --- a/tests/test_rand_scale_intensity.py +++ b/tests/transforms/test_rand_scale_intensity.py @@ -21,7 +21,6 @@ class TestRandScaleIntensity(NumpyImageTestCase2D): - @parameterized.expand([[p] for p in TEST_NDARRAYS]) def test_value(self, p): scaler = RandScaleIntensity(factors=0.5, prob=1.0) diff --git a/tests/test_rand_scale_intensity_fixed_mean.py b/tests/transforms/test_rand_scale_intensity_fixed_mean.py similarity index 99% rename from tests/test_rand_scale_intensity_fixed_mean.py rename to tests/transforms/test_rand_scale_intensity_fixed_mean.py index 4acec4fb5d..ac45a9d463 100644 --- a/tests/test_rand_scale_intensity_fixed_mean.py +++ b/tests/transforms/test_rand_scale_intensity_fixed_mean.py @@ -21,7 +21,6 @@ class TestRandScaleIntensity(NumpyImageTestCase2D): - @parameterized.expand([[p] for p in TEST_NDARRAYS]) def test_value(self, p): scaler = RandScaleIntensityFixedMean(prob=1.0, factors=0.5) diff --git a/tests/test_rand_scale_intensity_fixed_meand.py b/tests/transforms/test_rand_scale_intensity_fixed_meand.py similarity index 99% rename from tests/test_rand_scale_intensity_fixed_meand.py rename to tests/transforms/test_rand_scale_intensity_fixed_meand.py index 66058943e8..55111a4c2e 100644 --- a/tests/test_rand_scale_intensity_fixed_meand.py +++ b/tests/transforms/test_rand_scale_intensity_fixed_meand.py @@ -20,7 +20,6 @@ class TestRandScaleIntensityFixedMeand(NumpyImageTestCase2D): - def test_value(self): key = "img" for p in TEST_NDARRAYS: diff --git a/tests/test_rand_scale_intensityd.py b/tests/transforms/test_rand_scale_intensityd.py similarity index 99% rename from tests/test_rand_scale_intensityd.py rename to tests/transforms/test_rand_scale_intensityd.py index 4867369838..3a44ab60bb 100644 --- a/tests/test_rand_scale_intensityd.py +++ b/tests/transforms/test_rand_scale_intensityd.py @@ -20,7 +20,6 @@ class TestRandScaleIntensityd(NumpyImageTestCase2D): - def test_value(self): key = "img" for p in TEST_NDARRAYS: diff --git a/tests/test_rand_shift_intensity.py b/tests/transforms/test_rand_shift_intensity.py similarity index 99% rename from tests/test_rand_shift_intensity.py rename to tests/transforms/test_rand_shift_intensity.py index 0e1ab77fed..32021a6bb6 100644 --- a/tests/test_rand_shift_intensity.py +++ b/tests/transforms/test_rand_shift_intensity.py @@ -21,7 
+21,6 @@ class TestRandShiftIntensity(NumpyImageTestCase2D): - @parameterized.expand([[p] for p in TEST_NDARRAYS]) def test_value(self, p): shifter = RandShiftIntensity(offsets=1.0, prob=1.0) diff --git a/tests/test_rand_shift_intensityd.py b/tests/transforms/test_rand_shift_intensityd.py similarity index 99% rename from tests/test_rand_shift_intensityd.py rename to tests/transforms/test_rand_shift_intensityd.py index af4c1648d3..2faeb9288b 100644 --- a/tests/test_rand_shift_intensityd.py +++ b/tests/transforms/test_rand_shift_intensityd.py @@ -21,7 +21,6 @@ class TestRandShiftIntensityd(NumpyImageTestCase2D): - def test_value(self): key = "img" for p in TEST_NDARRAYS: diff --git a/tests/test_rand_simulate_low_resolution.py b/tests/transforms/test_rand_simulate_low_resolution.py similarity index 99% rename from tests/test_rand_simulate_low_resolution.py rename to tests/transforms/test_rand_simulate_low_resolution.py index 79e09b3f74..3a8032d152 100644 --- a/tests/test_rand_simulate_low_resolution.py +++ b/tests/transforms/test_rand_simulate_low_resolution.py @@ -71,7 +71,6 @@ class TestRandGaussianSmooth(unittest.TestCase): - @parameterized.expand(TESTS) def test_value(self, arguments, image, expected_data): randsimlowres = RandSimulateLowResolution(**arguments) diff --git a/tests/test_rand_simulate_low_resolutiond.py b/tests/transforms/test_rand_simulate_low_resolutiond.py similarity index 99% rename from tests/test_rand_simulate_low_resolutiond.py rename to tests/transforms/test_rand_simulate_low_resolutiond.py index 5b199a26f2..2a042aa92b 100644 --- a/tests/test_rand_simulate_low_resolutiond.py +++ b/tests/transforms/test_rand_simulate_low_resolutiond.py @@ -60,7 +60,6 @@ class TestRandGaussianSmoothd(unittest.TestCase): - @parameterized.expand(TESTS) def test_value(self, arguments, image, expected_data): converter = RandSimulateLowResolutiond(**arguments) diff --git a/tests/test_rand_spatial_crop.py b/tests/transforms/test_rand_spatial_crop.py similarity index 100% rename from tests/test_rand_spatial_crop.py rename to tests/transforms/test_rand_spatial_crop.py diff --git a/tests/test_rand_spatial_crop_samples.py b/tests/transforms/test_rand_spatial_crop_samples.py similarity index 100% rename from tests/test_rand_spatial_crop_samples.py rename to tests/transforms/test_rand_spatial_crop_samples.py diff --git a/tests/test_rand_spatial_crop_samplesd.py b/tests/transforms/test_rand_spatial_crop_samplesd.py similarity index 99% rename from tests/test_rand_spatial_crop_samplesd.py rename to tests/transforms/test_rand_spatial_crop_samplesd.py index 80600f769f..27cd845e52 100644 --- a/tests/test_rand_spatial_crop_samplesd.py +++ b/tests/transforms/test_rand_spatial_crop_samplesd.py @@ -90,7 +90,6 @@ class TestRandSpatialCropSamplesd(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, *TEST_CASE_2]) def test_shape(self, input_param, input_data, expected_shape, expected_last): xform = RandSpatialCropSamplesd(**input_param) diff --git a/tests/test_rand_spatial_cropd.py b/tests/transforms/test_rand_spatial_cropd.py similarity index 100% rename from tests/test_rand_spatial_cropd.py rename to tests/transforms/test_rand_spatial_cropd.py diff --git a/tests/test_rand_std_shift_intensity.py b/tests/transforms/test_rand_std_shift_intensity.py similarity index 99% rename from tests/test_rand_std_shift_intensity.py rename to tests/transforms/test_rand_std_shift_intensity.py index 66a7c2e4a9..300f630430 100644 --- a/tests/test_rand_std_shift_intensity.py +++ 
b/tests/transforms/test_rand_std_shift_intensity.py @@ -22,7 +22,6 @@ class TestRandStdShiftIntensity(NumpyImageTestCase2D): - @parameterized.expand([[p] for p in TEST_NDARRAYS]) def test_value(self, p): np.random.seed(0) diff --git a/tests/test_rand_std_shift_intensityd.py b/tests/transforms/test_rand_std_shift_intensityd.py similarity index 99% rename from tests/test_rand_std_shift_intensityd.py rename to tests/transforms/test_rand_std_shift_intensityd.py index c90a068641..a2d984b9ac 100644 --- a/tests/test_rand_std_shift_intensityd.py +++ b/tests/transforms/test_rand_std_shift_intensityd.py @@ -20,7 +20,6 @@ class TestRandStdShiftIntensityd(NumpyImageTestCase2D): - def test_value(self): for p in TEST_NDARRAYS: key = "img" diff --git a/tests/test_rand_zoom.py b/tests/transforms/test_rand_zoom.py similarity index 99% rename from tests/test_rand_zoom.py rename to tests/transforms/test_rand_zoom.py index a1d309bfc6..2824b197b6 100644 --- a/tests/test_rand_zoom.py +++ b/tests/transforms/test_rand_zoom.py @@ -33,7 +33,6 @@ class TestRandZoom(NumpyImageTestCase2D): - @parameterized.expand(VALID_CASES) def test_correct_results(self, min_zoom, max_zoom, mode, keep_size, align_corners=None): for p in TEST_NDARRAYS_ALL: diff --git a/tests/test_rand_zoomd.py b/tests/transforms/test_rand_zoomd.py similarity index 99% rename from tests/test_rand_zoomd.py rename to tests/transforms/test_rand_zoomd.py index bf98dd6e3e..1ad7bedbaf 100644 --- a/tests/test_rand_zoomd.py +++ b/tests/transforms/test_rand_zoomd.py @@ -31,7 +31,6 @@ class TestRandZoomd(NumpyImageTestCase2D): - @parameterized.expand(VALID_CASES) def test_correct_results(self, min_zoom, max_zoom, mode, align_corners, keep_size): key = "img" diff --git a/tests/test_randidentity.py b/tests/transforms/test_randidentity.py similarity index 99% rename from tests/test_randidentity.py rename to tests/transforms/test_randidentity.py index 65df216828..3344690b69 100644 --- a/tests/test_randidentity.py +++ b/tests/transforms/test_randidentity.py @@ -19,13 +19,11 @@ class T(mt.Transform): - def __call__(self, x): return x * 2 class TestIdentity(NumpyImageTestCase2D): - def test_identity(self): for p in TEST_NDARRAYS: img = p(self.imt) diff --git a/tests/test_random_order.py b/tests/transforms/test_random_order.py similarity index 98% rename from tests/test_random_order.py rename to tests/transforms/test_random_order.py index b38d2398fb..c7f8fe2742 100644 --- a/tests/test_random_order.py +++ b/tests/transforms/test_random_order.py @@ -26,11 +26,10 @@ from monai.transforms.compose import Compose from monai.utils import set_determinism from monai.utils.enums import TraceKeys -from tests.test_one_of import A, B, C, Inv, NonInv, X, Y +from tests.integration.test_one_of import A, B, C, Inv, NonInv, X, Y class InvC(Inv): - def __init__(self, keys): super().__init__(keys) self.fwd_fn = lambda x: x + 1 @@ -38,7 +37,6 @@ def __init__(self, keys): class InvD(Inv): - def __init__(self, keys): super().__init__(keys) self.fwd_fn = lambda x: x * 100 @@ -57,7 +55,6 @@ def __init__(self, keys): class TestRandomOrder(unittest.TestCase): - def test_empty_compose(self): c = RandomOrder() i = 1 @@ -116,7 +113,6 @@ def test_inverse(self, transform, invertible, use_metatensor): class TestRandomOrderAPITests(unittest.TestCase): - @staticmethod def data_from_keys(keys): if keys is None: diff --git a/tests/test_randtorchvisiond.py b/tests/transforms/test_randtorchvisiond.py similarity index 99% rename from tests/test_randtorchvisiond.py rename to 
tests/transforms/test_randtorchvisiond.py index 0606e854d5..e2324c4499 100644 --- a/tests/test_randtorchvisiond.py +++ b/tests/transforms/test_randtorchvisiond.py @@ -52,7 +52,6 @@ class TestRandTorchVisiond(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3]) def test_value(self, input_param, input_data, expected_value): set_determinism(seed=0) diff --git a/tests/test_regularization.py b/tests/transforms/test_regularization.py similarity index 99% rename from tests/test_regularization.py rename to tests/transforms/test_regularization.py index 120d574911..48768a8d25 100644 --- a/tests/test_regularization.py +++ b/tests/transforms/test_regularization.py @@ -21,7 +21,6 @@ class TestMixup(unittest.TestCase): - def test_mixup(self): for dims in [2, 3]: shape = (6, 3) + (32,) * dims @@ -74,7 +73,6 @@ def test_mixupd(self): class TestCutMix(unittest.TestCase): - def test_cutmix(self): for dims in [2, 3]: shape = (6, 3) + (32,) * dims @@ -99,7 +97,6 @@ def test_cutmixd(self): class TestCutOut(unittest.TestCase): - def test_cutout(self): for dims in [2, 3]: shape = (6, 3) + (32,) * dims diff --git a/tests/test_remove_repeated_channel.py b/tests/transforms/test_remove_repeated_channel.py similarity index 99% rename from tests/test_remove_repeated_channel.py rename to tests/transforms/test_remove_repeated_channel.py index fd03f39c70..122e68bcc1 100644 --- a/tests/test_remove_repeated_channel.py +++ b/tests/transforms/test_remove_repeated_channel.py @@ -24,7 +24,6 @@ class TestRemoveRepeatedChannel(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_shape(self, input_param, input_data, expected_shape): result = RemoveRepeatedChannel(**input_param)(input_data) diff --git a/tests/test_remove_repeated_channeld.py b/tests/transforms/test_remove_repeated_channeld.py similarity index 99% rename from tests/test_remove_repeated_channeld.py rename to tests/transforms/test_remove_repeated_channeld.py index d6c19af212..bd983cf185 100644 --- a/tests/test_remove_repeated_channeld.py +++ b/tests/transforms/test_remove_repeated_channeld.py @@ -34,7 +34,6 @@ class TestRemoveRepeatedChanneld(unittest.TestCase): - @parameterized.expand(TESTS) def test_shape(self, input_param, input_data, expected_shape): result = RemoveRepeatedChanneld(**input_param)(input_data) diff --git a/tests/test_repeat_channel.py b/tests/transforms/test_repeat_channel.py similarity index 99% rename from tests/test_repeat_channel.py rename to tests/transforms/test_repeat_channel.py index b2fc2ad71d..1859420479 100644 --- a/tests/test_repeat_channel.py +++ b/tests/transforms/test_repeat_channel.py @@ -24,7 +24,6 @@ class TestRepeatChannel(unittest.TestCase): - @parameterized.expand(TESTS) def test_shape(self, input_param, input_data, expected_shape): result = RepeatChannel(**input_param)(input_data) diff --git a/tests/test_repeat_channeld.py b/tests/transforms/test_repeat_channeld.py similarity index 99% rename from tests/test_repeat_channeld.py rename to tests/transforms/test_repeat_channeld.py index b38e09c28d..be1e298573 100644 --- a/tests/test_repeat_channeld.py +++ b/tests/transforms/test_repeat_channeld.py @@ -31,7 +31,6 @@ class TestRepeatChanneld(unittest.TestCase): - @parameterized.expand(TESTS) def test_shape(self, input_param, input_data, expected_shape): result = RepeatChanneld(**input_param)(input_data) diff --git a/tests/test_resample_backends.py b/tests/transforms/test_resample_backends.py similarity index 99% rename from tests/test_resample_backends.py rename to 
tests/transforms/test_resample_backends.py index a920d59b8f..e4ca3edc19 100644 --- a/tests/test_resample_backends.py +++ b/tests/transforms/test_resample_backends.py @@ -44,7 +44,6 @@ @SkipIfBeforePyTorchVersion((1, 9, 1)) class TestResampleBackends(unittest.TestCase): - @parameterized.expand(TEST_IDENTITY) def test_resample_identity(self, input_param, im_type, interp, padding, input_shape): """test resampling of an identity grid with padding 2, im_type, interp, padding, input_shape""" diff --git a/tests/test_resample_to_match.py b/tests/transforms/test_resample_to_match.py similarity index 99% rename from tests/test_resample_to_match.py rename to tests/transforms/test_resample_to_match.py index 4420b0b061..4b0f10898c 100644 --- a/tests/test_resample_to_match.py +++ b/tests/transforms/test_resample_to_match.py @@ -46,7 +46,6 @@ def get_rand_fname(len=10, suffix=".nii.gz"): @unittest.skipUnless(has_itk, "itk not installed") class TestResampleToMatch(unittest.TestCase): - @classmethod def setUpClass(cls): super(__class__, cls).setUpClass() diff --git a/tests/test_resample_to_matchd.py b/tests/transforms/test_resample_to_matchd.py similarity index 99% rename from tests/test_resample_to_matchd.py rename to tests/transforms/test_resample_to_matchd.py index fd8a419bfd..936d336a1f 100644 --- a/tests/test_resample_to_matchd.py +++ b/tests/transforms/test_resample_to_matchd.py @@ -36,7 +36,6 @@ def update_fname(d): class TestResampleToMatchd(unittest.TestCase): - @classmethod def setUpClass(cls): super(__class__, cls).setUpClass() diff --git a/tests/test_resampler.py b/tests/transforms/test_resampler.py similarity index 99% rename from tests/test_resampler.py rename to tests/transforms/test_resampler.py index 5e6d7d0e8e..0a0983c18e 100644 --- a/tests/test_resampler.py +++ b/tests/transforms/test_resampler.py @@ -152,7 +152,6 @@ class TestResample(unittest.TestCase): - @parameterized.expand(TESTS) def test_resample(self, input_param, input_data, expected_val): g = Resample(**input_param) diff --git a/tests/test_resize.py b/tests/transforms/test_resize.py similarity index 99% rename from tests/test_resize.py rename to tests/transforms/test_resize.py index 23784f5461..0e11035cf7 100644 --- a/tests/test_resize.py +++ b/tests/transforms/test_resize.py @@ -46,7 +46,6 @@ class TestResize(NumpyImageTestCase2D): - def test_invalid_inputs(self): with self.assertRaises(ValueError): resize = Resize(spatial_size=(128, 128, 3), mode="order") diff --git a/tests/test_resize_with_pad_or_crop.py b/tests/transforms/test_resize_with_pad_or_crop.py similarity index 99% rename from tests/test_resize_with_pad_or_crop.py rename to tests/transforms/test_resize_with_pad_or_crop.py index c80f7d38e8..bcda36adeb 100644 --- a/tests/test_resize_with_pad_or_crop.py +++ b/tests/transforms/test_resize_with_pad_or_crop.py @@ -48,7 +48,6 @@ class TestResizeWithPadOrCrop(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_pad_shape(self, input_param, input_shape, expected_shape, _): for p in TEST_NDARRAYS_ALL: diff --git a/tests/test_resize_with_pad_or_cropd.py b/tests/transforms/test_resize_with_pad_or_cropd.py similarity index 98% rename from tests/test_resize_with_pad_or_cropd.py rename to tests/transforms/test_resize_with_pad_or_cropd.py index 04f7c16622..2e162d3f69 100644 --- a/tests/test_resize_with_pad_or_cropd.py +++ b/tests/transforms/test_resize_with_pad_or_cropd.py @@ -21,8 +21,8 @@ from monai.data.meta_tensor import MetaTensor from monai.transforms import ResizeWithPadOrCropd from 
monai.transforms.lazy.functional import apply_pending -from tests.test_resize_with_pad_or_crop import TESTS_PENDING_MODE from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose, pytorch_after +from tests.transforms.test_resize_with_pad_or_crop import TESTS_PENDING_MODE TEST_CASES = [ [{"keys": "img", "spatial_size": [15, 8, 8], "mode": "constant"}, {"img": np.zeros((3, 8, 8, 4))}, (3, 15, 8, 8)], @@ -46,7 +46,6 @@ class TestResizeWithPadOrCropd(unittest.TestCase): - @parameterized.expand(TEST_CASES) def test_pad_shape(self, input_param, input_data, expected_val): for p in TEST_NDARRAYS_ALL: diff --git a/tests/test_resized.py b/tests/transforms/test_resized.py similarity index 99% rename from tests/test_resized.py rename to tests/transforms/test_resized.py index 35491a9eb0..926e0fa58a 100644 --- a/tests/test_resized.py +++ b/tests/transforms/test_resized.py @@ -66,7 +66,6 @@ @SkipIfAtLeastPyTorchVersion((2, 2, 0)) # https://github.com/Project-MONAI/MONAI/issues/7445 class TestResized(NumpyImageTestCase2D): - def test_invalid_inputs(self): with self.assertRaises(ValueError): resize = Resized(keys="img", spatial_size=(128, 128, 3), mode="order") diff --git a/tests/test_rotate.py b/tests/transforms/test_rotate.py similarity index 99% rename from tests/test_rotate.py rename to tests/transforms/test_rotate.py index fda1d212a8..709a3bd6f4 100644 --- a/tests/test_rotate.py +++ b/tests/transforms/test_rotate.py @@ -58,7 +58,6 @@ class TestRotate2D(NumpyImageTestCase2D): - @parameterized.expand(TEST_CASES_2D) def test_correct_results(self, im_type, angle, keep_size, mode, padding_mode, align_corners): init_param = { @@ -97,7 +96,6 @@ def test_correct_results(self, im_type, angle, keep_size, mode, padding_mode, al class TestRotate3D(NumpyImageTestCase3D): - @parameterized.expand(TEST_CASES_3D) def test_correct_results(self, im_type, angle, keep_size, mode, padding_mode, align_corners): init_param = { diff --git a/tests/test_rotate90.py b/tests/transforms/test_rotate90.py similarity index 99% rename from tests/test_rotate90.py rename to tests/transforms/test_rotate90.py index 93e4f19603..9c71004c65 100644 --- a/tests/test_rotate90.py +++ b/tests/transforms/test_rotate90.py @@ -31,7 +31,6 @@ class TestRotate90(NumpyImageTestCase2D): - def test_rotate90_default(self): rotate = Rotate90() for p in TEST_NDARRAYS_ALL: @@ -103,7 +102,6 @@ def test_prob_k_spatial_axes(self): class TestRotate903d(NumpyImageTestCase3D): - def test_rotate90_default(self): rotate = Rotate90() for p in TEST_NDARRAYS_ALL: @@ -171,7 +169,6 @@ def test_prob_k_spatial_axes(self): @unittest.skipUnless(optional_import("scipy")[1], "Requires scipy library.") class TestRot90Consistency(unittest.TestCase): - @parameterized.expand([[2], [3], [4]]) def test_affine_rot90(self, s): """s""" diff --git a/tests/test_rotate90d.py b/tests/transforms/test_rotate90d.py similarity index 99% rename from tests/test_rotate90d.py rename to tests/transforms/test_rotate90d.py index 09adfd3411..d9f8aeea8a 100644 --- a/tests/test_rotate90d.py +++ b/tests/transforms/test_rotate90d.py @@ -22,7 +22,6 @@ class TestRotate90d(NumpyImageTestCase2D): - def test_rotate90_default(self): key = "test" rotate = Rotate90d(keys=key) diff --git a/tests/test_rotated.py b/tests/transforms/test_rotated.py similarity index 99% rename from tests/test_rotated.py rename to tests/transforms/test_rotated.py index 904cf3718c..09c2304c6b 100644 --- a/tests/test_rotated.py +++ b/tests/transforms/test_rotated.py @@ -43,7 +43,6 @@ @unittest.skipIf(USE_COMPILED, "unittests are not 
designed for both USE_COMPILED=True/False") class TestRotated2D(NumpyImageTestCase2D): - @parameterized.expand(TEST_CASES_2D) def test_correct_results(self, im_type, angle, keep_size, mode, padding_mode, align_corners): init_param = { @@ -95,7 +94,6 @@ def test_correct_results(self, im_type, angle, keep_size, mode, padding_mode, al @unittest.skipIf(USE_COMPILED, "unittests are not designed for both USE_COMPILED=True/False") class TestRotated3D(NumpyImageTestCase3D): - @parameterized.expand(TEST_CASES_3D) def test_correct_results(self, im_type, angle, keep_size, mode, padding_mode, align_corners): init_param = { @@ -145,7 +143,6 @@ def test_correct_results(self, im_type, angle, keep_size, mode, padding_mode, al @unittest.skipIf(USE_COMPILED, "unittests are not designed for both USE_COMPILED=True/False") class TestRotated3DXY(NumpyImageTestCase3D): - @parameterized.expand(TEST_CASES_3D) def test_correct_results(self, im_type, angle, keep_size, mode, padding_mode, align_corners): rotate_fn = Rotated( diff --git a/tests/test_save_classificationd.py b/tests/transforms/test_save_classificationd.py similarity index 100% rename from tests/test_save_classificationd.py rename to tests/transforms/test_save_classificationd.py diff --git a/tests/test_save_image.py b/tests/transforms/test_save_image.py similarity index 100% rename from tests/test_save_image.py rename to tests/transforms/test_save_image.py diff --git a/tests/test_save_imaged.py b/tests/transforms/test_save_imaged.py similarity index 100% rename from tests/test_save_imaged.py rename to tests/transforms/test_save_imaged.py diff --git a/tests/test_savitzky_golay_smooth.py b/tests/transforms/test_savitzky_golay_smooth.py similarity index 99% rename from tests/test_savitzky_golay_smooth.py rename to tests/transforms/test_savitzky_golay_smooth.py index 7516f40029..cf92fdb7b4 100644 --- a/tests/test_savitzky_golay_smooth.py +++ b/tests/transforms/test_savitzky_golay_smooth.py @@ -60,7 +60,6 @@ class TestSavitzkyGolaySmooth(unittest.TestCase): - @parameterized.expand( [TEST_CASE_SINGLE_VALUE, TEST_CASE_2D_AXIS_2, TEST_CASE_SINE_SMOOTH, TEST_CASE_SINGLE_VALUE_REP] ) diff --git a/tests/test_savitzky_golay_smoothd.py b/tests/transforms/test_savitzky_golay_smoothd.py similarity index 99% rename from tests/test_savitzky_golay_smoothd.py rename to tests/transforms/test_savitzky_golay_smoothd.py index f347e7a017..ff981de097 100644 --- a/tests/test_savitzky_golay_smoothd.py +++ b/tests/transforms/test_savitzky_golay_smoothd.py @@ -60,7 +60,6 @@ class TestSavitzkyGolaySmoothd(unittest.TestCase): - @parameterized.expand( [TEST_CASE_SINGLE_VALUE, TEST_CASE_2D_AXIS_2, TEST_CASE_SINE_SMOOTH, TEST_CASE_SINGLE_VALUE_REP] ) diff --git a/tests/test_scale_intensity.py b/tests/transforms/test_scale_intensity.py similarity index 99% rename from tests/test_scale_intensity.py rename to tests/transforms/test_scale_intensity.py index 42ea598369..7047ddef1a 100644 --- a/tests/test_scale_intensity.py +++ b/tests/transforms/test_scale_intensity.py @@ -22,7 +22,6 @@ class TestScaleIntensity(NumpyImageTestCase2D): - @parameterized.expand([[p] for p in TEST_NDARRAYS]) def test_range_scale(self, p): scaler = ScaleIntensity(minv=1.0, maxv=2.0) diff --git a/tests/test_scale_intensity_fixed_mean.py b/tests/transforms/test_scale_intensity_fixed_mean.py similarity index 99% rename from tests/test_scale_intensity_fixed_mean.py rename to tests/transforms/test_scale_intensity_fixed_mean.py index da82dc8f5c..7ea09ce3bc 100644 --- a/tests/test_scale_intensity_fixed_mean.py +++ 
b/tests/transforms/test_scale_intensity_fixed_mean.py @@ -21,7 +21,6 @@ class TestScaleIntensityFixedMean(NumpyImageTestCase2D): - def test_factor_scale(self): for p in TEST_NDARRAYS: scaler = ScaleIntensityFixedMean(factor=0.1, fixed_mean=False) diff --git a/tests/test_scale_intensity_range.py b/tests/transforms/test_scale_intensity_range.py similarity index 99% rename from tests/test_scale_intensity_range.py rename to tests/transforms/test_scale_intensity_range.py index cb4df12a93..b39ce6dd37 100644 --- a/tests/test_scale_intensity_range.py +++ b/tests/transforms/test_scale_intensity_range.py @@ -20,7 +20,6 @@ class IntensityScaleIntensityRange(NumpyImageTestCase2D): - def test_image_scale_intensity_range(self): scaler = ScaleIntensityRange(a_min=20, a_max=108, b_min=50, b_max=80, dtype=np.uint8) for p in TEST_NDARRAYS: diff --git a/tests/test_scale_intensity_ranged.py b/tests/transforms/test_scale_intensity_ranged.py similarity index 99% rename from tests/test_scale_intensity_ranged.py rename to tests/transforms/test_scale_intensity_ranged.py index 16477bcf61..a2015dd2fa 100644 --- a/tests/test_scale_intensity_ranged.py +++ b/tests/transforms/test_scale_intensity_ranged.py @@ -18,7 +18,6 @@ class IntensityScaleIntensityRanged(NumpyImageTestCase2D): - def test_image_scale_intensity_ranged(self): key = "img" scaler = ScaleIntensityRanged(keys=key, a_min=20, a_max=108, b_min=50, b_max=80) diff --git a/tests/test_scale_intensityd.py b/tests/transforms/test_scale_intensityd.py similarity index 99% rename from tests/test_scale_intensityd.py rename to tests/transforms/test_scale_intensityd.py index ef6b9b587c..29c2a937dd 100644 --- a/tests/test_scale_intensityd.py +++ b/tests/transforms/test_scale_intensityd.py @@ -20,7 +20,6 @@ class TestScaleIntensityd(NumpyImageTestCase2D): - def test_range_scale(self): key = "img" for p in TEST_NDARRAYS: diff --git a/tests/test_select_itemsd.py b/tests/transforms/test_select_itemsd.py similarity index 100% rename from tests/test_select_itemsd.py rename to tests/transforms/test_select_itemsd.py diff --git a/tests/test_shift_intensity.py b/tests/transforms/test_shift_intensity.py similarity index 99% rename from tests/test_shift_intensity.py rename to tests/transforms/test_shift_intensity.py index 1f15f92a51..70a6c7bf79 100644 --- a/tests/test_shift_intensity.py +++ b/tests/transforms/test_shift_intensity.py @@ -20,7 +20,6 @@ class TestShiftIntensity(NumpyImageTestCase2D): - def test_value(self): shifter = ShiftIntensity(offset=1.0) result = shifter(self.imt) diff --git a/tests/test_shift_intensityd.py b/tests/transforms/test_shift_intensityd.py similarity index 99% rename from tests/test_shift_intensityd.py rename to tests/transforms/test_shift_intensityd.py index b7d8f1be04..063202db0f 100644 --- a/tests/test_shift_intensityd.py +++ b/tests/transforms/test_shift_intensityd.py @@ -21,7 +21,6 @@ class TestShiftIntensityd(NumpyImageTestCase2D): - def test_value(self): key = "img" for p in TEST_NDARRAYS: diff --git a/tests/test_signal_continuouswavelet.py b/tests/transforms/test_signal_continuouswavelet.py similarity index 91% rename from tests/test_signal_continuouswavelet.py rename to tests/transforms/test_signal_continuouswavelet.py index 7e6ee8b105..992ef91b38 100644 --- a/tests/test_signal_continuouswavelet.py +++ b/tests/transforms/test_signal_continuouswavelet.py @@ -13,6 +13,7 @@ import os import unittest +from pathlib import Path from unittest import skipUnless import numpy as np @@ -22,14 +23,14 @@ from monai.utils import optional_import _, has_pywt 
= optional_import("pywt") -TEST_SIGNAL = os.path.join(os.path.dirname(__file__), "testing_data", "signal.npy") +TESTS_PATH = Path(__file__).parents[1] +TEST_SIGNAL = os.path.join(TESTS_PATH, "testing_data", "signal.npy") VALID_CASES = [("mexh", 150, 500)] EXPECTED_RESULTS = [(6, 150, 2000)] @skipUnless(has_pywt, "pywt required") class TestSignalContinousWavelet(unittest.TestCase): - @parameterized.expand(VALID_CASES) def test_correct_parameters_multi_channels(self, type, length, frequency): self.assertIsInstance(SignalContinuousWavelet(type, length, frequency), SignalContinuousWavelet) diff --git a/tests/test_signal_fillempty.py b/tests/transforms/test_signal_fillempty.py similarity index 93% rename from tests/test_signal_fillempty.py rename to tests/transforms/test_signal_fillempty.py index b32c9924b3..d9166db986 100644 --- a/tests/test_signal_fillempty.py +++ b/tests/transforms/test_signal_fillempty.py @@ -13,6 +13,7 @@ import os import unittest +from pathlib import Path import numpy as np import torch @@ -21,12 +22,12 @@ from monai.utils.type_conversion import convert_to_tensor from tests.test_utils import SkipIfBeforePyTorchVersion -TEST_SIGNAL = os.path.join(os.path.dirname(__file__), "testing_data", "signal.npy") +TESTS_PATH = Path(__file__).parents[1] +TEST_SIGNAL = os.path.join(TESTS_PATH, "testing_data", "signal.npy") @SkipIfBeforePyTorchVersion((1, 9)) class TestSignalFillEmptyNumpy(unittest.TestCase): - def test_correct_parameters_multi_channels(self): self.assertIsInstance(SignalFillEmpty(replacement=0.0), SignalFillEmpty) sig = np.load(TEST_SIGNAL) @@ -38,7 +39,6 @@ def test_correct_parameters_multi_channels(self): @SkipIfBeforePyTorchVersion((1, 9)) class TestSignalFillEmptyTorch(unittest.TestCase): - def test_correct_parameters_multi_channels(self): self.assertIsInstance(SignalFillEmpty(replacement=0.0), SignalFillEmpty) sig = convert_to_tensor(np.load(TEST_SIGNAL)) diff --git a/tests/test_signal_fillemptyd.py b/tests/transforms/test_signal_fillemptyd.py similarity index 93% rename from tests/test_signal_fillemptyd.py rename to tests/transforms/test_signal_fillemptyd.py index d287e83bda..01ab135eb0 100644 --- a/tests/test_signal_fillemptyd.py +++ b/tests/transforms/test_signal_fillemptyd.py @@ -13,6 +13,7 @@ import os import unittest +from pathlib import Path import numpy as np import torch @@ -21,12 +22,12 @@ from monai.utils.type_conversion import convert_to_tensor from tests.test_utils import SkipIfBeforePyTorchVersion -TEST_SIGNAL = os.path.join(os.path.dirname(__file__), "testing_data", "signal.npy") +TESTS_PATH = Path(__file__).parents[1] +TEST_SIGNAL = os.path.join(TESTS_PATH, "testing_data", "signal.npy") @SkipIfBeforePyTorchVersion((1, 9)) class TestSignalFillEmptyNumpy(unittest.TestCase): - def test_correct_parameters_multi_channels(self): self.assertIsInstance(SignalFillEmptyd(replacement=0.0), SignalFillEmptyd) sig = np.load(TEST_SIGNAL) @@ -42,7 +43,6 @@ def test_correct_parameters_multi_channels(self): @SkipIfBeforePyTorchVersion((1, 9)) class TestSignalFillEmptyTorch(unittest.TestCase): - def test_correct_parameters_multi_channels(self): self.assertIsInstance(SignalFillEmptyd(replacement=0.0), SignalFillEmptyd) sig = convert_to_tensor(np.load(TEST_SIGNAL)) diff --git a/tests/test_signal_rand_add_gaussiannoise.py b/tests/transforms/test_signal_rand_add_gaussiannoise.py similarity index 93% rename from tests/test_signal_rand_add_gaussiannoise.py rename to tests/transforms/test_signal_rand_add_gaussiannoise.py index e5c9eba8a2..a2713ad893 100644 --- 
a/tests/test_signal_rand_add_gaussiannoise.py +++ b/tests/transforms/test_signal_rand_add_gaussiannoise.py @@ -13,6 +13,7 @@ import os import unittest +from pathlib import Path import numpy as np from parameterized import parameterized @@ -20,12 +21,12 @@ from monai.transforms import SignalRandAddGaussianNoise from monai.utils.type_conversion import convert_to_tensor -TEST_SIGNAL = os.path.join(os.path.dirname(__file__), "testing_data", "signal.npy") +TESTS_PATH = Path(__file__).parents[1] +TEST_SIGNAL = os.path.join(TESTS_PATH, "testing_data", "signal.npy") VALID_CASES = [([0.0, 0.02],)] class TestSignalRandAddGaussianNoiseNumpy(unittest.TestCase): - @parameterized.expand(VALID_CASES) def test_correct_parameters_multi_channels(self, boundaries): self.assertIsInstance(SignalRandAddGaussianNoise(boundaries), SignalRandAddGaussianNoise) @@ -36,7 +37,6 @@ def test_correct_parameters_multi_channels(self, boundaries): class TestSignalRandAddGaussianNoiseTorch(unittest.TestCase): - @parameterized.expand(VALID_CASES) def test_correct_parameters_multi_channels(self, boundaries): self.assertIsInstance(SignalRandAddGaussianNoise(boundaries), SignalRandAddGaussianNoise) diff --git a/tests/test_signal_rand_add_sine.py b/tests/transforms/test_signal_rand_add_sine.py similarity index 93% rename from tests/test_signal_rand_add_sine.py rename to tests/transforms/test_signal_rand_add_sine.py index 4ba91247dd..8f37806796 100644 --- a/tests/test_signal_rand_add_sine.py +++ b/tests/transforms/test_signal_rand_add_sine.py @@ -13,6 +13,7 @@ import os import unittest +from pathlib import Path import numpy as np from parameterized import parameterized @@ -20,7 +21,8 @@ from monai.transforms import SignalRandAddSine from monai.utils.type_conversion import convert_to_tensor -TEST_SIGNAL = os.path.join(os.path.dirname(__file__), "testing_data", "signal.npy") +TESTS_PATH = Path(__file__).parents[1] +TEST_SIGNAL = os.path.join(TESTS_PATH, "testing_data", "signal.npy") VALID_CASES = [([0.0, 1.0], [0.0, 0.5]), ([0.0, 1.0], [0.01, 0.1])] diff --git a/tests/test_signal_rand_add_sine_partial.py b/tests/transforms/test_signal_rand_add_sine_partial.py similarity index 93% rename from tests/test_signal_rand_add_sine_partial.py rename to tests/transforms/test_signal_rand_add_sine_partial.py index 71b67747a2..61fcd9c40e 100644 --- a/tests/test_signal_rand_add_sine_partial.py +++ b/tests/transforms/test_signal_rand_add_sine_partial.py @@ -13,6 +13,7 @@ import os import unittest +from pathlib import Path import numpy as np from parameterized import parameterized @@ -20,12 +21,12 @@ from monai.transforms import SignalRandAddSinePartial from monai.utils.type_conversion import convert_to_tensor -TEST_SIGNAL = os.path.join(os.path.dirname(__file__), "testing_data", "signal.npy") +TESTS_PATH = Path(__file__).parents[1] +TEST_SIGNAL = os.path.join(TESTS_PATH, "testing_data", "signal.npy") VALID_CASES = [([0.0, 1.0], [0.1, 0.6], [0.0, 0.4])] class TestSignalRandAddSinePartialNumpy(unittest.TestCase): - @parameterized.expand(VALID_CASES) def test_correct_parameters_multi_channels(self, boundaries, frequencies, fraction): self.assertIsInstance(SignalRandAddSinePartial(boundaries, frequencies, fraction), SignalRandAddSinePartial) @@ -36,7 +37,6 @@ def test_correct_parameters_multi_channels(self, boundaries, frequencies, fracti class TestSignalRandAddSinePartialTorch(unittest.TestCase): - @parameterized.expand(VALID_CASES) def test_correct_parameters_multi_channels(self, boundaries, frequencies, fraction): 
self.assertIsInstance(SignalRandAddSinePartial(boundaries, frequencies, fraction), SignalRandAddSinePartial) diff --git a/tests/test_signal_rand_add_squarepulse.py b/tests/transforms/test_signal_rand_add_squarepulse.py similarity index 94% rename from tests/test_signal_rand_add_squarepulse.py rename to tests/transforms/test_signal_rand_add_squarepulse.py index 552d35f55c..da3fe44a75 100644 --- a/tests/test_signal_rand_add_squarepulse.py +++ b/tests/transforms/test_signal_rand_add_squarepulse.py @@ -13,6 +13,7 @@ import os import unittest +from pathlib import Path from unittest import skipUnless import numpy as np @@ -24,14 +25,14 @@ from tests.test_utils import SkipIfBeforePyTorchVersion _, has_scipy = optional_import("scipy") -TEST_SIGNAL = os.path.join(os.path.dirname(__file__), "testing_data", "signal.npy") +TESTS_PATH = Path(__file__).parents[1] +TEST_SIGNAL = os.path.join(TESTS_PATH, "testing_data", "signal.npy") VALID_CASES = [([0.0, 1.0], [0.001, 0.2])] @skipUnless(has_scipy, "scipy required") @SkipIfBeforePyTorchVersion((1, 10, 1)) class TestSignalRandAddSquarePulseNumpy(unittest.TestCase): - @parameterized.expand(VALID_CASES) def test_correct_parameters_multi_channels(self, boundaries, frequencies): self.assertIsInstance(SignalRandAddSquarePulse(boundaries, frequencies), SignalRandAddSquarePulse) @@ -44,7 +45,6 @@ def test_correct_parameters_multi_channels(self, boundaries, frequencies): @skipUnless(has_scipy, "scipy required") @SkipIfBeforePyTorchVersion((1, 10, 1)) class TestSignalRandAddSquarePulseTorch(unittest.TestCase): - @parameterized.expand(VALID_CASES) def test_correct_parameters_multi_channels(self, boundaries, frequencies): self.assertIsInstance(SignalRandAddSquarePulse(boundaries, frequencies), SignalRandAddSquarePulse) diff --git a/tests/test_signal_rand_add_squarepulse_partial.py b/tests/transforms/test_signal_rand_add_squarepulse_partial.py similarity index 94% rename from tests/test_signal_rand_add_squarepulse_partial.py rename to tests/transforms/test_signal_rand_add_squarepulse_partial.py index 9ac564c2c1..c5c53b602f 100644 --- a/tests/test_signal_rand_add_squarepulse_partial.py +++ b/tests/transforms/test_signal_rand_add_squarepulse_partial.py @@ -13,6 +13,7 @@ import os import unittest +from pathlib import Path from unittest import skipUnless import numpy as np @@ -24,14 +25,14 @@ from tests.test_utils import SkipIfBeforePyTorchVersion _, has_scipy = optional_import("scipy") -TEST_SIGNAL = os.path.join(os.path.dirname(__file__), "testing_data", "signal.npy") +TESTS_PATH = Path(__file__).parents[1] +TEST_SIGNAL = os.path.join(TESTS_PATH, "testing_data", "signal.npy") VALID_CASES = [([0.0, 1.0], [0.001, 0.2], [0.0, 0.4])] @skipUnless(has_scipy, "scipy required") @SkipIfBeforePyTorchVersion((1, 10, 1)) class TestSignalRandAddSquarePulsePartialNumpy(unittest.TestCase): - @parameterized.expand(VALID_CASES) def test_correct_parameters_multi_channels(self, boundaries, frequencies, fraction): self.assertIsInstance( @@ -46,7 +47,6 @@ def test_correct_parameters_multi_channels(self, boundaries, frequencies, fracti @skipUnless(has_scipy, "scipy required") @SkipIfBeforePyTorchVersion((1, 10, 1)) class TestSignalRandAddSquarePulsePartialTorch(unittest.TestCase): - @parameterized.expand(VALID_CASES) def test_correct_parameters_multi_channels(self, boundaries, frequencies, fraction): self.assertIsInstance( diff --git a/tests/test_signal_rand_drop.py b/tests/transforms/test_signal_rand_drop.py similarity index 92% rename from tests/test_signal_rand_drop.py rename to 
tests/transforms/test_signal_rand_drop.py index bf2db75a6a..faba528720 100644 --- a/tests/test_signal_rand_drop.py +++ b/tests/transforms/test_signal_rand_drop.py @@ -13,6 +13,7 @@ import os import unittest +from pathlib import Path import numpy as np from parameterized import parameterized @@ -20,12 +21,12 @@ from monai.transforms import SignalRandDrop from monai.utils.type_conversion import convert_to_tensor -TEST_SIGNAL = os.path.join(os.path.dirname(__file__), "testing_data", "signal.npy") +TESTS_PATH = Path(__file__).parents[1] +TEST_SIGNAL = os.path.join(TESTS_PATH, "testing_data", "signal.npy") VALID_CASES = [([0.0, 1.0],), ([0.01, 0.1],)] class TestSignalRandDropNumpy(unittest.TestCase): - @parameterized.expand(VALID_CASES) def test_correct_parameters_multi_channels(self, boundaries): self.assertIsInstance(SignalRandDrop(boundaries), SignalRandDrop) @@ -36,7 +37,6 @@ def test_correct_parameters_multi_channels(self, boundaries): class TestSignalRandDropTorch(unittest.TestCase): - @parameterized.expand(VALID_CASES) def test_correct_parameters_multi_channels(self, boundaries): self.assertIsInstance(SignalRandDrop(boundaries), SignalRandDrop) diff --git a/tests/test_signal_rand_scale.py b/tests/transforms/test_signal_rand_scale.py similarity index 92% rename from tests/test_signal_rand_scale.py rename to tests/transforms/test_signal_rand_scale.py index c040c59a1f..fe0782a4a9 100644 --- a/tests/test_signal_rand_scale.py +++ b/tests/transforms/test_signal_rand_scale.py @@ -13,6 +13,7 @@ import os import unittest +from pathlib import Path import numpy as np from parameterized import parameterized @@ -20,7 +21,8 @@ from monai.transforms import SignalRandScale from monai.utils.type_conversion import convert_to_tensor -TEST_SIGNAL = os.path.join(os.path.dirname(__file__), "testing_data", "signal.npy") +TESTS_PATH = Path(__file__).parents[1] +TEST_SIGNAL = os.path.join(TESTS_PATH, "testing_data", "signal.npy") VALID_CASES = [([-1.0, 1.0],), ([0.01, 0.1],)] diff --git a/tests/test_signal_rand_shift.py b/tests/transforms/test_signal_rand_shift.py similarity index 93% rename from tests/test_signal_rand_shift.py rename to tests/transforms/test_signal_rand_shift.py index 96809e7446..1bc779e879 100644 --- a/tests/test_signal_rand_shift.py +++ b/tests/transforms/test_signal_rand_shift.py @@ -13,6 +13,7 @@ import os import unittest +from pathlib import Path from unittest import skipUnless import numpy as np @@ -23,13 +24,13 @@ from monai.utils.type_conversion import convert_to_tensor _, has_scipy = optional_import("scipy") -TEST_SIGNAL = os.path.join(os.path.dirname(__file__), "testing_data", "signal.npy") +TESTS_PATH = Path(__file__).parents[1] +TEST_SIGNAL = os.path.join(TESTS_PATH, "testing_data", "signal.npy") VALID_CASES = [("wrap", 0.0, [-1.0, 1.0])] @skipUnless(has_scipy, "scipy required") class TestSignalRandShiftNumpy(unittest.TestCase): - @parameterized.expand(VALID_CASES) def test_correct_parameters_multi_channels(self, mode, filling, boundaries): self.assertIsInstance(SignalRandShift(mode, filling, boundaries), SignalRandShift) @@ -41,7 +42,6 @@ def test_correct_parameters_multi_channels(self, mode, filling, boundaries): @skipUnless(has_scipy, "scipy required") class TestSignalRandShiftTorch(unittest.TestCase): - @parameterized.expand(VALID_CASES) def test_correct_parameters_multi_channels(self, mode, filling, boundaries): self.assertIsInstance(SignalRandShift(mode, filling, boundaries), SignalRandShift) diff --git a/tests/test_signal_remove_frequency.py 
b/tests/transforms/test_signal_remove_frequency.py similarity index 95% rename from tests/test_signal_remove_frequency.py rename to tests/transforms/test_signal_remove_frequency.py index 9f795ce68b..16c3e22f59 100644 --- a/tests/test_signal_remove_frequency.py +++ b/tests/transforms/test_signal_remove_frequency.py @@ -13,6 +13,7 @@ import os import unittest +from pathlib import Path from unittest import skipUnless import numpy as np @@ -25,13 +26,13 @@ _, has_scipy = optional_import("scipy") _, has_torchaudio = optional_import("torchaudio") -TEST_SIGNAL = os.path.join(os.path.dirname(__file__), "testing_data", "signal.npy") +TESTS_PATH = Path(__file__).parents[1] +TEST_SIGNAL = os.path.join(TESTS_PATH, "testing_data", "signal.npy") VALID_CASES = [(60, 1, 500)] @skipUnless(has_scipy and has_torchaudio, "scipy and torchaudio are required") class TestSignalRemoveFrequencyNumpy(unittest.TestCase): - @parameterized.expand(VALID_CASES) def test_correct_parameters_multi_channels(self, frequency, quality_factor, sampling_freq): self.assertIsInstance(SignalRemoveFrequency(frequency, quality_factor, sampling_freq), SignalRemoveFrequency) @@ -50,7 +51,6 @@ def test_correct_parameters_multi_channels(self, frequency, quality_factor, samp @skipUnless(has_scipy and has_torchaudio, "scipy and torchaudio are required") class TestSignalRemoveFrequencyTorch(unittest.TestCase): - @parameterized.expand(VALID_CASES) def test_correct_parameters_multi_channels(self, frequency, quality_factor, sampling_freq): self.assertIsInstance(SignalRemoveFrequency(frequency, quality_factor, sampling_freq), SignalRemoveFrequency) diff --git a/tests/test_smooth_field.py b/tests/transforms/test_smooth_field.py similarity index 99% rename from tests/test_smooth_field.py rename to tests/transforms/test_smooth_field.py index 45af048ebc..a92b98582f 100644 --- a/tests/test_smooth_field.py +++ b/tests/transforms/test_smooth_field.py @@ -88,7 +88,6 @@ class TestSmoothField(unittest.TestCase): - @parameterized.expand(TESTS_CONTRAST) def test_rand_smooth_field_adjust_contrastd(self, input_param, input_data, expected_val): g = RandSmoothFieldAdjustContrastd(**input_param) diff --git a/tests/test_sobel_gradient.py b/tests/transforms/test_sobel_gradient.py similarity index 100% rename from tests/test_sobel_gradient.py rename to tests/transforms/test_sobel_gradient.py diff --git a/tests/test_sobel_gradientd.py b/tests/transforms/test_sobel_gradientd.py similarity index 100% rename from tests/test_sobel_gradientd.py rename to tests/transforms/test_sobel_gradientd.py diff --git a/tests/test_spacing.py b/tests/transforms/test_spacing.py similarity index 99% rename from tests/test_spacing.py rename to tests/transforms/test_spacing.py index 1b1f5af237..f3ef25d1d2 100644 --- a/tests/test_spacing.py +++ b/tests/transforms/test_spacing.py @@ -271,7 +271,6 @@ @skip_if_quick class TestSpacingCase(unittest.TestCase): - @parameterized.expand(TESTS) def test_spacing( self, diff --git a/tests/test_spacingd.py b/tests/transforms/test_spacingd.py similarity index 99% rename from tests/test_spacingd.py rename to tests/transforms/test_spacingd.py index 6bb4ed542c..a3dcaf5c67 100644 --- a/tests/test_spacingd.py +++ b/tests/transforms/test_spacingd.py @@ -105,7 +105,6 @@ class TestSpacingDCase(unittest.TestCase): - @parameterized.expand(TESTS) def test_spacingd(self, _, data, kw_args, expected_shape, expected_affine, device): data = {k: v.to(device) for k, v in data.items()} diff --git a/tests/test_spatial_crop.py b/tests/transforms/test_spatial_crop.py 
similarity index 100% rename from tests/test_spatial_crop.py rename to tests/transforms/test_spatial_crop.py diff --git a/tests/test_spatial_cropd.py b/tests/transforms/test_spatial_cropd.py similarity index 100% rename from tests/test_spatial_cropd.py rename to tests/transforms/test_spatial_cropd.py diff --git a/tests/test_spatial_pad.py b/tests/transforms/test_spatial_pad.py similarity index 100% rename from tests/test_spatial_pad.py rename to tests/transforms/test_spatial_pad.py diff --git a/tests/test_spatial_padd.py b/tests/transforms/test_spatial_padd.py similarity index 100% rename from tests/test_spatial_padd.py rename to tests/transforms/test_spatial_padd.py diff --git a/tests/test_spatial_resample.py b/tests/transforms/test_spatial_resample.py similarity index 99% rename from tests/test_spatial_resample.py rename to tests/transforms/test_spatial_resample.py index 874b45f9b3..7962c77f1c 100644 --- a/tests/test_spatial_resample.py +++ b/tests/transforms/test_spatial_resample.py @@ -133,7 +133,6 @@ class TestSpatialResample(unittest.TestCase): - @parameterized.expand(TESTS) def test_flips(self, img, device, data_param, expected_output): for p in TEST_NDARRAYS_ALL: diff --git a/tests/test_squeezedim.py b/tests/transforms/test_squeezedim.py similarity index 99% rename from tests/test_squeezedim.py rename to tests/transforms/test_squeezedim.py index 477eef92c2..5fd333d821 100644 --- a/tests/test_squeezedim.py +++ b/tests/transforms/test_squeezedim.py @@ -32,7 +32,6 @@ class TestSqueezeDim(unittest.TestCase): - @parameterized.expand(TESTS) def test_shape(self, input_param, test_data, expected_shape): result = SqueezeDim(**input_param)(test_data) diff --git a/tests/test_squeezedimd.py b/tests/transforms/test_squeezedimd.py similarity index 99% rename from tests/test_squeezedimd.py rename to tests/transforms/test_squeezedimd.py index d97a05b6f8..134dba6a26 100644 --- a/tests/test_squeezedimd.py +++ b/tests/transforms/test_squeezedimd.py @@ -80,7 +80,6 @@ class TestSqueezeDim(unittest.TestCase): - @parameterized.expand(TESTS) def test_shape(self, input_param, test_data, expected_shape): result = SqueezeDimd(**input_param)(test_data) diff --git a/tests/test_std_shift_intensity.py b/tests/transforms/test_std_shift_intensity.py similarity index 99% rename from tests/test_std_shift_intensity.py rename to tests/transforms/test_std_shift_intensity.py index 8d0469698a..9f5f6ac54b 100644 --- a/tests/test_std_shift_intensity.py +++ b/tests/transforms/test_std_shift_intensity.py @@ -21,7 +21,6 @@ class TestStdShiftIntensity(NumpyImageTestCase2D): - def test_value(self): for p in TEST_NDARRAYS: imt = p(self.imt) diff --git a/tests/test_std_shift_intensityd.py b/tests/transforms/test_std_shift_intensityd.py similarity index 99% rename from tests/test_std_shift_intensityd.py rename to tests/transforms/test_std_shift_intensityd.py index 4aa01ce31f..d90febca32 100644 --- a/tests/test_std_shift_intensityd.py +++ b/tests/transforms/test_std_shift_intensityd.py @@ -21,7 +21,6 @@ class TestStdShiftIntensityd(NumpyImageTestCase2D): - def test_value(self): key = "img" factor = np.random.rand() diff --git a/tests/test_threshold_intensity.py b/tests/transforms/test_threshold_intensity.py similarity index 99% rename from tests/test_threshold_intensity.py rename to tests/transforms/test_threshold_intensity.py index dd485af05b..8803987fd9 100644 --- a/tests/test_threshold_intensity.py +++ b/tests/transforms/test_threshold_intensity.py @@ -27,7 +27,6 @@ class TestThresholdIntensity(unittest.TestCase): - 
@parameterized.expand(TESTS) def test_value(self, in_type, input_param, expected_value): test_data = in_type(np.arange(10)) diff --git a/tests/test_threshold_intensityd.py b/tests/transforms/test_threshold_intensityd.py similarity index 99% rename from tests/test_threshold_intensityd.py rename to tests/transforms/test_threshold_intensityd.py index 5e7fef0fe3..e825340658 100644 --- a/tests/test_threshold_intensityd.py +++ b/tests/transforms/test_threshold_intensityd.py @@ -45,7 +45,6 @@ class TestThresholdIntensityd(unittest.TestCase): - @parameterized.expand(TESTS) def test_value(self, in_type, input_param, expected_value): test_data = {"image": in_type(np.arange(10)), "label": in_type(np.arange(10)), "extra": in_type(np.arange(10))} diff --git a/tests/test_to_contiguous.py b/tests/transforms/test_to_contiguous.py similarity index 99% rename from tests/test_to_contiguous.py rename to tests/transforms/test_to_contiguous.py index a6a9cbf799..2b4fe7a4e9 100644 --- a/tests/test_to_contiguous.py +++ b/tests/transforms/test_to_contiguous.py @@ -21,7 +21,6 @@ class TestToContiguous(unittest.TestCase): - def test_contiguous_dict(self): tochange = np.moveaxis(np.zeros((2, 3, 4)), 0, -1) test_dict = {"test_key": [[1]], 0: np.array(0), 1: np.array([0]), "nested": {"nested": [tochange]}} diff --git a/tests/test_to_cupy.py b/tests/transforms/test_to_cupy.py similarity index 99% rename from tests/test_to_cupy.py rename to tests/transforms/test_to_cupy.py index 62dfd1c903..a9f95ba1b0 100644 --- a/tests/test_to_cupy.py +++ b/tests/transforms/test_to_cupy.py @@ -26,7 +26,6 @@ @skipUnless(HAS_CUPY, "CuPy is required.") class TestToCupy(unittest.TestCase): - def test_cupy_input(self): test_data = cp.array([[1, 2], [3, 4]], dtype=cp.float32) test_data = cp.rot90(test_data) diff --git a/tests/test_to_cupyd.py b/tests/transforms/test_to_cupyd.py similarity index 99% rename from tests/test_to_cupyd.py rename to tests/transforms/test_to_cupyd.py index 390c2cb6df..9e26b4d1a5 100644 --- a/tests/test_to_cupyd.py +++ b/tests/transforms/test_to_cupyd.py @@ -26,7 +26,6 @@ @skipUnless(HAS_CUPY, "CuPy is required.") class TestToCupyd(unittest.TestCase): - def test_cupy_input(self): test_data = cp.array([[1, 2], [3, 4]]) test_data = cp.rot90(test_data) diff --git a/tests/test_to_device.py b/tests/transforms/test_to_device.py similarity index 99% rename from tests/test_to_device.py rename to tests/transforms/test_to_device.py index 34d2a16e07..1bfa199c96 100644 --- a/tests/test_to_device.py +++ b/tests/transforms/test_to_device.py @@ -30,7 +30,6 @@ @skip_if_no_cuda class TestToDevice(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4]) def test_value(self, device): converter = ToDevice(device=device, non_blocking=True) diff --git a/tests/test_to_deviced.py b/tests/transforms/test_to_deviced.py similarity index 99% rename from tests/test_to_deviced.py rename to tests/transforms/test_to_deviced.py index 9580dd4e10..d86634fb3f 100644 --- a/tests/test_to_deviced.py +++ b/tests/transforms/test_to_deviced.py @@ -22,7 +22,6 @@ @skip_if_no_cuda class TestToDeviced(unittest.TestCase): - def test_value(self): device = "cuda:0" data = [{"img": torch.tensor(i)} for i in range(4)] diff --git a/tests/test_to_numpy.py b/tests/transforms/test_to_numpy.py similarity index 99% rename from tests/test_to_numpy.py rename to tests/transforms/test_to_numpy.py index be5ce1f38a..6e69fbf90e 100644 --- a/tests/test_to_numpy.py +++ b/tests/transforms/test_to_numpy.py @@ -25,7 +25,6 @@ class 
TestToNumpy(unittest.TestCase): - @skipUnless(HAS_CUPY, "CuPy is required.") def test_cupy_input(self): test_data = cp.array([[1, 2], [3, 4]]) diff --git a/tests/test_to_numpyd.py b/tests/transforms/test_to_numpyd.py similarity index 99% rename from tests/test_to_numpyd.py rename to tests/transforms/test_to_numpyd.py index 2dcafa2da8..532cb7d332 100644 --- a/tests/test_to_numpyd.py +++ b/tests/transforms/test_to_numpyd.py @@ -25,7 +25,6 @@ class TestToNumpyd(unittest.TestCase): - @skipUnless(HAS_CUPY, "CuPy is required.") def test_cupy_input(self): test_data = cp.array([[1, 2], [3, 4]]) diff --git a/tests/test_to_pil.py b/tests/transforms/test_to_pil.py similarity index 99% rename from tests/test_to_pil.py rename to tests/transforms/test_to_pil.py index 25d533b94e..f318bb9e79 100644 --- a/tests/test_to_pil.py +++ b/tests/transforms/test_to_pil.py @@ -40,7 +40,6 @@ class TestToPIL(unittest.TestCase): - @parameterized.expand(TESTS) @skipUnless(has_pil, "Requires `pillow` package.") def test_value(self, test_data): diff --git a/tests/test_to_pild.py b/tests/transforms/test_to_pild.py similarity index 99% rename from tests/test_to_pild.py rename to tests/transforms/test_to_pild.py index 13fe3a87a8..baee3e0c97 100644 --- a/tests/test_to_pild.py +++ b/tests/transforms/test_to_pild.py @@ -38,7 +38,6 @@ class TestToPIL(unittest.TestCase): - @parameterized.expand(TESTS) @skipUnless(has_pil, "Requires `pillow` package.") def test_values(self, input_param, test_data): diff --git a/tests/test_to_tensor.py b/tests/transforms/test_to_tensor.py similarity index 99% rename from tests/test_to_tensor.py rename to tests/transforms/test_to_tensor.py index 3fa93bc51f..790556a9bc 100644 --- a/tests/test_to_tensor.py +++ b/tests/transforms/test_to_tensor.py @@ -33,7 +33,6 @@ class TestToTensor(unittest.TestCase): - @parameterized.expand(TESTS) def test_array_input(self, test_data, expected_shape): result = ToTensor(dtype=torch.float32, device="cpu", wrap_sequence=True)(test_data) diff --git a/tests/test_to_tensord.py b/tests/transforms/test_to_tensord.py similarity index 99% rename from tests/test_to_tensord.py rename to tests/transforms/test_to_tensord.py index e6ad27610d..b7735f5ea3 100644 --- a/tests/test_to_tensord.py +++ b/tests/transforms/test_to_tensord.py @@ -34,7 +34,6 @@ class TestToTensord(unittest.TestCase): - @parameterized.expand(TESTS) def test_array_input(self, test_data, expected_shape): test_data = {"img": test_data} diff --git a/tests/test_torchvision.py b/tests/transforms/test_torchvision.py similarity index 99% rename from tests/test_torchvision.py rename to tests/transforms/test_torchvision.py index d64147013f..b15fd88dc5 100644 --- a/tests/test_torchvision.py +++ b/tests/transforms/test_torchvision.py @@ -55,7 +55,6 @@ class TestTorchVision(unittest.TestCase): - @parameterized.expand(TESTS) def test_value(self, input_param, input_data, expected_value): set_determinism(seed=0) diff --git a/tests/test_torchvisiond.py b/tests/transforms/test_torchvisiond.py similarity index 99% rename from tests/test_torchvisiond.py rename to tests/transforms/test_torchvisiond.py index f772a8ec86..2e5003cbac 100644 --- a/tests/test_torchvisiond.py +++ b/tests/transforms/test_torchvisiond.py @@ -52,7 +52,6 @@ class TestTorchVisiond(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3]) def test_value(self, input_param, input_data, expected_value): set_determinism(seed=0) diff --git a/tests/test_transform.py b/tests/transforms/test_transform.py similarity index 100% rename from 
tests/test_transform.py rename to tests/transforms/test_transform.py diff --git a/tests/test_transpose.py b/tests/transforms/test_transpose.py similarity index 99% rename from tests/test_transpose.py rename to tests/transforms/test_transpose.py index ae1dbde12b..b6ff5ea589 100644 --- a/tests/test_transpose.py +++ b/tests/transforms/test_transpose.py @@ -27,7 +27,6 @@ class TestTranspose(unittest.TestCase): - @parameterized.expand(TESTS) def test_transpose(self, im, indices): tr = Transpose(indices) diff --git a/tests/test_transposed.py b/tests/transforms/test_transposed.py similarity index 99% rename from tests/test_transposed.py rename to tests/transforms/test_transposed.py index 74d48383eb..b47762843a 100644 --- a/tests/test_transposed.py +++ b/tests/transforms/test_transposed.py @@ -30,7 +30,6 @@ class TestTranspose(unittest.TestCase): - @parameterized.expand(TESTS) def test_transpose(self, im, indices): data = {"i": deepcopy(im), "j": deepcopy(im)} diff --git a/tests/test_ultrasound_confidence_map_transform.py b/tests/transforms/test_ultrasound_confidence_map_transform.py similarity index 98% rename from tests/test_ultrasound_confidence_map_transform.py rename to tests/transforms/test_ultrasound_confidence_map_transform.py index 982ad53675..331c46ee00 100644 --- a/tests/test_ultrasound_confidence_map_transform.py +++ b/tests/transforms/test_ultrasound_confidence_map_transform.py @@ -13,6 +13,7 @@ import os import unittest +from pathlib import Path import numpy as np import torch @@ -25,6 +26,8 @@ _, has_scipy = optional_import("scipy") +TESTS_PATH = Path(__file__).parents[1] + TEST_INPUT = np.array( [ [1, 2, 3, 23, 13, 22, 5, 1, 2, 3], @@ -487,7 +490,6 @@ @unittest.skipUnless(has_scipy, "Requires scipy") class TestUltrasoundConfidenceMapTransform(unittest.TestCase): - def setUp(self): self.input_img_np = np.expand_dims(TEST_INPUT, axis=0) # mock image (numpy array) self.input_mask_np = np.expand_dims(TEST_MASK, axis=0) # mock mask (numpy array) @@ -496,13 +498,13 @@ def setUp(self): self.input_mask_torch = torch.from_numpy(TEST_MASK).unsqueeze(0) # mock mask (torch tensor) self.real_input_img_paths = [ - os.path.join(os.path.dirname(__file__), "testing_data", "ultrasound_confidence_map", "neck_input.png"), - os.path.join(os.path.dirname(__file__), "testing_data", "ultrasound_confidence_map", "femur_input.png"), + os.path.join(TESTS_PATH, "testing_data", "ultrasound_confidence_map", "neck_input.png"), + os.path.join(TESTS_PATH, "testing_data", "ultrasound_confidence_map", "femur_input.png"), ] self.real_result_npy_paths = [ - os.path.join(os.path.dirname(__file__), "testing_data", "ultrasound_confidence_map", "neck_result.npy"), - os.path.join(os.path.dirname(__file__), "testing_data", "ultrasound_confidence_map", "femur_result.npy"), + os.path.join(TESTS_PATH, "testing_data", "ultrasound_confidence_map", "neck_result.npy"), + os.path.join(TESTS_PATH, "testing_data", "ultrasound_confidence_map", "femur_result.npy"), ] self.real_input_paramaters = [ diff --git a/tests/test_utils_pytorch_numpy_unification.py b/tests/transforms/test_utils_pytorch_numpy_unification.py similarity index 99% rename from tests/test_utils_pytorch_numpy_unification.py rename to tests/transforms/test_utils_pytorch_numpy_unification.py index cf382d15d2..a78fcab0d1 100644 --- a/tests/test_utils_pytorch_numpy_unification.py +++ b/tests/transforms/test_utils_pytorch_numpy_unification.py @@ -36,7 +36,6 @@ class TestPytorchNumpyUnification(unittest.TestCase): - def setUp(self) -> None: set_determinism(0) diff --git 
a/tests/test_vote_ensemble.py b/tests/transforms/test_vote_ensemble.py similarity index 99% rename from tests/test_vote_ensemble.py rename to tests/transforms/test_vote_ensemble.py index f034a442c7..3a93600ec4 100644 --- a/tests/test_vote_ensemble.py +++ b/tests/transforms/test_vote_ensemble.py @@ -71,7 +71,6 @@ class TestVoteEnsemble(unittest.TestCase): - @parameterized.expand(TESTS) def test_value(self, input_param, img, expected_value): result = VoteEnsemble(**input_param)(img) diff --git a/tests/test_vote_ensembled.py b/tests/transforms/test_vote_ensembled.py similarity index 99% rename from tests/test_vote_ensembled.py rename to tests/transforms/test_vote_ensembled.py index 1ad4d17869..e43271357e 100644 --- a/tests/test_vote_ensembled.py +++ b/tests/transforms/test_vote_ensembled.py @@ -86,7 +86,6 @@ class TestVoteEnsembled(unittest.TestCase): - @parameterized.expand(TESTS) def test_value(self, input_param, img, expected_value): result = VoteEnsembled(**input_param)(img) diff --git a/tests/test_with_allow_missing_keys.py b/tests/transforms/test_with_allow_missing_keys.py similarity index 100% rename from tests/test_with_allow_missing_keys.py rename to tests/transforms/test_with_allow_missing_keys.py diff --git a/tests/test_zoom.py b/tests/transforms/test_zoom.py similarity index 99% rename from tests/test_zoom.py rename to tests/transforms/test_zoom.py index 67da9e2e82..93225420c5 100644 --- a/tests/test_zoom.py +++ b/tests/transforms/test_zoom.py @@ -43,7 +43,6 @@ class TestZoom(NumpyImageTestCase2D): - @parameterized.expand(VALID_CASES) def test_pending_ops(self, zoom, mode, align_corners=False, keep_size=False): im = MetaTensor(self.imt[0], meta={"a": "b", "affine": DEFAULT_TEST_AFFINE}) diff --git a/tests/test_zoomd.py b/tests/transforms/test_zoomd.py similarity index 99% rename from tests/test_zoomd.py rename to tests/transforms/test_zoomd.py index dacc9eb897..65b82a3fbf 100644 --- a/tests/test_zoomd.py +++ b/tests/transforms/test_zoomd.py @@ -34,7 +34,6 @@ class TestZoomd(NumpyImageTestCase2D): - @parameterized.expand(VALID_CASES) def test_correct_results(self, zoom, mode, keep_size, align_corners=None): key = "img" diff --git a/tests/transforms/transform/__init__.py b/tests/transforms/transform/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/transforms/transform/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
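The `__init__.py` added just above (mirrored for the other new test subdirectories later in this patch) marks each relocated folder as a regular Python package, which keeps dotted imports of the moved test modules resolvable. A minimal sketch of the resulting import style, assuming the repository root is on `sys.path` as in the existing test runner; both lines appear in the updated hunks earlier in this diff:

```python
# Shared helpers remain importable from the tests/ root, while a relocated
# module is now addressed by its new package-qualified path.
from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose
from tests.transforms.test_resize_with_pad_or_crop import TESTS_PENDING_MODE
```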
diff --git a/tests/test_randomizable.py b/tests/transforms/transform/test_randomizable.py similarity index 100% rename from tests/test_randomizable.py rename to tests/transforms/transform/test_randomizable.py diff --git a/tests/test_randomizable_transform_type.py b/tests/transforms/transform/test_randomizable_transform_type.py similarity index 100% rename from tests/test_randomizable_transform_type.py rename to tests/transforms/transform/test_randomizable_transform_type.py diff --git a/tests/transforms/utility/__init__.py b/tests/transforms/utility/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/transforms/utility/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/test_apply_transform_to_points.py b/tests/transforms/utility/test_apply_transform_to_points.py similarity index 100% rename from tests/test_apply_transform_to_points.py rename to tests/transforms/utility/test_apply_transform_to_points.py diff --git a/tests/test_apply_transform_to_pointsd.py b/tests/transforms/utility/test_apply_transform_to_pointsd.py similarity index 100% rename from tests/test_apply_transform_to_pointsd.py rename to tests/transforms/utility/test_apply_transform_to_pointsd.py diff --git a/tests/test_identity.py b/tests/transforms/utility/test_identity.py similarity index 99% rename from tests/test_identity.py rename to tests/transforms/utility/test_identity.py index 4865781c52..b873854e0a 100644 --- a/tests/test_identity.py +++ b/tests/transforms/utility/test_identity.py @@ -18,7 +18,6 @@ class TestIdentity(NumpyImageTestCase2D): - def test_identity(self): for p in TEST_NDARRAYS: img = p(self.imt) diff --git a/tests/test_identityd.py b/tests/transforms/utility/test_identityd.py similarity index 99% rename from tests/test_identityd.py rename to tests/transforms/utility/test_identityd.py index 49d7d92216..770eb14620 100644 --- a/tests/test_identityd.py +++ b/tests/transforms/utility/test_identityd.py @@ -18,7 +18,6 @@ class TestIdentityd(NumpyImageTestCase2D): - def test_identityd(self): for p in TEST_NDARRAYS: img = p(self.imt) diff --git a/tests/test_lambda.py b/tests/transforms/utility/test_lambda.py similarity index 99% rename from tests/test_lambda.py rename to tests/transforms/utility/test_lambda.py index 0a9349b52c..1aa3567d23 100644 --- a/tests/test_lambda.py +++ b/tests/transforms/utility/test_lambda.py @@ -23,7 +23,6 @@ class TestLambda(NumpyImageTestCase2D): - def test_lambda_identity(self): for p in TEST_NDARRAYS: img = p(self.imt) diff --git a/tests/test_lambdad.py b/tests/transforms/utility/test_lambdad.py similarity index 99% rename from tests/test_lambdad.py rename to tests/transforms/utility/test_lambdad.py index 3b177e040a..2c996b23db 100644 --- a/tests/test_lambdad.py +++ b/tests/transforms/utility/test_lambdad.py @@ -23,7 +23,6 @@ class TestLambdad(NumpyImageTestCase2D): - def test_lambdad_identity(self): for p in TEST_NDARRAYS: img = p(self.imt) diff --git a/tests/test_rand_lambda.py 
b/tests/transforms/utility/test_rand_lambda.py similarity index 99% rename from tests/test_rand_lambda.py rename to tests/transforms/utility/test_rand_lambda.py index fe89202fef..74627c3d3b 100644 --- a/tests/test_rand_lambda.py +++ b/tests/transforms/utility/test_rand_lambda.py @@ -37,7 +37,6 @@ def __call__(self, data): class TestRandLambda(unittest.TestCase): - def check(self, tr: RandLambda, img, img_orig_type, out, expected=None): # input shouldn't change self.assertIsInstance(img, img_orig_type) diff --git a/tests/test_rand_lambdad.py b/tests/transforms/utility/test_rand_lambdad.py similarity index 99% rename from tests/test_rand_lambdad.py rename to tests/transforms/utility/test_rand_lambdad.py index 19049c288e..7eb8ddf7ba 100644 --- a/tests/test_rand_lambdad.py +++ b/tests/transforms/utility/test_rand_lambdad.py @@ -37,7 +37,6 @@ def __call__(self, data): class TestRandLambdad(unittest.TestCase): - def check(self, tr: RandLambdad, input: dict, out: dict, expected: dict): if isinstance(input["img"], MetaTensor): self.assertEqual(len(input["img"].applied_operations), 0) diff --git a/tests/test_simulatedelay.py b/tests/transforms/utility/test_simulatedelay.py similarity index 99% rename from tests/test_simulatedelay.py rename to tests/transforms/utility/test_simulatedelay.py index 489a9f30d0..743150ab0d 100644 --- a/tests/test_simulatedelay.py +++ b/tests/transforms/utility/test_simulatedelay.py @@ -22,7 +22,6 @@ class TestSimulateDelay(NumpyImageTestCase2D): - @parameterized.expand([(0.45,), (1,)]) def test_value(self, delay_test_time: float): resize = SimulateDelay(delay_time=delay_test_time) diff --git a/tests/test_simulatedelayd.py b/tests/transforms/utility/test_simulatedelayd.py similarity index 99% rename from tests/test_simulatedelayd.py rename to tests/transforms/utility/test_simulatedelayd.py index 9eac4a0e66..1d473a86a9 100644 --- a/tests/test_simulatedelayd.py +++ b/tests/transforms/utility/test_simulatedelayd.py @@ -22,7 +22,6 @@ class TestSimulateDelay(NumpyImageTestCase2D): - @parameterized.expand([(0.45,), (1,)]) def test_value(self, delay_test_time: float): resize = SimulateDelayd(keys="imgd", delay_time=delay_test_time) diff --git a/tests/test_splitdim.py b/tests/transforms/utility/test_splitdim.py similarity index 99% rename from tests/test_splitdim.py rename to tests/transforms/utility/test_splitdim.py index e0eaca182f..31d9983a2b 100644 --- a/tests/test_splitdim.py +++ b/tests/transforms/utility/test_splitdim.py @@ -26,7 +26,6 @@ class TestSplitDim(unittest.TestCase): - @parameterized.expand(TESTS) def test_correct_shape(self, shape, keepdim, im_type): arr = im_type(np.random.rand(*shape)) diff --git a/tests/test_splitdimd.py b/tests/transforms/utility/test_splitdimd.py similarity index 100% rename from tests/test_splitdimd.py rename to tests/transforms/utility/test_splitdimd.py diff --git a/tests/transforms/utils/__init__.py b/tests/transforms/utils/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/transforms/utils/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/test_correct_crop_centers.py b/tests/transforms/utils/test_correct_crop_centers.py similarity index 99% rename from tests/test_correct_crop_centers.py rename to tests/transforms/utils/test_correct_crop_centers.py index df0e32f5c4..3b483c8e69 100644 --- a/tests/test_correct_crop_centers.py +++ b/tests/transforms/utils/test_correct_crop_centers.py @@ -23,7 +23,6 @@ class TestCorrectCropCenters(unittest.TestCase): - @parameterized.expand(TESTS) def test_torch(self, spatial_size, centers, label_spatial_shape): result1 = correct_crop_centers(centers, spatial_size, label_spatial_shape) diff --git a/tests/test_get_unique_labels.py b/tests/transforms/utils/test_get_unique_labels.py similarity index 99% rename from tests/test_get_unique_labels.py rename to tests/transforms/utils/test_get_unique_labels.py index 8735768902..13e034b9f8 100644 --- a/tests/test_get_unique_labels.py +++ b/tests/transforms/utils/test_get_unique_labels.py @@ -35,7 +35,6 @@ class TestGetUniqueLabels(unittest.TestCase): - @parameterized.expand(TESTS) def test_correct_results(self, args, expected): result = get_unique_labels(**args) diff --git a/tests/test_print_transform_backends.py b/tests/transforms/utils/test_print_transform_backends.py similarity index 100% rename from tests/test_print_transform_backends.py rename to tests/transforms/utils/test_print_transform_backends.py diff --git a/tests/test_soft_clip.py b/tests/transforms/utils/test_soft_clip.py similarity index 100% rename from tests/test_soft_clip.py rename to tests/transforms/utils/test_soft_clip.py diff --git a/tests/utils/__init__.py b/tests/utils/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/utils/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/utils/enums/__init__.py b/tests/utils/enums/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/utils/enums/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
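A recurring change in these hunks is replacing `os.path.dirname(__file__)` with a `TESTS_PATH` derived from `Path(__file__).parents[...]`, so that assets under `tests/testing_data` are still found after the files moved one or two levels deeper. A minimal sketch of the pattern, assuming `testing_data/` stays directly under the `tests/` root (the file names in the comments are examples taken from this patch):

```python
import os
from pathlib import Path

# The number of .parents hops matches the module's new depth below tests/:
#   tests/transforms/test_signal_fillempty.py -> Path(__file__).parents[1] is tests/
#   tests/utils/enums/test_wsireader.py       -> Path(__file__).parents[2] is tests/
TESTS_PATH = Path(__file__).parents[1]
TEST_SIGNAL = os.path.join(TESTS_PATH, "testing_data", "signal.npy")
```

Anchoring on the `tests/` root keeps a single shared `testing_data/` directory for all of the relocated modules.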
diff --git a/tests/test_hovernet_loss.py b/tests/utils/enums/test_hovernet_loss.py similarity index 100% rename from tests/test_hovernet_loss.py rename to tests/utils/enums/test_hovernet_loss.py diff --git a/tests/test_ordering.py b/tests/utils/enums/test_ordering.py similarity index 100% rename from tests/test_ordering.py rename to tests/utils/enums/test_ordering.py diff --git a/tests/test_wsireader.py b/tests/utils/enums/test_wsireader.py similarity index 98% rename from tests/test_wsireader.py rename to tests/utils/enums/test_wsireader.py index 5ce4ca9502..3b84af7345 100644 --- a/tests/test_wsireader.py +++ b/tests/utils/enums/test_wsireader.py @@ -13,6 +13,7 @@ import os import unittest +from pathlib import Path from typing import Any from unittest import skipUnless @@ -35,11 +36,12 @@ _, has_codec = optional_import("imagecodecs") has_tiff = has_tiff and has_codec +TESTS_PATH = Path(__file__).parents[2] WSI_GENERIC_TIFF_KEY = "wsi_generic_tiff" -WSI_GENERIC_TIFF_PATH = os.path.join(os.path.dirname(__file__), "testing_data", f"temp_{WSI_GENERIC_TIFF_KEY}.tiff") +WSI_GENERIC_TIFF_PATH = os.path.join(TESTS_PATH, "testing_data", f"temp_{WSI_GENERIC_TIFF_KEY}.tiff") WSI_APERIO_SVS_KEY = "wsi_aperio_svs" -WSI_APERIO_SVS_PATH = os.path.join(os.path.dirname(__file__), "testing_data", f"temp_{WSI_APERIO_SVS_KEY}.svs") +WSI_APERIO_SVS_PATH = os.path.join(TESTS_PATH, "testing_data", f"temp_{WSI_APERIO_SVS_KEY}.svs") WSI_GENERIC_TIFF_HEIGHT = 32914 WSI_GENERIC_TIFF_WIDTH = 46000 @@ -402,7 +404,6 @@ def setUpModule(): class WSIReaderTests: - class Tests(unittest.TestCase): backend = None @@ -497,9 +498,7 @@ def test_read_rgba(self, img_expected): reader = WSIReader(self.backend) for mode in ["RGB", "RGBA"]: file_path = save_rgba_tiff( - img_expected, - os.path.join(os.path.dirname(__file__), "testing_data", f"temp_tiff_image_{mode}.tiff"), - mode=mode, + img_expected, os.path.join(TESTS_PATH, "testing_data", f"temp_tiff_image_{mode}.tiff"), mode=mode ) with reader.read(file_path) as img_obj: image[mode], _ = reader.get_data(img_obj) @@ -514,7 +513,7 @@ def test_read_malformats(self, img_expected): # Until cuCIM addresses https://github.com/rapidsai/cucim/issues/230 return reader = WSIReader(self.backend) - file_path = os.path.join(os.path.dirname(__file__), "testing_data", "temp_tiff_image_gray.tiff") + file_path = os.path.join(TESTS_PATH, "testing_data", "temp_tiff_image_gray.tiff") imwrite(file_path, img_expected, shape=img_expected.shape) with self.assertRaises((RuntimeError, ValueError, openslide.OpenSlideError if has_osl else ValueError)): with reader.read(file_path) as img_obj: @@ -641,7 +640,6 @@ def test_errors(self, file_path, reader_kwargs, patch_info, exception): @skipUnless(has_cucim, "Requires cucim") class TestCuCIM(WSIReaderTests.Tests): - @classmethod def setUpClass(cls): cls.backend = "cucim" @@ -649,7 +647,6 @@ def setUpClass(cls): @skipUnless(has_osl, "Requires openslide") class TestOpenSlide(WSIReaderTests.Tests): - @classmethod def setUpClass(cls): cls.backend = "openslide" @@ -657,7 +654,6 @@ def setUpClass(cls): @skipUnless(has_tiff, "Requires tifffile") class TestTiffFile(WSIReaderTests.Tests): - @classmethod def setUpClass(cls): cls.backend = "tifffile" diff --git a/tests/utils/misc/__init__.py b/tests/utils/misc/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/utils/misc/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except 
in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/test_ensure_tuple.py b/tests/utils/misc/test_ensure_tuple.py similarity index 99% rename from tests/test_ensure_tuple.py rename to tests/utils/misc/test_ensure_tuple.py index e889f9bfc4..c947c12f69 100644 --- a/tests/test_ensure_tuple.py +++ b/tests/utils/misc/test_ensure_tuple.py @@ -37,7 +37,6 @@ class TestEnsureTuple(unittest.TestCase): - @parameterized.expand(TESTS) def test_value(self, input, expected_value, wrap_array=False): result = ensure_tuple(input, wrap_array) diff --git a/tests/test_monai_env_vars.py b/tests/utils/misc/test_monai_env_vars.py similarity index 100% rename from tests/test_monai_env_vars.py rename to tests/utils/misc/test_monai_env_vars.py diff --git a/tests/test_monai_utils_misc.py b/tests/utils/misc/test_monai_utils_misc.py similarity index 100% rename from tests/test_monai_utils_misc.py rename to tests/utils/misc/test_monai_utils_misc.py diff --git a/tests/test_str2bool.py b/tests/utils/misc/test_str2bool.py similarity index 100% rename from tests/test_str2bool.py rename to tests/utils/misc/test_str2bool.py diff --git a/tests/test_str2list.py b/tests/utils/misc/test_str2list.py similarity index 100% rename from tests/test_str2list.py rename to tests/utils/misc/test_str2list.py diff --git a/tests/test_alias.py b/tests/utils/test_alias.py similarity index 92% rename from tests/test_alias.py rename to tests/utils/test_alias.py index e2dd8bcf26..e7abff3d89 100644 --- a/tests/test_alias.py +++ b/tests/utils/test_alias.py @@ -15,15 +15,18 @@ import inspect import os import unittest +from pathlib import Path from monai.utils import optional_import +TESTS_PATH = Path(__file__).parents[1] + class TestModuleAlias(unittest.TestCase): """check that 'import monai.xx.file_name' returns a module""" def test_files(self): - src_dir = os.path.dirname(os.path.dirname(__file__)) + src_dir = os.path.dirname(TESTS_PATH) monai_dir = os.path.join(src_dir, "monai") py_files = glob.glob(os.path.join(monai_dir, "**", "*.py"), recursive=True) for x in py_files: diff --git a/tests/test_component_store.py b/tests/utils/test_component_store.py similarity index 100% rename from tests/test_component_store.py rename to tests/utils/test_component_store.py diff --git a/tests/test_deprecated.py b/tests/utils/test_deprecated.py similarity index 100% rename from tests/test_deprecated.py rename to tests/utils/test_deprecated.py diff --git a/tests/test_enum_bound_interp.py b/tests/utils/test_enum_bound_interp.py similarity index 99% rename from tests/test_enum_bound_interp.py rename to tests/utils/test_enum_bound_interp.py index 8101e85a92..6f8ffa2481 100644 --- a/tests/test_enum_bound_interp.py +++ b/tests/utils/test_enum_bound_interp.py @@ -22,7 +22,6 @@ @skip_if_no_cpp_extension class TestEnumBoundInterp(unittest.TestCase): - def test_bound(self): self.assertEqual(str(b.replicate), "BoundType.replicate") self.assertEqual(str(b.nearest), "BoundType.replicate") diff --git a/tests/test_evenly_divisible_all_gather_dist.py b/tests/utils/test_evenly_divisible_all_gather_dist.py similarity index 99% rename from 
tests/test_evenly_divisible_all_gather_dist.py rename to tests/utils/test_evenly_divisible_all_gather_dist.py index 816563cce9..cea8921544 100644 --- a/tests/test_evenly_divisible_all_gather_dist.py +++ b/tests/utils/test_evenly_divisible_all_gather_dist.py @@ -21,7 +21,6 @@ class DistributedEvenlyDivisibleAllGather(DistTestCase): - @DistCall(nnodes=1, nproc_per_node=2) def test_data(self): self._run() diff --git a/tests/test_get_package_version.py b/tests/utils/test_get_package_version.py similarity index 100% rename from tests/test_get_package_version.py rename to tests/utils/test_get_package_version.py diff --git a/tests/test_handler_logfile.py b/tests/utils/test_handler_logfile.py similarity index 99% rename from tests/test_handler_logfile.py rename to tests/utils/test_handler_logfile.py index eece744e42..344ede7874 100644 --- a/tests/test_handler_logfile.py +++ b/tests/utils/test_handler_logfile.py @@ -30,7 +30,6 @@ class TestHandlerLogfile(unittest.TestCase): - def setUp(self): if has_ignite: # set up engine diff --git a/tests/test_handler_metric_logger.py b/tests/utils/test_handler_metric_logger.py similarity index 99% rename from tests/test_handler_metric_logger.py rename to tests/utils/test_handler_metric_logger.py index 35c32fa42b..cf67fc3843 100644 --- a/tests/test_handler_metric_logger.py +++ b/tests/utils/test_handler_metric_logger.py @@ -28,7 +28,6 @@ class TestHandlerMetricLogger(unittest.TestCase): - @SkipIfNoModule("ignite") def test_metric_logging(self): dummy_name = "dummy" diff --git a/tests/test_list_to_dict.py b/tests/utils/test_list_to_dict.py similarity index 100% rename from tests/test_list_to_dict.py rename to tests/utils/test_list_to_dict.py diff --git a/tests/test_look_up_option.py b/tests/utils/test_look_up_option.py similarity index 100% rename from tests/test_look_up_option.py rename to tests/utils/test_look_up_option.py diff --git a/tests/test_optional_import.py b/tests/utils/test_optional_import.py similarity index 100% rename from tests/test_optional_import.py rename to tests/utils/test_optional_import.py diff --git a/tests/test_pad_mode.py b/tests/utils/test_pad_mode.py similarity index 99% rename from tests/test_pad_mode.py rename to tests/utils/test_pad_mode.py index 1992b83d52..a4a4012fc5 100644 --- a/tests/test_pad_mode.py +++ b/tests/utils/test_pad_mode.py @@ -23,7 +23,6 @@ @SkipIfBeforePyTorchVersion((1, 10, 1)) class TestPadMode(unittest.TestCase): - def test_pad(self): expected_shapes = {3: (1, 15, 10), 4: (1, 10, 6, 7)} for t in (float, int, np.uint8, np.int16, np.float32, bool): diff --git a/tests/test_profiling.py b/tests/utils/test_profiling.py similarity index 99% rename from tests/test_profiling.py rename to tests/utils/test_profiling.py index d960531a54..da41a8ef69 100644 --- a/tests/test_profiling.py +++ b/tests/utils/test_profiling.py @@ -29,7 +29,6 @@ class TestWorkflowProfiler(unittest.TestCase): - def setUp(self): super().setUp() diff --git a/tests/test_rankfilter_dist.py b/tests/utils/test_rankfilter_dist.py similarity index 99% rename from tests/test_rankfilter_dist.py rename to tests/utils/test_rankfilter_dist.py index 1f4811a3c7..caeaf2e10d 100644 --- a/tests/test_rankfilter_dist.py +++ b/tests/utils/test_rankfilter_dist.py @@ -23,7 +23,6 @@ class DistributedRankFilterTest(DistTestCase): - def setUp(self): self.log_dir = tempfile.TemporaryDirectory() @@ -51,7 +50,6 @@ def tearDown(self) -> None: class SingleRankFilterTest(unittest.TestCase): - def tearDown(self) -> None: self.log_dir.cleanup() diff --git a/tests/test_require_pkg.py 
b/tests/utils/test_require_pkg.py similarity index 100% rename from tests/test_require_pkg.py rename to tests/utils/test_require_pkg.py diff --git a/tests/test_sample_slices.py b/tests/utils/test_sample_slices.py similarity index 99% rename from tests/test_sample_slices.py rename to tests/utils/test_sample_slices.py index 79ebcbda05..7080fb1b88 100644 --- a/tests/test_sample_slices.py +++ b/tests/utils/test_sample_slices.py @@ -32,7 +32,6 @@ class TestSampleSlices(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5]) def test_shape(self, input_data, dim, as_indices, vals, expected_result): for p in TEST_NDARRAYS: diff --git a/tests/test_set_determinism.py b/tests/utils/test_set_determinism.py similarity index 99% rename from tests/test_set_determinism.py rename to tests/utils/test_set_determinism.py index d5b578f1c8..2507741eb4 100644 --- a/tests/test_set_determinism.py +++ b/tests/utils/test_set_determinism.py @@ -21,7 +21,6 @@ class TestSetDeterminism(unittest.TestCase): - def test_values(self): # check system default flags set_determinism(None) @@ -56,7 +55,6 @@ def test_values(self): class TestSetFlag(unittest.TestCase): - def setUp(self): set_determinism(1, use_deterministic_algorithms=True) diff --git a/tests/test_squeeze_unsqueeze.py b/tests/utils/test_squeeze_unsqueeze.py similarity index 100% rename from tests/test_squeeze_unsqueeze.py rename to tests/utils/test_squeeze_unsqueeze.py diff --git a/tests/test_state_cacher.py b/tests/utils/test_state_cacher.py similarity index 100% rename from tests/test_state_cacher.py rename to tests/utils/test_state_cacher.py diff --git a/tests/test_torchscript_utils.py b/tests/utils/test_torchscript_utils.py similarity index 100% rename from tests/test_torchscript_utils.py rename to tests/utils/test_torchscript_utils.py diff --git a/tests/test_version.py b/tests/utils/test_version.py similarity index 100% rename from tests/test_version.py rename to tests/utils/test_version.py diff --git a/tests/test_version_after.py b/tests/utils/test_version_after.py similarity index 100% rename from tests/test_version_after.py rename to tests/utils/test_version_after.py diff --git a/tests/utils/type_conversion/__init__.py b/tests/utils/type_conversion/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/utils/type_conversion/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
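A side note on the path changes in the relocated tests above: once a test module sits one or two directories below `tests/`, `os.path.dirname(__file__)` no longer points at the tests root, so the moved files climb back up with `pathlib`. A minimal sketch of the idea, using a hypothetical file location rather than any specific module from this patch:

```
from pathlib import Path

# Hypothetical file: <repo>/tests/utils/type_conversion/test_something.py
# parents[0] -> <repo>/tests/utils/type_conversion
# parents[1] -> <repo>/tests/utils
# parents[2] -> <repo>/tests   (the root that contains testing_data/)
TESTS_PATH = Path(__file__).parents[2]
TESTING_DATA_DIR = TESTS_PATH / "testing_data"
```

A module that is only one level deep (for example `tests/utils/test_alias.py` above) needs `parents[1]` instead, which is why both forms appear in this patch.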
diff --git a/tests/test_convert_data_type.py b/tests/utils/type_conversion/test_convert_data_type.py similarity index 99% rename from tests/test_convert_data_type.py rename to tests/utils/type_conversion/test_convert_data_type.py index a64e9553b1..4c1905b188 100644 --- a/tests/test_convert_data_type.py +++ b/tests/utils/type_conversion/test_convert_data_type.py @@ -78,7 +78,6 @@ class TestTensor(torch.Tensor): class TestConvertDataType(unittest.TestCase): - @parameterized.expand(TESTS) def test_convert_data_type(self, in_image, im_out, out_dtype, safe): converted_im, orig_type, orig_device = convert_data_type(in_image, type(im_out), dtype=out_dtype, safe=safe) diff --git a/tests/test_get_equivalent_dtype.py b/tests/utils/type_conversion/test_get_equivalent_dtype.py similarity index 99% rename from tests/test_get_equivalent_dtype.py rename to tests/utils/type_conversion/test_get_equivalent_dtype.py index 497b2ab591..7e733d9ef6 100644 --- a/tests/test_get_equivalent_dtype.py +++ b/tests/utils/type_conversion/test_get_equivalent_dtype.py @@ -29,7 +29,6 @@ class TestGetEquivalentDtype(unittest.TestCase): - @parameterized.expand(TESTS) def test_get_equivalent_dtype(self, im, input_dtype): out_dtype = get_equivalent_dtype(input_dtype, type(im)) diff --git a/tests/test_safe_dtype_range.py b/tests/utils/type_conversion/test_safe_dtype_range.py similarity index 99% rename from tests/test_safe_dtype_range.py rename to tests/utils/type_conversion/test_safe_dtype_range.py index ffbf5dba7d..a096233886 100644 --- a/tests/test_safe_dtype_range.py +++ b/tests/utils/type_conversion/test_safe_dtype_range.py @@ -54,7 +54,6 @@ class TesSafeDtypeRange(unittest.TestCase): - @parameterized.expand(TESTS) def test_safe_dtype_range(self, in_image, im_out, out_dtype): result = safe_dtype_range(in_image, out_dtype) diff --git a/tests/visualize/__init__.py b/tests/visualize/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/visualize/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/tests/test_img2tensorboard.py b/tests/visualize/test_img2tensorboard.py similarity index 100% rename from tests/test_img2tensorboard.py rename to tests/visualize/test_img2tensorboard.py diff --git a/tests/test_occlusion_sensitivity.py b/tests/visualize/test_occlusion_sensitivity.py similarity index 100% rename from tests/test_occlusion_sensitivity.py rename to tests/visualize/test_occlusion_sensitivity.py diff --git a/tests/test_plot_2d_or_3d_image.py b/tests/visualize/test_plot_2d_or_3d_image.py similarity index 99% rename from tests/test_plot_2d_or_3d_image.py rename to tests/visualize/test_plot_2d_or_3d_image.py index 231e6b4161..b9ced3953c 100644 --- a/tests/test_plot_2d_or_3d_image.py +++ b/tests/visualize/test_plot_2d_or_3d_image.py @@ -40,7 +40,6 @@ @unittest.skipUnless(has_tb, "Requires SummaryWriter installation") @SkipIfBeforePyTorchVersion((1, 13)) # issue 6683 class TestPlot2dOr3dImage(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5]) def test_tb_image(self, shape): with tempfile.TemporaryDirectory() as tempdir: diff --git a/tests/test_vis_cam.py b/tests/visualize/test_vis_cam.py similarity index 100% rename from tests/test_vis_cam.py rename to tests/visualize/test_vis_cam.py diff --git a/tests/test_vis_gradcam.py b/tests/visualize/test_vis_gradcam.py similarity index 99% rename from tests/test_vis_gradcam.py rename to tests/visualize/test_vis_gradcam.py index e8d225f6f5..274510ba30 100644 --- a/tests/test_vis_gradcam.py +++ b/tests/visualize/test_vis_gradcam.py @@ -24,7 +24,6 @@ class DenseNetAdjoint(DenseNet121): - def __call__(self, x, adjoint_info): if adjoint_info != 42: raise ValueError @@ -150,7 +149,6 @@ def __call__(self, x, adjoint_info): @skip_if_quick class TestGradientClassActivationMap(unittest.TestCase): - @parameterized.expand(TESTS) def test_shape(self, cam_class, input_data, expected_shape): model = None diff --git a/tests/visualize/utils/__init__.py b/tests/visualize/utils/__init__.py new file mode 100644 index 0000000000..1e97f89407 --- /dev/null +++ b/tests/visualize/utils/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/tests/test_blend_images.py b/tests/visualize/utils/test_blend_images.py similarity index 99% rename from tests/test_blend_images.py rename to tests/visualize/utils/test_blend_images.py index 589ae2d7c8..4ec9635e9b 100644 --- a/tests/test_blend_images.py +++ b/tests/visualize/utils/test_blend_images.py @@ -44,7 +44,6 @@ def get_alpha(img): @skipUnless(has_matplotlib, "Matplotlib required") class TestBlendImages(unittest.TestCase): - @parameterized.expand(TESTS) def test_blend(self, image, label, alpha): blended = blend_images(image, label, alpha) diff --git a/tests/test_matshow3d.py b/tests/visualize/utils/test_matshow3d.py similarity index 92% rename from tests/test_matshow3d.py rename to tests/visualize/utils/test_matshow3d.py index 7d5357aa4e..f10f1ea79d 100644 --- a/tests/test_matshow3d.py +++ b/tests/visualize/utils/test_matshow3d.py @@ -14,6 +14,7 @@ import os import tempfile import unittest +from pathlib import Path import numpy as np @@ -35,9 +36,10 @@ @SkipIfNoModule("matplotlib") class TestMatshow3d(unittest.TestCase): - def test_3d(self): - testing_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testing_data") + test_root = Path(__file__).parents[2] + testing_dir = os.path.join(test_root, "testing_data") + print("test_root: ", testing_dir) keys = "image" xforms = Compose( [ @@ -62,7 +64,8 @@ def test_3d(self): matshow3d(ims[keys], fig=axes, figsize=(2, 2), frames_per_row=5, every_n=2, frame_dim=-1, show=False) def test_samples(self): - testing_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testing_data") + test_root = Path(__file__).parents[2] + testing_dir = os.path.join(test_root, "testing_data") keys = "image" xforms = Compose( [ @@ -91,7 +94,8 @@ def test_samples(self): self.assertIsNone(comp, f"value of comp={comp}") # None indicates test passed def test_3d_rgb(self): - testing_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testing_data") + test_dir = Path(__file__).parents[2].as_posix() + testing_dir = os.path.join(test_dir, "testing_data") keys = "image" xforms = Compose( [ From 2016d20646edad60d593cdc3e0760380616abcdc Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Thu, 13 Feb 2025 03:10:30 +0800 Subject: [PATCH 13/55] Recursive Item Mapping for Nested Lists in Compose (#8187) Fixes #8186. ### Description A few sentences describing the changes proposed in this pull request. ### Types of changes - [x] Non-breaking change (fix or new feature that would not break existing functionality). - [ ] Breaking change (fix or new feature that would cause existing functionality to change). - [ ] New tests added to cover the changes. - [ ] Integration tests passed locally by running `./runtests.sh -f -u --net --coverage`. - [ ] Quick tests passed locally by running `./runtests.sh --quick --unittests --disttests`. - [ ] In-line docstrings updated. - [ ] Documentation updated, tested `make html` command in the `docs/` folder. 
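To make the recursive item mapping concrete before the diff below, here is a minimal illustrative sketch (not taken from this PR's tests) of how an integer `map_items` is intended to control the nesting depth at which `Compose` applies its transforms, assuming the patched behaviour:

```
from monai.transforms import Compose

def add_one(x):
    return x + 1

nested = [[1, 2], [3, 4]]  # a list of lists, i.e. two levels of nesting

# map_items=True (equivalent to 1) would pass each inner list to the transform as-is,
# whereas map_items=2 recurses one level further and transforms the scalars.
print(Compose([add_one], map_items=2)(nested))  # expected: [[2, 3], [4, 5]]
```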
--------- Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> Co-authored-by: Ben Murray Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Signed-off-by: Can-Zhao --- monai/transforms/compose.py | 39 +++++++++++++++++------- monai/transforms/transform.py | 21 +++++++++---- tests/transforms/compose/test_compose.py | 14 +++++++++ 3 files changed, 57 insertions(+), 17 deletions(-) diff --git a/monai/transforms/compose.py b/monai/transforms/compose.py index 236d3cc4c5..4513e26678 100644 --- a/monai/transforms/compose.py +++ b/monai/transforms/compose.py @@ -47,7 +47,7 @@ def execute_compose( data: NdarrayOrTensor | Sequence[NdarrayOrTensor] | Mapping[Any, NdarrayOrTensor], transforms: Sequence[Any], - map_items: bool = True, + map_items: bool | int = True, unpack_items: bool = False, start: int = 0, end: int | None = None, @@ -65,8 +65,13 @@ def execute_compose( Args: data: a tensor-like object to be transformed transforms: a sequence of transforms to be carried out - map_items: whether to apply transform to each item in the input `data` if `data` is a list or tuple. - defaults to `True`. + map_items: controls whether to apply a transformation to each item in `data`. If `data` is a list or tuple, + it can behave as follows: + - Defaults to True, which is equivalent to `map_items=1`, meaning the transformation will be applied + to the first level of items in `data`. + - If an integer is provided, it specifies the maximum level of nesting to which the transformation + should be recursively applied. This allows treating multi-sample transforms applied after another + multi-sample transform while controlling how deep the mapping goes. unpack_items: whether to unpack input `data` with `*` as parameters for the callable function of transform. defaults to `False`. start: the index of the first transform to be executed. If not set, this defaults to 0 @@ -205,8 +210,14 @@ class Compose(Randomizable, InvertibleTransform, LazyTransform): Args: transforms: sequence of callables. - map_items: whether to apply transform to each item in the input `data` if `data` is a list or tuple. - defaults to `True`. + map_items: controls whether to apply a transformation to each item in `data`. If `data` is a list or tuple, + it can behave as follows: + + - Defaults to True, which is equivalent to `map_items=1`, meaning the transformation will be applied + to the first level of items in `data`. + - If an integer is provided, it specifies the maximum level of nesting to which the transformation + should be recursively applied. This allows treating multi-sample transforms applied after another + multi-sample transform while controlling how deep the mapping goes. unpack_items: whether to unpack input `data` with `*` as parameters for the callable function of transform. defaults to `False`. log_stats: this optional parameter allows you to specify a logger by name for logging of pipeline execution. @@ -227,7 +238,7 @@ class Compose(Randomizable, InvertibleTransform, LazyTransform): def __init__( self, transforms: Sequence[Callable] | Callable | None = None, - map_items: bool = True, + map_items: bool | int = True, unpack_items: bool = False, log_stats: bool | str = False, lazy: bool | None = False, @@ -238,9 +249,9 @@ def __init__( if transforms is None: transforms = [] - if not isinstance(map_items, bool): + if not isinstance(map_items, (bool, int)): raise ValueError( - f"Argument 'map_items' should be boolean. Got {type(map_items)}." 
+ f"Argument 'map_items' should be boolean or int. Got {type(map_items)}." "Check brackets when passing a sequence of callables." ) @@ -391,8 +402,14 @@ class OneOf(Compose): transforms: sequence of callables. weights: probabilities corresponding to each callable in transforms. Probabilities are normalized to sum to one. - map_items: whether to apply transform to each item in the input `data` if `data` is a list or tuple. - defaults to `True`. + map_items: controls whether to apply a transformation to each item in `data`. If `data` is a list or tuple, + it can behave as follows: + + - Defaults to True, which is equivalent to `map_items=1`, meaning the transformation will be applied + to the first level of items in `data`. + - If an integer is provided, it specifies the maximum level of nesting to which the transformation + should be recursively applied. This allows treating multi-sample transforms applied after another + multi-sample transform while controlling how deep the mapping goes. unpack_items: whether to unpack input `data` with `*` as parameters for the callable function of transform. defaults to `False`. log_stats: this optional parameter allows you to specify a logger by name for logging of pipeline execution. @@ -414,7 +431,7 @@ def __init__( self, transforms: Sequence[Callable] | Callable | None = None, weights: Sequence[float] | float | None = None, - map_items: bool = True, + map_items: bool | int = True, unpack_items: bool = False, log_stats: bool | str = False, lazy: bool | None = False, diff --git a/monai/transforms/transform.py b/monai/transforms/transform.py index 15c2499a73..1a365b8d8e 100644 --- a/monai/transforms/transform.py +++ b/monai/transforms/transform.py @@ -101,12 +101,12 @@ def _apply_transform( def apply_transform( transform: Callable[..., ReturnType], data: Any, - map_items: bool = True, + map_items: bool | int = True, unpack_items: bool = False, log_stats: bool | str = False, lazy: bool | None = None, overrides: dict | None = None, -) -> list[ReturnType] | ReturnType: +) -> list[Any] | ReturnType: """ Transform `data` with `transform`. @@ -117,8 +117,13 @@ def apply_transform( Args: transform: a callable to be used to transform `data`. data: an object to be transformed. - map_items: whether to apply transform to each item in `data`, - if `data` is a list or tuple. Defaults to True. + map_items: controls whether to apply a transformation to each item in `data`. If `data` is a list or tuple, + it can behave as follows: + - Defaults to True, which is equivalent to `map_items=1`, meaning the transformation will be applied + to the first level of items in `data`. + - If an integer is provided, it specifies the maximum level of nesting to which the transformation + should be recursively applied. This allows treating multi-sample transforms applied after another + multi-sample transform while controlling how deep the mapping goes. unpack_items: whether to unpack parameters using `*`. Defaults to False. log_stats: log errors when they occur in the processing pipeline. By default, this is set to False, which disables the logger for processing pipeline errors. Setting it to None or True will enable logging to the @@ -136,8 +141,12 @@ def apply_transform( Union[List[ReturnType], ReturnType]: The return type of `transform` or a list thereof. 
""" try: - if isinstance(data, (list, tuple)) and map_items: - return [_apply_transform(transform, item, unpack_items, lazy, overrides, log_stats) for item in data] + map_items_ = int(map_items) if isinstance(map_items, bool) else map_items + if isinstance(data, (list, tuple)) and map_items_ > 0: + return [ + apply_transform(transform, item, map_items_ - 1, unpack_items, log_stats, lazy, overrides) + for item in data + ] return _apply_transform(transform, data, unpack_items, lazy, overrides, log_stats) except Exception as e: # if in debug mode, don't swallow exception so that the breakpoint diff --git a/tests/transforms/compose/test_compose.py b/tests/transforms/compose/test_compose.py index 3c53ac4a22..e6727c976f 100644 --- a/tests/transforms/compose/test_compose.py +++ b/tests/transforms/compose/test_compose.py @@ -141,6 +141,20 @@ def b(i, i2): self.assertEqual(mt.Compose(transforms, unpack_items=True)(data), expected) self.assertEqual(execute_compose(data, transforms, unpack_items=True), expected) + def test_list_non_dict_compose_with_unpack_map_2(self): + + def a(i, i2): + return i + "a", i2 + "a2" + + def b(i, i2): + return i + "b", i2 + "b2" + + transforms = [a, b, a, b] + data = [[("", ""), ("", "")], [("t", "t"), ("t", "t")]] + expected = [[("abab", "a2b2a2b2"), ("abab", "a2b2a2b2")], [("tabab", "ta2b2a2b2"), ("tabab", "ta2b2a2b2")]] + self.assertEqual(mt.Compose(transforms, map_items=2, unpack_items=True)(data), expected) + self.assertEqual(execute_compose(data, transforms, map_items=2, unpack_items=True), expected) + def test_list_dict_compose_no_map(self): def a(d): # transform to handle dict data From e8b500bd81c1a5100eb9a098bf81713646b21d56 Mon Sep 17 00:00:00 2001 From: James Butler Date: Fri, 14 Feb 2025 07:27:27 -0500 Subject: [PATCH 14/55] Bump min torch to 1.13.1 to mitigate CVE-2022-45907 unsafe usage of eval (#8296) ### Description This bumps the minimum required `torch` version from 1.9.0 to 1.13.1. See https://github.com/advisories/GHSA-47fc-vmwq-366v for more details such as the highest severity scoring of "Critical". - https://nvd.nist.gov/vuln/detail/CVE-2022-45907 - https://security.snyk.io/vuln/SNYK-PYTHON-TORCH-3149871 Maintainers will need to update the required status checks for the [`dev`](https://github.com/Project-MONAI/MONAI/tree/dev) branch to: - Remove min-dep-pytorch (1.10.2) - Remove min-dep-pytorch (1.11.0) - Remove min-dep-pytorch (1.12.1) - Remove min-dep-pytorch (1.13) - Add min-dep-pytorch (1.13.1) cc: @KumoLiu ### Types of changes - [x] Breaking change (fix or new feature that would cause existing functionality to change). (drop of older `torch` versions) - [ ] Integration tests passed locally by running `./runtests.sh -f -u --net --coverage`. - [ ] Quick tests passed locally by running `./runtests.sh --quick --unittests --disttests`. 
--------- Signed-off-by: James Butler Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> Co-authored-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> Signed-off-by: Can-Zhao --- .github/workflows/cron.yml | 4 - .github/workflows/pythonapp-gpu.yml | 16 +- .github/workflows/pythonapp-min.yml | 2 +- .github/workflows/pythonapp.yml | 2 +- docs/requirements.txt | 4 +- environment-dev.yml | 2 +- monai/apps/auto3dseg/transforms.py | 5 +- monai/data/utils.py | 19 +- monai/inferers/utils.py | 3 +- monai/losses/dice.py | 16 +- monai/losses/ds_loss.py | 4 +- monai/networks/layers/simplelayers.py | 16 +- monai/networks/utils.py | 20 +- monai/transforms/croppad/array.py | 7 +- monai/transforms/utils.py | 3 +- monai/utils/tf32.py | 10 - monai/visualize/class_activation_maps.py | 13 +- pyproject.toml | 2 +- requirements.txt | 2 +- setup.cfg | 2 +- tests/data/meta_tensor/test_meta_tensor.py | 22 +- tests/data/test_cachedataset.py | 3 +- .../test_integration_sliding_window.py | 9 +- .../integration/test_integration_workflows.py | 6 +- tests/networks/nets/test_swin_unetr.py | 3 +- tests/networks/test_convert_to_onnx.py | 19 +- tests/testing_data/integration_answers.py | 360 ------------------ tests/transforms/test_resize.py | 3 +- .../test_resize_with_pad_or_crop.py | 16 +- .../test_resize_with_pad_or_cropd.py | 14 +- 30 files changed, 53 insertions(+), 554 deletions(-) diff --git a/.github/workflows/cron.yml b/.github/workflows/cron.yml index e13848f8fc..2e7921ec94 100644 --- a/.github/workflows/cron.yml +++ b/.github/workflows/cron.yml @@ -13,16 +13,12 @@ jobs: strategy: matrix: environment: - - "PT110+CUDA113" - "PT113+CUDA118" - "PT210+CUDA121" - "PT240+CUDA126" - "PTLATEST+CUDA126" include: # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes - - environment: PT110+CUDA113 - pytorch: "torch==1.10.2 torchvision==0.11.3 --extra-index-url https://download.pytorch.org/whl/cu113" - base: "nvcr.io/nvidia/pytorch:21.06-py3" # CUDA 11.3 - environment: PT113+CUDA118 pytorch: "torch==1.13.1 torchvision==0.14.1 --extra-index-url https://download.pytorch.org/whl/cu121" base: "nvcr.io/nvidia/pytorch:22.10-py3" # CUDA 11.8 diff --git a/.github/workflows/pythonapp-gpu.yml b/.github/workflows/pythonapp-gpu.yml index d8623c8087..cd916f2ebb 100644 --- a/.github/workflows/pythonapp-gpu.yml +++ b/.github/workflows/pythonapp-gpu.yml @@ -22,20 +22,10 @@ jobs: strategy: matrix: environment: - - "PT19+CUDA114DOCKER" - - "PT110+CUDA111" - - "PT112+CUDA118DOCKER" - "PT113+CUDA116" - "PT210+CUDA121DOCKER" include: # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes - - environment: PT110+CUDA111 - pytorch: "torch==1.10.2 torchvision==0.11.3 --extra-index-url https://download.pytorch.org/whl/cu111" - base: "nvcr.io/nvidia/cuda:11.1.1-devel-ubuntu18.04" - - environment: PT112+CUDA118DOCKER - # 22.09: 1.13.0a0+d0d6b1f - pytorch: "-h" # we explicitly set pytorch to -h to avoid pip install error - base: "nvcr.io/nvidia/pytorch:22.09-py3" - environment: PT113+CUDA116 pytorch: "torch==1.13.1 torchvision==0.14.1" base: "nvcr.io/nvidia/cuda:11.6.1-devel-ubuntu18.04" @@ -59,8 +49,7 @@ jobs: apt-get update apt-get install -y wget - if [ ${{ matrix.environment }} = "PT110+CUDA111" ] || \ - [ ${{ matrix.environment }} = "PT113+CUDA116" ] + if [ ${{ matrix.environment }} = "PT113+CUDA116" ] then PYVER=3.9 PYSFX=3 DISTUTILS=python3-distutils && \ apt-get update && apt-get install -y --no-install-recommends \ @@ -94,9 +83,6 @@ jobs: python get-pip.py && \ rm get-pip.py; fi - - if: 
matrix.environment == 'PT19+CUDA114DOCKER' - name: Optional Cupy dependency (cuda114) - run: echo "cupy-cuda114" >> requirements-dev.txt - name: Install dependencies if: github.event.pull_request.merged != true run: | diff --git a/.github/workflows/pythonapp-min.yml b/.github/workflows/pythonapp-min.yml index b0d37937e9..19e30f86bb 100644 --- a/.github/workflows/pythonapp-min.yml +++ b/.github/workflows/pythonapp-min.yml @@ -124,7 +124,7 @@ jobs: strategy: fail-fast: false matrix: - pytorch-version: ['1.10.2', '1.11.0', '1.12.1', '1.13', '2.0.1', 'latest'] + pytorch-version: ['1.13.1', '2.0.1', '2.2.2', '2.3.1', '2.4.1', 'latest'] timeout-minutes: 40 steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml index 3c39166c1e..f175cc3f7c 100644 --- a/.github/workflows/pythonapp.yml +++ b/.github/workflows/pythonapp.yml @@ -155,7 +155,7 @@ jobs: # install the latest pytorch for testing # however, "pip install monai*.tar.gz" will build cpp/cuda with an isolated # fresh torch installation according to pyproject.toml - python -m pip install torch>=1.9 torchvision + python -m pip install torch>=1.13.1 torchvision - name: Check packages run: | pip uninstall monai diff --git a/docs/requirements.txt b/docs/requirements.txt index 7307d8e5f9..d657580743 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,5 +1,5 @@ --f https://download.pytorch.org/whl/cpu/torch-1.12.1%2Bcpu-cp37-cp37m-linux_x86_64.whl -torch>=1.9 +-f https://download.pytorch.org/whl/cpu/torch-1.13.1%2Bcpu-cp39-cp39-linux_x86_64.whl +torch>=1.13.1 pytorch-ignite==0.4.11 numpy>=1.20 itk>=5.2 diff --git a/environment-dev.yml b/environment-dev.yml index 4a1723e8a5..8617a3b9cb 100644 --- a/environment-dev.yml +++ b/environment-dev.yml @@ -6,7 +6,7 @@ channels: - conda-forge dependencies: - numpy>=1.24,<2.0 - - pytorch>=1.9 + - pytorch>=1.13.1 - torchio - torchvision - pytorch-cuda>=11.6 diff --git a/monai/apps/auto3dseg/transforms.py b/monai/apps/auto3dseg/transforms.py index bb755aa78c..736895b732 100644 --- a/monai/apps/auto3dseg/transforms.py +++ b/monai/apps/auto3dseg/transforms.py @@ -18,7 +18,6 @@ import torch from monai.config import KeysCollection -from monai.networks.utils import pytorch_after from monai.transforms import MapTransform from monai.utils.misc import ImageMetaKey @@ -74,9 +73,7 @@ def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> dict[Hashable, torc f", the metadata was not updated {filename}." ) d[key] = torch.nn.functional.interpolate( - input=d[key].unsqueeze(0), - size=image_shape, - mode="nearest-exact" if pytorch_after(1, 11) else "nearest", + input=d[key].unsqueeze(0), size=image_shape, mode="nearest-exact" ).squeeze(0) else: raise ValueError( diff --git a/monai/data/utils.py b/monai/data/utils.py index f35c5124d8..d03dbd3234 100644 --- a/monai/data/utils.py +++ b/monai/data/utils.py @@ -50,7 +50,6 @@ issequenceiterable, look_up_option, optional_import, - pytorch_after, ) pd, _ = optional_import("pandas") @@ -450,12 +449,9 @@ def collate_meta_tensor_fn(batch, *, collate_fn_map=None): Collate a sequence of meta tensor into a single batched metatensor. This is called by `collage_meta_tensor` and so should not be used as a collate function directly in dataloaders. 
""" - if pytorch_after(1, 13): - from torch.utils.data._utils.collate import collate_tensor_fn # imported here for pylint/mypy issues + from torch.utils.data._utils.collate import collate_tensor_fn # imported here for pylint/mypy issues - collated = collate_tensor_fn(batch) - else: - collated = default_collate(batch) + collated = collate_tensor_fn(batch) meta_dicts = [i.meta or TraceKeys.NONE for i in batch] common_ = set.intersection(*[set(d.keys()) for d in meta_dicts if isinstance(d, dict)]) @@ -494,18 +490,15 @@ def list_data_collate(batch: Sequence): Need to use this collate if apply some transforms that can generate batch data. """ + from torch.utils.data._utils.collate import default_collate_fn_map - if pytorch_after(1, 13): - # needs to go here to avoid circular import - from torch.utils.data._utils.collate import default_collate_fn_map - - from monai.data.meta_tensor import MetaTensor + from monai.data.meta_tensor import MetaTensor - default_collate_fn_map.update({MetaTensor: collate_meta_tensor_fn}) + default_collate_fn_map.update({MetaTensor: collate_meta_tensor_fn}) elem = batch[0] data = [i for k in batch for i in k] if isinstance(elem, list) else batch key = None - collate_fn = default_collate if pytorch_after(1, 13) else collate_meta_tensor + collate_fn = default_collate try: if config.USE_META_DICT: data = pickle_operations(data) # bc 0.9.0 diff --git a/monai/inferers/utils.py b/monai/inferers/utils.py index edaf736091..8adba8fa25 100644 --- a/monai/inferers/utils.py +++ b/monai/inferers/utils.py @@ -31,11 +31,10 @@ fall_back_tuple, look_up_option, optional_import, - pytorch_after, ) tqdm, _ = optional_import("tqdm", name="tqdm") -_nearest_mode = "nearest-exact" if pytorch_after(1, 11) else "nearest" +_nearest_mode = "nearest-exact" __all__ = ["sliding_window_inference"] diff --git a/monai/losses/dice.py b/monai/losses/dice.py index 4108820bec..ed88100edd 100644 --- a/monai/losses/dice.py +++ b/monai/losses/dice.py @@ -25,7 +25,7 @@ from monai.losses.spatial_mask import MaskedLoss from monai.losses.utils import compute_tp_fp_fn from monai.networks import one_hot -from monai.utils import DiceCEReduction, LossReduction, Weight, look_up_option, pytorch_after +from monai.utils import DiceCEReduction, LossReduction, Weight, look_up_option class DiceLoss(_Loss): @@ -738,12 +738,7 @@ def __init__( batch=batch, weight=dice_weight, ) - if pytorch_after(1, 10): - self.cross_entropy = nn.CrossEntropyLoss( - weight=weight, reduction=reduction, label_smoothing=label_smoothing - ) - else: - self.cross_entropy = nn.CrossEntropyLoss(weight=weight, reduction=reduction) + self.cross_entropy = nn.CrossEntropyLoss(weight=weight, reduction=reduction, label_smoothing=label_smoothing) self.binary_cross_entropy = nn.BCEWithLogitsLoss(pos_weight=weight, reduction=reduction) if lambda_dice < 0.0: raise ValueError("lambda_dice should be no less than 0.0.") @@ -751,7 +746,6 @@ def __init__( raise ValueError("lambda_ce should be no less than 0.0.") self.lambda_dice = lambda_dice self.lambda_ce = lambda_ce - self.old_pt_ver = not pytorch_after(1, 10) def ce(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: """ @@ -764,12 +758,6 @@ def ce(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: if n_pred_ch != n_target_ch and n_target_ch == 1: target = torch.squeeze(target, dim=1) target = target.long() - elif self.old_pt_ver: - warnings.warn( - f"Multichannel targets are not supported in this older Pytorch version {torch.__version__}. 
" - "Using argmax (as a workaround) to convert target to a single channel." - ) - target = torch.argmax(target, dim=1) elif not torch.is_floating_point(target): target = target.to(dtype=input.dtype) diff --git a/monai/losses/ds_loss.py b/monai/losses/ds_loss.py index aacc16874d..6a604aa22d 100644 --- a/monai/losses/ds_loss.py +++ b/monai/losses/ds_loss.py @@ -17,8 +17,6 @@ import torch.nn.functional as F from torch.nn.modules.loss import _Loss -from monai.utils import pytorch_after - class DeepSupervisionLoss(_Loss): """ @@ -42,7 +40,7 @@ def __init__(self, loss: _Loss, weight_mode: str = "exp", weights: list[float] | self.loss = loss self.weight_mode = weight_mode self.weights = weights - self.interp_mode = "nearest-exact" if pytorch_after(1, 11) else "nearest" + self.interp_mode = "nearest-exact" def get_weights(self, levels: int = 1) -> list[float]: """ diff --git a/monai/networks/layers/simplelayers.py b/monai/networks/layers/simplelayers.py index 4acd4a3622..6d34c3fa77 100644 --- a/monai/networks/layers/simplelayers.py +++ b/monai/networks/layers/simplelayers.py @@ -31,7 +31,6 @@ issequenceiterable, look_up_option, optional_import, - pytorch_after, ) _C, _ = optional_import("monai._C") @@ -293,14 +292,7 @@ def apply_filter(x: torch.Tensor, kernel: torch.Tensor, **kwargs) -> torch.Tenso x = x.view(1, kernel.shape[0], *spatials) conv = [F.conv1d, F.conv2d, F.conv3d][n_spatial - 1] if "padding" not in kwargs: - if pytorch_after(1, 10): - kwargs["padding"] = "same" - else: - # even-sized kernels are not supported - kwargs["padding"] = [(k - 1) // 2 for k in kernel.shape[2:]] - elif kwargs["padding"] == "same" and not pytorch_after(1, 10): - # even-sized kernels are not supported - kwargs["padding"] = [(k - 1) // 2 for k in kernel.shape[2:]] + kwargs["padding"] = "same" if "stride" not in kwargs: kwargs["stride"] = 1 @@ -372,11 +364,7 @@ def _make_coeffs(window_length, order): a = idx ** torch.arange(order + 1, dtype=torch.float, device="cpu").reshape(-1, 1) y = torch.zeros(order + 1, dtype=torch.float, device="cpu") y[0] = 1.0 - return ( - torch.lstsq(y, a).solution.squeeze() # type: ignore - if not pytorch_after(1, 11) - else torch.linalg.lstsq(a, y).solution.squeeze() - ) + return torch.linalg.lstsq(a, y).solution.squeeze() class HilbertTransform(nn.Module): diff --git a/monai/networks/utils.py b/monai/networks/utils.py index 1b4cb220ae..2279bed0b4 100644 --- a/monai/networks/utils.py +++ b/monai/networks/utils.py @@ -31,7 +31,7 @@ from monai.apps.utils import get_logger from monai.config import PathLike from monai.utils.misc import ensure_tuple, save_obj, set_determinism -from monai.utils.module import look_up_option, optional_import, pytorch_after +from monai.utils.module import look_up_option, optional_import from monai.utils.type_conversion import convert_to_dst_type, convert_to_tensor onnx, _ = optional_import("onnx") @@ -676,15 +676,6 @@ def convert_to_onnx( torch_versioned_kwargs["verify"] = verify verify = False else: - if not pytorch_after(1, 10): - if "example_outputs" not in kwargs: - # https://github.com/pytorch/pytorch/blob/release/1.9/torch/onnx/__init__.py#L182 - raise TypeError( - "example_outputs is required in scripting mode before PyTorch 1.10." - "Please provide example outputs or use trace mode to export onnx model." 
- ) - torch_versioned_kwargs["example_outputs"] = kwargs["example_outputs"] - del kwargs["example_outputs"] mode_to_export = torch.jit.script(model, **kwargs) if torch.is_tensor(inputs) or isinstance(inputs, dict): @@ -746,8 +737,7 @@ def convert_to_onnx( # compare onnx/ort and PyTorch results for r1, r2 in zip(torch_out, onnx_out): if isinstance(r1, torch.Tensor): - assert_fn = torch.testing.assert_close if pytorch_after(1, 11) else torch.testing.assert_allclose - assert_fn(r1.cpu(), convert_to_tensor(r2, dtype=r1.dtype), rtol=rtol, atol=atol) # type: ignore + torch.testing.assert_close(r1.cpu(), convert_to_tensor(r2, dtype=r1.dtype), rtol=rtol, atol=atol) # type: ignore return onnx_model @@ -817,8 +807,7 @@ def convert_to_torchscript( # compare TorchScript and PyTorch results for r1, r2 in zip(torch_out, torchscript_out): if isinstance(r1, torch.Tensor) or isinstance(r2, torch.Tensor): - assert_fn = torch.testing.assert_close if pytorch_after(1, 11) else torch.testing.assert_allclose - assert_fn(r1, r2, rtol=rtol, atol=atol) # type: ignore + torch.testing.assert_close(r1, r2, rtol=rtol, atol=atol) # type: ignore return script_module @@ -1031,8 +1020,7 @@ def convert_to_trt( # compare TorchScript and PyTorch results for r1, r2 in zip(torch_out, trt_out): if isinstance(r1, torch.Tensor) or isinstance(r2, torch.Tensor): - assert_fn = torch.testing.assert_close if pytorch_after(1, 11) else torch.testing.assert_allclose - assert_fn(r1, r2, rtol=rtol, atol=atol) # type: ignore + torch.testing.assert_close(r1, r2, rtol=rtol, atol=atol) # type: ignore return trt_model diff --git a/monai/transforms/croppad/array.py b/monai/transforms/croppad/array.py index 813f8c1d44..d5ca876e98 100644 --- a/monai/transforms/croppad/array.py +++ b/monai/transforms/croppad/array.py @@ -56,7 +56,6 @@ ensure_tuple_rep, fall_back_tuple, look_up_option, - pytorch_after, ) __all__ = [ @@ -392,11 +391,7 @@ def compute_slices( roi_center_t = convert_to_tensor(data=roi_center, dtype=torch.int16, wrap_sequence=True, device="cpu") roi_size_t = convert_to_tensor(data=roi_size, dtype=torch.int16, wrap_sequence=True, device="cpu") _zeros = torch.zeros_like(roi_center_t) - half = ( - torch.divide(roi_size_t, 2, rounding_mode="floor") - if pytorch_after(1, 8) - else torch.floor_divide(roi_size_t, 2) - ) + half = torch.divide(roi_size_t, 2, rounding_mode="floor") roi_start_t = torch.maximum(roi_center_t - half, _zeros) roi_end_t = torch.maximum(roi_start_t + roi_size_t, roi_start_t) else: diff --git a/monai/transforms/utils.py b/monai/transforms/utils.py index e7e1616e13..1ff0abc27c 100644 --- a/monai/transforms/utils.py +++ b/monai/transforms/utils.py @@ -68,7 +68,6 @@ look_up_option, min_version, optional_import, - pytorch_after, unsqueeze_left, unsqueeze_right, ) @@ -2255,7 +2254,7 @@ def _to_torch_resample_interp_mode(interp_mode): if ret is not None: return ret _mapping = { - SplineMode.ZERO: InterpolateMode.NEAREST_EXACT if pytorch_after(1, 11) else InterpolateMode.NEAREST, + SplineMode.ZERO: InterpolateMode.NEAREST_EXACT, SplineMode.ONE: InterpolateMode.LINEAR, SplineMode.THREE: InterpolateMode.BICUBIC, } diff --git a/monai/utils/tf32.py b/monai/utils/tf32.py index cfb023bdeb..81f56477bb 100644 --- a/monai/utils/tf32.py +++ b/monai/utils/tf32.py @@ -60,16 +60,6 @@ def detect_default_tf32() -> bool: if not has_ampere_or_later(): return False - from monai.utils.module import pytorch_after - - if pytorch_after(1, 7, 0) and not pytorch_after(1, 12, 0): - warnings.warn( - "torch.backends.cuda.matmul.allow_tf32 = True by 
default.\n" - " This value defaults to True when PyTorch version in [1.7, 1.11] and may affect precision.\n" - " See https://docs.monai.io/en/latest/precision_accelerating.html#precision-and-accelerating" - ) - may_enable_tf32 = True - override_tf32_env_vars = {"NVIDIA_TF32_OVERRIDE": "1"} # TORCH_ALLOW_TF32_CUBLAS_OVERRIDE not checked #6907 for name, override_val in override_tf32_env_vars.items(): if os.environ.get(name) == override_val: diff --git a/monai/visualize/class_activation_maps.py b/monai/visualize/class_activation_maps.py index 489a563818..39f26d0fbd 100644 --- a/monai/visualize/class_activation_maps.py +++ b/monai/visualize/class_activation_maps.py @@ -22,7 +22,7 @@ from monai.config import NdarrayTensor from monai.transforms import ScaleIntensity -from monai.utils import ensure_tuple, pytorch_after +from monai.utils import ensure_tuple from monai.visualize.visualizer import default_upsampler __all__ = ["CAM", "GradCAM", "GradCAMpp", "ModelWithHooks", "default_normalizer"] @@ -83,13 +83,10 @@ def __init__( continue _registered.append(name) if self.register_backward: - if pytorch_after(1, 8): - if "inplace" in mod.__dict__ and mod.__dict__["inplace"]: - # inplace=True causes errors for register_full_backward_hook - mod.__dict__["inplace"] = False - mod.register_full_backward_hook(self.backward_hook(name)) - else: - mod.register_backward_hook(self.backward_hook(name)) + if "inplace" in mod.__dict__ and mod.__dict__["inplace"]: + # inplace=True causes errors for register_full_backward_hook + mod.__dict__["inplace"] = False + mod.register_full_backward_hook(self.backward_hook(name)) if self.register_forward: mod.register_forward_hook(self.forward_hook(name)) if self.target_layers and (len(_registered) != len(self.target_layers)): diff --git a/pyproject.toml b/pyproject.toml index 9dc9cf619b..8ad55b1c2c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,7 @@ requires = [ "wheel", "setuptools", - "torch>=1.9", + "torch>=1.13.1", "ninja", "packaging" ] diff --git a/requirements.txt b/requirements.txt index 85e7312f5d..5203b43128 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,2 @@ -torch>=1.9,<2.6 +torch>=1.13.1,<2.6 numpy>=1.24,<2.0 diff --git a/setup.cfg b/setup.cfg index 0c69051218..66d9e19609 100644 --- a/setup.cfg +++ b/setup.cfg @@ -42,7 +42,7 @@ setup_requires = ninja packaging install_requires = - torch>=1.9 + torch>=1.13.1 numpy>=1.24,<2.0 [options.extras_require] diff --git a/tests/data/meta_tensor/test_meta_tensor.py b/tests/data/meta_tensor/test_meta_tensor.py index d6a7ef9f0b..cd3def4de1 100644 --- a/tests/data/meta_tensor/test_meta_tensor.py +++ b/tests/data/meta_tensor/test_meta_tensor.py @@ -17,7 +17,6 @@ import string import tempfile import unittest -import warnings from copy import deepcopy from multiprocessing.reduction import ForkingPickler @@ -33,7 +32,6 @@ from monai.data.utils import decollate_batch, list_data_collate from monai.transforms import BorderPadd, Compose, DivisiblePadd, FromMetaTensord, ToMetaTensord from monai.utils.enums import PostFix -from monai.utils.module import pytorch_after from tests.test_utils import TEST_DEVICES, SkipIfBeforePyTorchVersion, assert_allclose, skip_if_no_cuda DTYPES = [[torch.float32], [torch.float64], [torch.float16], [torch.int64], [torch.int32], [None]] @@ -240,14 +238,6 @@ def test_torchscript(self, device): traced_fn = torch.jit.load(fname) out = traced_fn(im) self.assertIsInstance(out, torch.Tensor) - if not isinstance(out, MetaTensor) and not pytorch_after(1, 9, 1): - warnings.warn( - "When 
calling `nn.Module(MetaTensor) on a module traced with " - "`torch.jit.trace`, your version of pytorch returns a " - "`torch.Tensor` instead of a `MetaTensor`. Consider upgrading " - "your pytorch version if this is important to you." - ) - im_conv = im_conv.as_tensor() self.check(out, im_conv, ids=False) def test_pickling(self): @@ -256,9 +246,6 @@ def test_pickling(self): fname = os.path.join(tmp_dir, "im.pt") torch.save(m, fname) m2 = torch.load(fname) - if not isinstance(m2, MetaTensor) and not pytorch_after(1, 8, 1): - warnings.warn("Old version of pytorch. pickling converts `MetaTensor` to `torch.Tensor`.") - m = m.as_tensor() self.check(m2, m, ids=False) @skip_if_no_cuda @@ -555,11 +542,10 @@ def test_array_function(self, device="cpu", dtype=float): ) assert_allclose(np.argwhere(c == 1.0).astype(int).tolist(), [[0]]) assert_allclose(np.concatenate([c, c]), np.asarray([1.0, 2.0, 3.0, 1.0, 2.0, 3.0])) - if pytorch_after(1, 8, 1): - assert_allclose(c > np.asarray([1.0, 1.0, 1.0]), np.asarray([False, True, True])) - assert_allclose( - c > torch.as_tensor([1.0, 1.0, 1.0], device=device), torch.as_tensor([False, True, True], device=device) - ) + assert_allclose(c > np.asarray([1.0, 1.0, 1.0]), np.asarray([False, True, True])) + assert_allclose( + c > torch.as_tensor([1.0, 1.0, 1.0], device=device), torch.as_tensor([False, True, True], device=device) + ) @parameterized.expand(TESTS) def test_numpy(self, device=None, dtype=None): diff --git a/tests/data/test_cachedataset.py b/tests/data/test_cachedataset.py index dbb1b8f8f1..0c0a7ef286 100644 --- a/tests/data/test_cachedataset.py +++ b/tests/data/test_cachedataset.py @@ -22,7 +22,6 @@ from monai.data import CacheDataset, DataLoader, PersistentDataset, SmartCacheDataset from monai.transforms import Compose, Lambda, LoadImaged, RandLambda, ThreadUnsafe, Transform -from monai.utils.module import pytorch_after TEST_CASE_1 = [Compose([LoadImaged(keys=["image", "label", "extra"])]), (128, 128, 128)] @@ -130,7 +129,7 @@ class TestCacheThread(unittest.TestCase): @parameterized.expand(TEST_DS) def test_thread_safe(self, persistent_workers, cache_workers, loader_workers): expected = [102, 202, 302, 402, 502, 602, 702, 802, 902, 1002] - _kwg = {"persistent_workers": persistent_workers} if pytorch_after(1, 8) else {} + _kwg = {"persistent_workers": persistent_workers} data_list = list(range(1, 11)) dataset = CacheDataset( data=data_list, transform=_StatefulTransform(), cache_rate=1.0, num_workers=cache_workers, progress=False diff --git a/tests/integration/test_integration_sliding_window.py b/tests/integration/test_integration_sliding_window.py index a3e95b1d87..b5a01d9bc0 100644 --- a/tests/integration/test_integration_sliding_window.py +++ b/tests/integration/test_integration_sliding_window.py @@ -26,7 +26,7 @@ from monai.networks import eval_mode, predict_segmentation from monai.networks.nets import UNet from monai.transforms import EnsureChannelFirst, SaveImage -from monai.utils import pytorch_after, set_determinism +from monai.utils import set_determinism from tests.test_utils import DistTestCase, TimedCall, make_nifti_image, skip_if_quick @@ -55,11 +55,8 @@ def _sliding_window_processor(_engine, batch): return predict_segmentation(seg_probs) def save_func(engine): - if pytorch_after(1, 9, 1): - for m in engine.state.output: - saver(m) - else: - saver(engine.state.output[0]) + for m in engine.state.output: + saver(m) infer_engine = Engine(_sliding_window_processor) infer_engine.add_event_handler(Events.ITERATION_COMPLETED, save_func) diff --git 
a/tests/integration/test_integration_workflows.py b/tests/integration/test_integration_workflows.py index 2e14209480..8816f20ad7 100644 --- a/tests/integration/test_integration_workflows.py +++ b/tests/integration/test_integration_workflows.py @@ -53,7 +53,7 @@ ScaleIntensityd, ) from monai.utils import optional_import, set_determinism -from tests.test_utils import DistTestCase, TimedCall, assert_allclose, pytorch_after, skip_if_quick +from tests.test_utils import DistTestCase, TimedCall, assert_allclose, skip_if_quick from tests.testing_data.integration_answers import test_integration_value SummaryWriter, _ = optional_import("torch.utils.tensorboard", name="SummaryWriter") @@ -148,7 +148,7 @@ def _forward_completed(self, engine): val_handlers=val_handlers, amp=bool(amp), to_kwargs={"memory_format": torch.preserve_format}, - amp_kwargs={"dtype": torch.float16 if bool(amp) else torch.float32} if pytorch_after(1, 10, 0) else {}, + amp_kwargs={"dtype": torch.float16 if bool(amp) else torch.float32}, ) train_postprocessing = Compose( @@ -203,7 +203,7 @@ def _model_completed(self, engine): amp=bool(amp), optim_set_to_none=True, to_kwargs={"memory_format": torch.preserve_format}, - amp_kwargs={"dtype": torch.float16 if bool(amp) else torch.float32} if pytorch_after(1, 10, 0) else {}, + amp_kwargs={"dtype": torch.float16 if bool(amp) else torch.float32}, ) trainer.run() diff --git a/tests/networks/nets/test_swin_unetr.py b/tests/networks/nets/test_swin_unetr.py index 4c506a2861..4908907bfe 100644 --- a/tests/networks/nets/test_swin_unetr.py +++ b/tests/networks/nets/test_swin_unetr.py @@ -26,7 +26,6 @@ from monai.utils import optional_import from tests.test_utils import ( assert_allclose, - pytorch_after, skip_if_downloading_fails, skip_if_no_cuda, skip_if_quick, @@ -38,7 +37,7 @@ TEST_CASE_SWIN_UNETR = [] case_idx = 0 test_merging_mode = ["mergingv2", "merging", PatchMerging, PatchMergingV2] -checkpoint_vals = [True, False] if pytorch_after(1, 11) else [False] +checkpoint_vals = [True, False] for attn_drop_rate in [0.4]: for in_channels in [1]: for depth in [[2, 1, 1, 1], [1, 2, 1, 1]]: diff --git a/tests/networks/test_convert_to_onnx.py b/tests/networks/test_convert_to_onnx.py index 743630c67d..cfc356d5a4 100644 --- a/tests/networks/test_convert_to_onnx.py +++ b/tests/networks/test_convert_to_onnx.py @@ -20,7 +20,6 @@ from monai.networks import convert_to_onnx from monai.networks.nets import SegResNet, UNet -from monai.utils.module import pytorch_after from tests.test_utils import SkipIfBeforePyTorchVersion, SkipIfNoModule, optional_import, skip_if_quick if torch.cuda.is_available(): @@ -52,7 +51,7 @@ def test_unet(self, device, use_trace, use_ort): model = UNet( spatial_dims=2, in_channels=1, out_channels=3, channels=(16, 32, 64), strides=(2, 2), num_res_units=0 ) - if pytorch_after(1, 10) or use_trace: + if use_trace: onnx_model = convert_to_onnx( model=model, inputs=[torch.randn((16, 1, 32, 32), requires_grad=False)], @@ -65,22 +64,6 @@ def test_unet(self, device, use_trace, use_ort): rtol=rtol, atol=atol, ) - else: - # https://github.com/pytorch/pytorch/blob/release/1.9/torch/onnx/__init__.py#L182 - # example_outputs is required in scripting mode before PyTorch 3.10 - onnx_model = convert_to_onnx( - model=model, - inputs=[torch.randn((16, 1, 32, 32), requires_grad=False)], - input_names=["x"], - output_names=["y"], - example_outputs=[torch.randn((16, 3, 32, 32), requires_grad=False)], - verify=True, - device=device, - use_ort=use_ort, - use_trace=use_trace, - rtol=rtol, - atol=atol, - ) 
self.assertTrue(isinstance(onnx_model, onnx.ModelProto)) @parameterized.expand(TESTS_ORT) diff --git a/tests/testing_data/integration_answers.py b/tests/testing_data/integration_answers.py index e02b9ae995..1dbab8e544 100644 --- a/tests/testing_data/integration_answers.py +++ b/tests/testing_data/integration_answers.py @@ -70,366 +70,6 @@ ], } }, - { # test answers for PyTorch 1.12.1 - "integration_classification_2d": { - "losses": [0.776835828070428, 0.1615355300011149, 0.07492854832938523, 0.04591309238865877], - "best_metric": 0.9999184380485994, - "infer_prop": [1029, 896, 980, 1033, 961, 1046], - }, - "integration_segmentation_3d": { - "losses": [ - 0.5428894340991974, - 0.47331981360912323, - 0.4482289582490921, - 0.4452722787857056, - 0.4289989799261093, - 0.4359133839607239, - ], - "best_metric": 0.933259129524231, - "infer_metric": 0.9332860708236694, - "output_sums": [ - 0.142167581604417, - 0.15195543400875847, - 0.1512754523215521, - 0.13962938779108452, - 0.18835719348918614, - 0.16943498693483486, - 0.1465709827477569, - 0.16806483607477135, - 0.1568844609697224, - 0.17911090857818554, - 0.16252098157181355, - 0.16806016936625395, - 0.14430124467305516, - 0.11316135548315168, - 0.16183771025615476, - 0.2009426314066978, - 0.1760258010156966, - 0.09700864497950844, - 0.1938495370314683, - 0.20319147575335647, - 0.19629641404249798, - 0.20852344793102826, - 0.16185073630020633, - 0.13184196857669161, - 0.1480959525354053, - 0.14232924377085415, - 0.23177739882790951, - 0.16094610375534632, - 0.14832771888168225, - 0.10259365443625812, - 0.11850632233099603, - 0.1294100326098242, - 0.11364228279017609, - 0.15181947897584674, - 0.16319358155815072, - 0.1940284526521386, - 0.22306137879066443, - 0.18083137638759522, - 0.1903135237574692, - 0.07402317520619131, - ], - }, - "integration_workflows": { - "best_metric": 0.9219646483659745, - "infer_metric": 0.921751058101654, - "output_sums": [ - 0.14183664321899414, - 0.1513957977294922, - 0.13804054260253906, - 0.13356828689575195, - 0.18456125259399414, - 0.16363763809204102, - 0.14090299606323242, - 0.16649389266967773, - 0.15651893615722656, - 0.17655134201049805, - 0.16116666793823242, - 0.1644763946533203, - 0.14383649826049805, - 0.11055326461791992, - 0.16080379486083984, - 0.19629907608032227, - 0.17441415786743164, - 0.053577423095703125, - 0.19043779373168945, - 0.19904804229736328, - 0.19526052474975586, - 0.20304107666015625, - 0.16030025482177734, - 0.13170623779296875, - 0.15118932723999023, - 0.13686418533325195, - 0.22668886184692383, - 0.1611471176147461, - 0.1472463607788086, - 0.10427379608154297, - 0.11962461471557617, - 0.1305704116821289, - 0.11204910278320312, - 0.15171337127685547, - 0.15962505340576172, - 0.18976259231567383, - 0.21649456024169922, - 0.17761802673339844, - 0.18516874313354492, - 0.03636503219604492, - ], - "best_metric_2": 0.9219559609889985, - "infer_metric_2": 0.9217371672391892, - "output_sums_2": [ - 0.14187288284301758, - 0.15140819549560547, - 0.13802719116210938, - 0.1335887908935547, - 0.18454980850219727, - 0.1636652946472168, - 0.14091157913208008, - 0.16653108596801758, - 0.15651702880859375, - 0.17658615112304688, - 0.1611957550048828, - 0.16448307037353516, - 0.14385128021240234, - 0.1105203628540039, - 0.16085100173950195, - 0.19626951217651367, - 0.17442035675048828, - 0.053586483001708984, - 0.19042730331420898, - 0.1990523338317871, - 0.1952815055847168, - 0.20303773880004883, - 0.16034317016601562, - 0.13172531127929688, - 0.15118741989135742, - 0.1368694305419922, - 
0.22667837142944336, - 0.16119050979614258, - 0.14726591110229492, - 0.10426473617553711, - 0.11961841583251953, - 0.13054800033569336, - 0.11203193664550781, - 0.15172529220581055, - 0.15963029861450195, - 0.18975019454956055, - 0.21646499633789062, - 0.17763566970825195, - 0.18517112731933594, - 0.03638744354248047, - ], - }, - }, - { # test answers for cuda 10.x - "integration_classification_2d": { - "losses": [0.777176220515731, 0.16019743723664315, 0.07480076164197011, 0.045643698364780966], - "best_metric": 0.9999418774120775, - "infer_prop": [1030, 897, 980, 1033, 960, 1048], - }, - "integration_segmentation_3d": { - "losses": [ - 0.5326887160539627, - 0.4685510128736496, - 0.46245276033878324, - 0.4411882758140564, - 0.4198471873998642, - 0.43021280467510226, - ], - "best_metric": 0.931993305683136, - "infer_metric": 0.9326668977737427, - "output_sums": [ - 0.1418775228871769, - 0.15188869120317386, - 0.15140863737688195, - 0.1396146850007127, - 0.18784343811575696, - 0.16909487431163164, - 0.14649608249452073, - 0.1677767130878611, - 0.1568122289811143, - 0.17874181729735056, - 0.16213703658980205, - 0.16754335171970686, - 0.14444824920997243, - 0.11432402622850306, - 0.16143210936221247, - 0.20055289634107482, - 0.17543571757219317, - 0.09920729163334538, - 0.19297325815057875, - 0.2023200127892273, - 0.1956677579845722, - 0.20774045016425718, - 0.16193278944159428, - 0.13174198906539808, - 0.14830508550670007, - 0.14241105864278342, - 0.23090631643085724, - 0.16056153813499532, - 0.1480353269419819, - 0.10318719171632634, - 0.11867462580989198, - 0.12997011485830187, - 0.11401220332210203, - 0.15242746700662088, - 0.1628489107974574, - 0.19327235354175412, - 0.22184902863377548, - 0.18028049625972334, - 0.18958059106892552, - 0.07884601267057013, - ], - }, - "integration_workflows": { - "best_metric": 0.9217087924480438, - "infer_metric": 0.9214379042387009, - "output_sums": [ - 0.14209461212158203, - 0.15126705169677734, - 0.13800382614135742, - 0.1338181495666504, - 0.1850571632385254, - 0.16372442245483398, - 0.14059066772460938, - 0.16674423217773438, - 0.15653657913208008, - 0.17690563201904297, - 0.16154909133911133, - 0.16521310806274414, - 0.14388608932495117, - 0.1103353500366211, - 0.1609959602355957, - 0.1967010498046875, - 0.1746964454650879, - 0.05329275131225586, - 0.19098854064941406, - 0.19976520538330078, - 0.19576644897460938, - 0.20346736907958984, - 0.1601848602294922, - 0.1316051483154297, - 0.1511220932006836, - 0.13670969009399414, - 0.2276287078857422, - 0.1611800193786621, - 0.14751672744750977, - 0.10413789749145508, - 0.11944007873535156, - 0.1305546760559082, - 0.11204719543457031, - 0.15145111083984375, - 0.16007614135742188, - 0.1904129981994629, - 0.21741962432861328, - 0.17812013626098633, - 0.18587207794189453, - 0.03605222702026367, - ], - "best_metric_2": 0.9210659921169281, - "infer_metric_2": 0.9208109736442566, - "output_sums_2": [ - 0.14227628707885742, - 0.1515035629272461, - 0.13819408416748047, - 0.13402271270751953, - 0.18525266647338867, - 0.16388607025146484, - 0.14076614379882812, - 0.16694307327270508, - 0.15677356719970703, - 0.1771831512451172, - 0.16172313690185547, - 0.1653728485107422, - 0.14413118362426758, - 0.11057281494140625, - 0.16121912002563477, - 0.19680166244506836, - 0.1748638153076172, - 0.053426265716552734, - 0.19117307662963867, - 0.19996356964111328, - 0.1959366798400879, - 0.20363712310791016, - 0.16037797927856445, - 0.13180780410766602, - 0.1513657569885254, - 0.13686084747314453, - 0.2277364730834961, - 
0.16137409210205078, - 0.1476879119873047, - 0.10438394546508789, - 0.11967992782592773, - 0.13080739974975586, - 0.11226606369018555, - 0.15168476104736328, - 0.1602616310119629, - 0.190582275390625, - 0.21756458282470703, - 0.17825984954833984, - 0.18604803085327148, - 0.036206722259521484, - ], - }, - }, - { # test answers for PyTorch 1.9 - "integration_workflows": { - "output_sums_2": [ - 0.14213180541992188, - 0.15153264999389648, - 0.13801145553588867, - 0.1338348388671875, - 0.18515968322753906, - 0.16404008865356445, - 0.14110612869262695, - 0.16686391830444336, - 0.15673542022705078, - 0.1772594451904297, - 0.16174745559692383, - 0.16518878936767578, - 0.1440296173095703, - 0.11033201217651367, - 0.1611781120300293, - 0.19660568237304688, - 0.17468547821044922, - 0.053053855895996094, - 0.1909656524658203, - 0.19952869415283203, - 0.1957845687866211, - 0.2034916877746582, - 0.16042661666870117, - 0.13193607330322266, - 0.15104389190673828, - 0.13695430755615234, - 0.22720861434936523, - 0.16157913208007812, - 0.14759159088134766, - 0.10379791259765625, - 0.11937189102172852, - 0.1306462287902832, - 0.11205482482910156, - 0.15182113647460938, - 0.16006708145141602, - 0.19011592864990234, - 0.21713829040527344, - 0.17794132232666016, - 0.18584394454956055, - 0.03577899932861328, - ] - }, - "integration_segmentation_3d": { # for the mixed readers - "losses": [ - 0.5645154356956482, - 0.4984356611967087, - 0.472334086894989, - 0.47419720590114595, - 0.45881829261779783, - 0.43097741305828097, - ], - "best_metric": 0.9325698614120483, - "infer_metric": 0.9326590299606323, - }, - }, { # test answers for PyTorch 1.13 "integration_workflows": { "output_sums_2": [ diff --git a/tests/transforms/test_resize.py b/tests/transforms/test_resize.py index 0e11035cf7..2b9014c3a3 100644 --- a/tests/transforms/test_resize.py +++ b/tests/transforms/test_resize.py @@ -27,7 +27,6 @@ SkipIfAtLeastPyTorchVersion, assert_allclose, is_tf32_env, - pytorch_after, ) TEST_CASE_0 = [{"spatial_size": 15}, (6, 10, 15)] @@ -70,7 +69,7 @@ def test_unchange(self): ((32, 32), "area", False), ((32, 32, 32), "trilinear", True), ((256, 256), "bilinear", False), - ((256, 256), "nearest-exact" if pytorch_after(1, 11) else "nearest", False), + ((256, 256), "nearest-exact", False), ((128, 128), "nearest", False), ((128, 64), "area", True), # already in a good shape ] diff --git a/tests/transforms/test_resize_with_pad_or_crop.py b/tests/transforms/test_resize_with_pad_or_crop.py index bcda36adeb..fe39f6a047 100644 --- a/tests/transforms/test_resize_with_pad_or_crop.py +++ b/tests/transforms/test_resize_with_pad_or_crop.py @@ -20,23 +20,13 @@ from monai.data.meta_tensor import MetaTensor from monai.transforms import ResizeWithPadOrCrop from monai.transforms.lazy.functional import apply_pending -from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose, pytorch_after +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose TEST_CASES = [ [{"spatial_size": [15, 8, 8], "mode": "constant"}, (3, 8, 8, 4), (3, 15, 8, 8), True], [{"spatial_size": [15, 4, -1], "mode": "constant"}, (3, 8, 8, 4), (3, 15, 4, 4), True], - [ - {"spatial_size": [15, 4, -1], "mode": "reflect" if pytorch_after(1, 11) else "constant"}, - (3, 8, 8, 4), - (3, 15, 4, 4), - True, - ], - [ - {"spatial_size": [-1, -1, -1], "mode": "reflect" if pytorch_after(1, 11) else "constant"}, - (3, 8, 8, 4), - (3, 8, 8, 4), - True, - ], + [{"spatial_size": [15, 4, -1], "mode": "reflect"}, (3, 8, 8, 4), (3, 15, 4, 4), True], + [{"spatial_size": [-1, -1, -1], 
"mode": "reflect"}, (3, 8, 8, 4), (3, 8, 8, 4), True], [ {"spatial_size": [15, 4, 8], "mode": "constant", "method": "end", "constant_values": 1}, (3, 8, 8, 4), diff --git a/tests/transforms/test_resize_with_pad_or_cropd.py b/tests/transforms/test_resize_with_pad_or_cropd.py index 2e162d3f69..cd642291d8 100644 --- a/tests/transforms/test_resize_with_pad_or_cropd.py +++ b/tests/transforms/test_resize_with_pad_or_cropd.py @@ -21,22 +21,14 @@ from monai.data.meta_tensor import MetaTensor from monai.transforms import ResizeWithPadOrCropd from monai.transforms.lazy.functional import apply_pending -from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose, pytorch_after +from tests.test_utils import TEST_NDARRAYS_ALL, assert_allclose from tests.transforms.test_resize_with_pad_or_crop import TESTS_PENDING_MODE TEST_CASES = [ [{"keys": "img", "spatial_size": [15, 8, 8], "mode": "constant"}, {"img": np.zeros((3, 8, 8, 4))}, (3, 15, 8, 8)], [{"keys": "img", "spatial_size": [15, 4, -1], "mode": "constant"}, {"img": np.zeros((3, 8, 8, 4))}, (3, 15, 4, 4)], - [ - {"keys": "img", "spatial_size": [15, 4, -1], "mode": "reflect" if pytorch_after(1, 11) else "constant"}, - {"img": np.zeros((3, 8, 8, 4))}, - (3, 15, 4, 4), - ], - [ - {"keys": "img", "spatial_size": [-1, -1, -1], "mode": "reflect" if pytorch_after(1, 11) else "constant"}, - {"img": np.zeros((3, 8, 8, 4))}, - (3, 8, 8, 4), - ], + [{"keys": "img", "spatial_size": [15, 4, -1], "mode": "reflect"}, {"img": np.zeros((3, 8, 8, 4))}, (3, 15, 4, 4)], + [{"keys": "img", "spatial_size": [-1, -1, -1], "mode": "reflect"}, {"img": np.zeros((3, 8, 8, 4))}, (3, 8, 8, 4)], [ {"keys": "img", "spatial_size": [15, 4, 8], "mode": "constant", "method": "end", "constant_values": 1}, {"img": np.zeros((3, 8, 8, 4))}, From 749693bba56158a74997beeacf6ceae17d97845c Mon Sep 17 00:00:00 2001 From: Virginia Fernandez <61539159+virginiafdez@users.noreply.github.com> Date: Tue, 18 Feb 2025 12:57:02 +0000 Subject: [PATCH 15/55] Inferer modification - save_intermediates clashes with latent shape adjustment in latent diffusion inferers (#8343) Fixes #8334 ### Description There was an if save_intermediates missing in the code that was trying to run crop of the latent spaces on the sample function of the Latent Diffusion Inferers (normal one and ControlNet one) even when intermediates aren't created. ### Types of changes - [x] Non-breaking change (fix or new feature that would not break existing functionality). 
--------- Signed-off-by: Virginia Fernandez Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> Co-authored-by: Virginia Fernandez Co-authored-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> Signed-off-by: Can-Zhao --- monai/inferers/inferer.py | 17 ++-- tests/inferers/test_controlnet_inferers.py | 82 ++++++++++++++++++- .../inferers/test_latent_diffusion_inferer.py | 62 +++++++++++++- 3 files changed, 151 insertions(+), 10 deletions(-) diff --git a/monai/inferers/inferer.py b/monai/inferers/inferer.py index 61fbacd1a7..810afa0be3 100644 --- a/monai/inferers/inferer.py +++ b/monai/inferers/inferer.py @@ -1213,15 +1213,16 @@ def sample( # type: ignore[override] if self.autoencoder_latent_shape is not None: latent = torch.stack([self.autoencoder_resizer(i) for i in decollate_batch(latent)], 0) - latent_intermediates = [ - torch.stack([self.autoencoder_resizer(i) for i in decollate_batch(l)], 0) for l in latent_intermediates - ] + if save_intermediates: + latent_intermediates = [ + torch.stack([self.autoencoder_resizer(i) for i in decollate_batch(l)], 0) + for l in latent_intermediates + ] decode = autoencoder_model.decode_stage_2_outputs if isinstance(autoencoder_model, SPADEAutoencoderKL): decode = partial(autoencoder_model.decode_stage_2_outputs, seg=seg) image = decode(latent / self.scale_factor) - if save_intermediates: intermediates = [] for latent_intermediate in latent_intermediates: @@ -1738,9 +1739,11 @@ def sample( # type: ignore[override] if self.autoencoder_latent_shape is not None: latent = torch.stack([self.autoencoder_resizer(i) for i in decollate_batch(latent)], 0) - latent_intermediates = [ - torch.stack([self.autoencoder_resizer(i) for i in decollate_batch(l)], 0) for l in latent_intermediates - ] + if save_intermediates: + latent_intermediates = [ + torch.stack([self.autoencoder_resizer(i) for i in decollate_batch(l)], 0) + for l in latent_intermediates + ] decode = autoencoder_model.decode_stage_2_outputs if isinstance(autoencoder_model, SPADEAutoencoderKL): diff --git a/tests/inferers/test_controlnet_inferers.py b/tests/inferers/test_controlnet_inferers.py index e3b0aeb5a2..2ab5cec335 100644 --- a/tests/inferers/test_controlnet_inferers.py +++ b/tests/inferers/test_controlnet_inferers.py @@ -722,7 +722,7 @@ def test_prediction_shape( @parameterized.expand(LATENT_CNDM_TEST_CASES) @skipUnless(has_einops, "Requires einops") - def test_sample_shape( + def test_pred_shape( self, ae_model_type, autoencoder_params, @@ -1165,7 +1165,7 @@ def test_sample_shape_conditioned_concat( @parameterized.expand(LATENT_CNDM_TEST_CASES_DIFF_SHAPES) @skipUnless(has_einops, "Requires einops") - def test_sample_shape_different_latents( + def test_shape_different_latents( self, ae_model_type, autoencoder_params, @@ -1242,6 +1242,84 @@ def test_sample_shape_different_latents( ) self.assertEqual(prediction.shape, latent_shape) + @parameterized.expand(LATENT_CNDM_TEST_CASES_DIFF_SHAPES) + @skipUnless(has_einops, "Requires einops") + def test_sample_shape_different_latents( + self, + ae_model_type, + autoencoder_params, + dm_model_type, + stage_2_params, + controlnet_params, + input_shape, + latent_shape, + ): + stage_1 = None + + if ae_model_type == "AutoencoderKL": + stage_1 = AutoencoderKL(**autoencoder_params) + if ae_model_type == "VQVAE": + stage_1 = VQVAE(**autoencoder_params) + if ae_model_type == "SPADEAutoencoderKL": + stage_1 = SPADEAutoencoderKL(**autoencoder_params) + if dm_model_type == "SPADEDiffusionModelUNet": + stage_2 = 
SPADEDiffusionModelUNet(**stage_2_params) + else: + stage_2 = DiffusionModelUNet(**stage_2_params) + controlnet = ControlNet(**controlnet_params) + + device = "cuda:0" if torch.cuda.is_available() else "cpu" + stage_1.to(device) + stage_2.to(device) + controlnet.to(device) + stage_1.eval() + stage_2.eval() + controlnet.eval() + + noise = torch.randn(latent_shape).to(device) + mask = torch.randn(input_shape).to(device) + scheduler = DDPMScheduler(num_train_timesteps=10) + # We infer the VAE shape + if ae_model_type == "VQVAE": + autoencoder_latent_shape = [i // (2 ** (len(autoencoder_params["channels"]))) for i in input_shape[2:]] + else: + autoencoder_latent_shape = [i // (2 ** (len(autoencoder_params["channels"]) - 1)) for i in input_shape[2:]] + + inferer = ControlNetLatentDiffusionInferer( + scheduler=scheduler, + scale_factor=1.0, + ldm_latent_shape=list(latent_shape[2:]), + autoencoder_latent_shape=autoencoder_latent_shape, + ) + scheduler.set_timesteps(num_inference_steps=10) + + if dm_model_type == "SPADEDiffusionModelUNet" or ae_model_type == "SPADEAutoencoderKL": + input_shape_seg = list(input_shape) + if "label_nc" in stage_2_params.keys(): + input_shape_seg[1] = stage_2_params["label_nc"] + else: + input_shape_seg[1] = autoencoder_params["label_nc"] + input_seg = torch.randn(input_shape_seg).to(device) + prediction, _ = inferer.sample( + autoencoder_model=stage_1, + diffusion_model=stage_2, + controlnet=controlnet, + cn_cond=mask, + input_noise=noise, + seg=input_seg, + save_intermediates=True, + ) + else: + prediction = inferer.sample( + autoencoder_model=stage_1, + diffusion_model=stage_2, + input_noise=noise, + controlnet=controlnet, + cn_cond=mask, + save_intermediates=False, + ) + self.assertEqual(prediction.shape, input_shape) + @skipUnless(has_einops, "Requires einops") def test_incompatible_spade_setup(self): stage_1 = SPADEAutoencoderKL( diff --git a/tests/inferers/test_latent_diffusion_inferer.py b/tests/inferers/test_latent_diffusion_inferer.py index 2e04ad6c5c..4f81b96ca1 100644 --- a/tests/inferers/test_latent_diffusion_inferer.py +++ b/tests/inferers/test_latent_diffusion_inferer.py @@ -714,7 +714,7 @@ def test_sample_shape_conditioned_concat( @parameterized.expand(TEST_CASES_DIFF_SHAPES) @skipUnless(has_einops, "Requires einops") - def test_sample_shape_different_latents( + def test_shape_different_latents( self, ae_model_type, autoencoder_params, dm_model_type, stage_2_params, input_shape, latent_shape ): stage_1 = None @@ -772,6 +772,66 @@ def test_sample_shape_different_latents( ) self.assertEqual(prediction.shape, latent_shape) + @parameterized.expand(TEST_CASES_DIFF_SHAPES) + @skipUnless(has_einops, "Requires einops") + def test_sample_shape_different_latents( + self, ae_model_type, autoencoder_params, dm_model_type, stage_2_params, input_shape, latent_shape + ): + stage_1 = None + + if ae_model_type == "AutoencoderKL": + stage_1 = AutoencoderKL(**autoencoder_params) + if ae_model_type == "VQVAE": + stage_1 = VQVAE(**autoencoder_params) + if ae_model_type == "SPADEAutoencoderKL": + stage_1 = SPADEAutoencoderKL(**autoencoder_params) + if dm_model_type == "SPADEDiffusionModelUNet": + stage_2 = SPADEDiffusionModelUNet(**stage_2_params) + else: + stage_2 = DiffusionModelUNet(**stage_2_params) + + device = "cuda:0" if torch.cuda.is_available() else "cpu" + stage_1.to(device) + stage_2.to(device) + stage_1.eval() + stage_2.eval() + + noise = torch.randn(latent_shape).to(device) + scheduler = DDPMScheduler(num_train_timesteps=10) + # We infer the VAE shape + if 
ae_model_type == "VQVAE": + autoencoder_latent_shape = [i // (2 ** (len(autoencoder_params["channels"]))) for i in input_shape[2:]] + else: + autoencoder_latent_shape = [i // (2 ** (len(autoencoder_params["channels"]) - 1)) for i in input_shape[2:]] + + inferer = LatentDiffusionInferer( + scheduler=scheduler, + scale_factor=1.0, + ldm_latent_shape=list(latent_shape[2:]), + autoencoder_latent_shape=autoencoder_latent_shape, + ) + scheduler.set_timesteps(num_inference_steps=10) + + if dm_model_type == "SPADEDiffusionModelUNet" or ae_model_type == "SPADEAutoencoderKL": + input_shape_seg = list(input_shape) + if "label_nc" in stage_2_params.keys(): + input_shape_seg[1] = stage_2_params["label_nc"] + else: + input_shape_seg[1] = autoencoder_params["label_nc"] + input_seg = torch.randn(input_shape_seg).to(device) + prediction, _ = inferer.sample( + autoencoder_model=stage_1, + diffusion_model=stage_2, + input_noise=noise, + save_intermediates=True, + seg=input_seg, + ) + else: + prediction = inferer.sample( + autoencoder_model=stage_1, diffusion_model=stage_2, input_noise=noise, save_intermediates=False + ) + self.assertEqual(prediction.shape, input_shape) + @skipUnless(has_einops, "Requires einops") def test_incompatible_spade_setup(self): stage_1 = SPADEAutoencoderKL( From 599f8a91ab344e32066a841723d36780659912cd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20K=C3=A4nzig?= <36882833+nkaenzig@users.noreply.github.com> Date: Wed, 19 Feb 2025 07:40:38 +0100 Subject: [PATCH 16/55] Fix `packaging` imports in version comparison logic (#8347) Fixes #8349 ### Description The current behaviour is that `pkging, has_ver = optional_import("packaging.Version")` always returns `has_ver=False` because the import always fails (the `Version` class is exposed by the `packaging.version` submodule). This issue previously didn't surface, because when the import fails, it would just continue to use the fallback logic. However, there seem to be more hidden and more severe implications, which ultimately led me to discovering this particular bug: Function like `floor_divide()` in `monai.transforms` that check the module version using this logic are called many times in common ML dataloading workflows. The failed imports somehow can lead to OOM errors and the main process being killed (see https://github.com/Project-MONAI/MONAI/issues/8348). Maybe when `optional_import` fails to import a module, the lazy exceptions somehow stack up in memory when this function is called many times in a short time period? ### Types of changes - [x] Non-breaking change (fix or new feature that would not break existing functionality). - [ ] Breaking change (fix or new feature that would cause existing functionality to change). - [ ] New tests added to cover the changes. - [ ] Integration tests passed locally by running `./runtests.sh -f -u --net --coverage`. - [ ] Quick tests passed locally by running `./runtests.sh --quick --unittests --disttests`. - [ ] In-line docstrings updated. - [ ] Documentation updated, tested `make html` command in the `docs/` folder. 
Signed-off-by: Can-Zhao --- monai/utils/module.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/monai/utils/module.py b/monai/utils/module.py index d3f2ff09f2..7bbbb4ab1e 100644 --- a/monai/utils/module.py +++ b/monai/utils/module.py @@ -540,11 +540,11 @@ def version_leq(lhs: str, rhs: str) -> bool: """ lhs, rhs = str(lhs), str(rhs) - pkging, has_ver = optional_import("packaging.Version") + pkging, has_ver = optional_import("packaging.version") if has_ver: try: - return cast(bool, pkging.version.Version(lhs) <= pkging.version.Version(rhs)) - except pkging.version.InvalidVersion: + return cast(bool, pkging.Version(lhs) <= pkging.Version(rhs)) + except pkging.InvalidVersion: return True lhs_, rhs_ = parse_version_strs(lhs, rhs) @@ -567,12 +567,12 @@ def version_geq(lhs: str, rhs: str) -> bool: """ lhs, rhs = str(lhs), str(rhs) - pkging, has_ver = optional_import("packaging.Version") + pkging, has_ver = optional_import("packaging.version") if has_ver: try: - return cast(bool, pkging.version.Version(lhs) >= pkging.version.Version(rhs)) - except pkging.version.InvalidVersion: + return cast(bool, pkging.Version(lhs) >= pkging.Version(rhs)) + except pkging.InvalidVersion: return True lhs_, rhs_ = parse_version_strs(lhs, rhs) From 87a6c4c27af19bce87a3b3ca1fa51515b37ad8e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20K=C3=A4nzig?= <36882833+nkaenzig@users.noreply.github.com> Date: Wed, 19 Feb 2025 19:01:30 +0100 Subject: [PATCH 17/55] Removed outdated `torch` version checks from transform functions (#8359) Fixes #8348 ### Description Support for `torch` versions prior to `1.13` has been dropped, so those `1.8` version checks are not required anymore. Furthermore, as reported in the issue description, those checks led to unstable behaviour when using certain transforms in data pipelines. ### Types of changes - [x] Non-breaking change (fix or new feature that would not break existing functionality). - [ ] Breaking change (fix or new feature that would cause existing functionality to change). - [ ] New tests added to cover the changes. - [ ] Integration tests passed locally by running `./runtests.sh -f -u --net --coverage`. - [ ] Quick tests passed locally by running `./runtests.sh --quick --unittests --disttests`. - [ ] In-line docstrings updated. - [ ] Documentation updated, tested `make html` command in the `docs/` folder. 
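As a standalone illustration of the simplified dispatch (mirroring, not reproducing, the function in `monai/transforms/utils_pytorch_numpy_unification.py`), once torch >= 1.13 is assumed the version gate disappears entirely:

```python
import numpy as np
import torch


def floor_divide(a, b):
    """Element-wise floor division for either torch tensors or numpy arrays."""
    if isinstance(a, torch.Tensor):
        return torch.floor_divide(a, b)
    return np.floor_divide(a, b)


print(floor_divide(torch.tensor([5, 7]), 2))  # tensor([2, 3])
print(floor_divide(np.array([5, 7]), 2))      # [2 3]
```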
--------- Signed-off-by: Nicolas Kaenzig Signed-off-by: Can-Zhao --- monai/transforms/utility/array.py | 14 ++------------ .../transforms/utils_pytorch_numpy_unification.py | 6 ++---- 2 files changed, 4 insertions(+), 16 deletions(-) diff --git a/monai/transforms/utility/array.py b/monai/transforms/utility/array.py index 2963c8a2f8..8491e4739c 100644 --- a/monai/transforms/utility/array.py +++ b/monai/transforms/utility/array.py @@ -66,7 +66,6 @@ optional_import, ) from monai.utils.enums import TransformBackends -from monai.utils.misc import is_module_ver_at_least from monai.utils.type_conversion import convert_to_dst_type, get_dtype_string, get_equivalent_dtype PILImageImage, has_pil = optional_import("PIL.Image", name="Image") @@ -939,19 +938,10 @@ def __call__( data = img[[*select_labels]] else: where: Callable = np.where if isinstance(img, np.ndarray) else torch.where # type: ignore - if isinstance(img, np.ndarray) or is_module_ver_at_least(torch, (1, 8, 0)): - data = where(in1d(img, select_labels), True, False).reshape(img.shape) - # pre pytorch 1.8.0, need to use 1/0 instead of True/False - else: - data = where( - in1d(img, select_labels), torch.tensor(1, device=img.device), torch.tensor(0, device=img.device) - ).reshape(img.shape) + data = where(in1d(img, select_labels), True, False).reshape(img.shape) if merge_channels or self.merge_channels: - if isinstance(img, np.ndarray) or is_module_ver_at_least(torch, (1, 8, 0)): - return data.any(0)[None] - # pre pytorch 1.8.0 compatibility - return data.to(torch.uint8).any(0)[None].to(bool) # type: ignore + return data.any(0)[None] return data diff --git a/monai/transforms/utils_pytorch_numpy_unification.py b/monai/transforms/utils_pytorch_numpy_unification.py index 365bd1eab5..8f22d00674 100644 --- a/monai/transforms/utils_pytorch_numpy_unification.py +++ b/monai/transforms/utils_pytorch_numpy_unification.py @@ -18,7 +18,6 @@ import torch from monai.config.type_definitions import NdarrayOrTensor, NdarrayTensor -from monai.utils.misc import is_module_ver_at_least from monai.utils.type_conversion import convert_data_type, convert_to_dst_type __all__ = [ @@ -215,10 +214,9 @@ def floor_divide(a: NdarrayOrTensor, b) -> NdarrayOrTensor: Element-wise floor division between two arrays/tensors. """ if isinstance(a, torch.Tensor): - if is_module_ver_at_least(torch, (1, 8, 0)): - return torch.div(a, b, rounding_mode="floor") return torch.floor_divide(a, b) - return np.floor_divide(a, b) + else: + return np.floor_divide(a, b) def unravel_index(idx, shape) -> NdarrayOrTensor: From 17440c88c50b6ec82eed28c51e5ae53ed024069f Mon Sep 17 00:00:00 2001 From: Bartosz Grabowski <58475557+bartosz-grabowski@users.noreply.github.com> Date: Thu, 20 Feb 2025 09:44:10 +0100 Subject: [PATCH 18/55] Fix CommonKeys docstring (#8342) ### Description `CommonKeys()` docstring mentions `INFO` which doesn't exist. Instead there is a `METADATA` field, so the docstring was updated accordingly. ### Types of changes - [x] Non-breaking change (fix or new feature that would not break existing functionality). - [ ] Breaking change (fix or new feature that would cause existing functionality to change). - [ ] New tests added to cover the changes. - [ ] Integration tests passed locally by running `./runtests.sh -f -u --net --coverage`. - [ ] Quick tests passed locally by running `./runtests.sh --quick --unittests --disttests`. - [ ] In-line docstrings updated. - [ ] Documentation updated, tested `make html` command in the `docs/` folder. 
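A quick, illustrative check against an installed MONAI (not part of the patch itself) showing why the docstring needed the update:

```python
from monai.utils.enums import CommonKeys

print(hasattr(CommonKeys, "METADATA"))  # True -- the member the docstring should mention
print(hasattr(CommonKeys, "INFO"))      # False -- there is no INFO member
print([k.value for k in CommonKeys])    # the actual key names used in engine/handler dicts
```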
Signed-off-by: Bartosz Grabowski <58475557+bartosz-grabowski@users.noreply.github.com> Co-authored-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> Signed-off-by: Can-Zhao --- monai/utils/enums.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monai/utils/enums.py b/monai/utils/enums.py index 1fbf3ffa05..ac14134acc 100644 --- a/monai/utils/enums.py +++ b/monai/utils/enums.py @@ -335,7 +335,7 @@ class CommonKeys(StrEnum): `LABEL` is the training or evaluation label of segmentation or classification task. `PRED` is the prediction data of model output. `LOSS` is the loss value of current iteration. - `INFO` is some useful information during training or evaluation, like loss value, etc. + `METADATA` is some useful information during training or evaluation, like loss value, etc. """ From 90dd2cc697be1961dc2a642353c688cc6e44b095 Mon Sep 17 00:00:00 2001 From: Thibault de Varax <154365476+thibaultdvx@users.noreply.github.com> Date: Thu, 20 Feb 2025 13:27:37 +0100 Subject: [PATCH 19/55] Add Average Precision to metrics (#8089) Fixes #8085. ### Description Average Precision is very similar to ROCAUC, so I was very much inspired by the ROCAUC implementation. More precisely, I created: - `AveragePrecisionMetric` and `compute_average_precision` in `monai.metrics`, - a handler called `AveragePrecision` in `monai.handlers`, - three unittest modules: `test_compute_average_precision.py`, `test_handler_average_precision.py` and `test_handler_average_precision_dist.py`. I also modified the docs to mention Average Precision. ### Types of changes - [x] Non-breaking change (fix or new feature that would not break existing functionality). - [ ] Breaking change (fix or new feature that would cause existing functionality to change). - [x] New tests added to cover the changes. - [ ] Integration tests passed locally by running `./runtests.sh -f -u --net --coverage`. - [ ] Quick tests passed locally by running `./runtests.sh --quick --unittests --disttests`. - [x] In-line docstrings updated. - [x] Documentation updated, tested `make html` command in the `docs/` folder. --------- Signed-off-by: thibaultdvx Signed-off-by: Thibault de Varax <154365476+thibaultdvx@users.noreply.github.com> Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> Co-authored-by: Eric Kerfoot <17726042+ericspod@users.noreply.github.com> Signed-off-by: Can-Zhao --- docs/source/handlers.rst | 6 + docs/source/metrics.rst | 7 + monai/handlers/__init__.py | 1 + monai/handlers/average_precision.py | 53 +++++ monai/metrics/__init__.py | 1 + monai/metrics/average_precision.py | 187 ++++++++++++++++++ monai/utils/enums.py | 3 +- .../test_handler_average_precision.py | 79 ++++++++ .../metrics/test_compute_average_precision.py | 162 +++++++++++++++ tests/min_tests.py | 1 + 10 files changed, 499 insertions(+), 1 deletion(-) create mode 100644 monai/handlers/average_precision.py create mode 100644 monai/metrics/average_precision.py create mode 100644 tests/handlers/test_handler_average_precision.py create mode 100644 tests/metrics/test_compute_average_precision.py diff --git a/docs/source/handlers.rst b/docs/source/handlers.rst index 270083f717..49c84dab28 100644 --- a/docs/source/handlers.rst +++ b/docs/source/handlers.rst @@ -53,6 +53,12 @@ ROC AUC metrics handler :members: +Average Precision metric handler +-------------------------------- +.. 
autoclass:: AveragePrecision + :members: + + Confusion matrix metrics handler -------------------------------- .. autoclass:: ConfusionMatrix diff --git a/docs/source/metrics.rst b/docs/source/metrics.rst index 616f0fe385..45e0827cf9 100644 --- a/docs/source/metrics.rst +++ b/docs/source/metrics.rst @@ -80,6 +80,13 @@ Metrics .. autoclass:: ROCAUCMetric :members: +`Average Precision` +------------------- +.. autofunction:: compute_average_precision + +.. autoclass:: AveragePrecisionMetric + :members: + `Confusion matrix` ------------------ .. autofunction:: get_confusion_matrix diff --git a/monai/handlers/__init__.py b/monai/handlers/__init__.py index c1fa448f25..ed5db8a7f3 100644 --- a/monai/handlers/__init__.py +++ b/monai/handlers/__init__.py @@ -11,6 +11,7 @@ from __future__ import annotations +from .average_precision import AveragePrecision from .checkpoint_loader import CheckpointLoader from .checkpoint_saver import CheckpointSaver from .classification_saver import ClassificationSaver diff --git a/monai/handlers/average_precision.py b/monai/handlers/average_precision.py new file mode 100644 index 0000000000..608d7eea72 --- /dev/null +++ b/monai/handlers/average_precision.py @@ -0,0 +1,53 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from collections.abc import Callable + +from monai.handlers.ignite_metric import IgniteMetricHandler +from monai.metrics import AveragePrecisionMetric +from monai.utils import Average + + +class AveragePrecision(IgniteMetricHandler): + """ + Computes Average Precision (AP). + accumulating predictions and the ground-truth during an epoch and applying `compute_average_precision`. + + Args: + average: {``"macro"``, ``"weighted"``, ``"micro"``, ``"none"``} + Type of averaging performed if not binary classification. Defaults to ``"macro"``. + + - ``"macro"``: calculate metrics for each label, and find their unweighted mean. + This does not take label imbalance into account. + - ``"weighted"``: calculate metrics for each label, and find their average, + weighted by support (the number of true instances for each label). + - ``"micro"``: calculate metrics globally by considering each element of the label + indicator matrix as a label. + - ``"none"``: the scores for each class are returned. + + output_transform: callable to extract `y_pred` and `y` from `ignite.engine.state.output` then + construct `(y_pred, y)` pair, where `y_pred` and `y` can be `batch-first` Tensors or + lists of `channel-first` Tensors. the form of `(y_pred, y)` is required by the `update()`. + `engine.state` and `output_transform` inherit from the ignite concept: + https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial: + https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb. + + Note: + Average Precision expects y to be comprised of 0's and 1's. + y_pred must either be probability estimates or confidence values. 
+ + """ + + def __init__(self, average: Average | str = Average.MACRO, output_transform: Callable = lambda x: x) -> None: + metric_fn = AveragePrecisionMetric(average=Average(average)) + super().__init__(metric_fn=metric_fn, output_transform=output_transform, save_details=False) diff --git a/monai/metrics/__init__.py b/monai/metrics/__init__.py index 201acdfa50..7176f3311f 100644 --- a/monai/metrics/__init__.py +++ b/monai/metrics/__init__.py @@ -12,6 +12,7 @@ from __future__ import annotations from .active_learning_metrics import LabelQualityScore, VarianceMetric, compute_variance, label_quality_score +from .average_precision import AveragePrecisionMetric, compute_average_precision from .confusion_matrix import ConfusionMatrixMetric, compute_confusion_matrix_metric, get_confusion_matrix from .cumulative_average import CumulativeAverage from .f_beta_score import FBetaScore diff --git a/monai/metrics/average_precision.py b/monai/metrics/average_precision.py new file mode 100644 index 0000000000..53c41aeca5 --- /dev/null +++ b/monai/metrics/average_precision.py @@ -0,0 +1,187 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import warnings +from typing import TYPE_CHECKING, cast + +import numpy as np + +if TYPE_CHECKING: + import numpy.typing as npt + +import torch + +from monai.utils import Average, look_up_option + +from .metric import CumulativeIterationMetric + + +class AveragePrecisionMetric(CumulativeIterationMetric): + """ + Computes Average Precision (AP). AP is a useful metric to evaluate a classifier when the classes are + imbalanced. It can take values between 0.0 and 1.0, 1.0 being the best possible score. + It summarizes a Precision-Recall curve as the weighted mean of precisions achieved at each + threshold, with the increase in recall from the previous threshold used as the weight: + + .. math:: + \\text{AP} = \\sum_n (R_n - R_{n-1}) P_n + :label: ap + + where :math:`P_n` and :math:`R_n` are the precision and recall at the :math:`n^{th}` threshold. + + Referring to: `sklearn.metrics.average_precision_score + `_. + + The input `y_pred` and `y` can be a list of `channel-first` Tensor or a `batch-first` Tensor. + + Example of the typical execution steps of this metric class follows :py:class:`monai.metrics.metric.Cumulative`. + + Args: + average: {``"macro"``, ``"weighted"``, ``"micro"``, ``"none"``} + Type of averaging performed if not binary classification. + Defaults to ``"macro"``. + + - ``"macro"``: calculate metrics for each label, and find their unweighted mean. + This does not take label imbalance into account. + - ``"weighted"``: calculate metrics for each label, and find their average, + weighted by support (the number of true instances for each label). + - ``"micro"``: calculate metrics globally by considering each element of the label + indicator matrix as a label. + - ``"none"``: the scores for each class are returned. 
+ + """ + + def __init__(self, average: Average | str = Average.MACRO) -> None: + super().__init__() + self.average = average + + def _compute_tensor(self, y_pred: torch.Tensor, y: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: # type: ignore[override] + return y_pred, y + + def aggregate(self, average: Average | str | None = None) -> np.ndarray | float | npt.ArrayLike: + """ + Typically `y_pred` and `y` are stored in the cumulative buffers at each iteration, + This function reads the buffers and computes the Average Precision. + + Args: + average: {``"macro"``, ``"weighted"``, ``"micro"``, ``"none"``} + Type of averaging performed if not binary classification. Defaults to `self.average`. + + """ + y_pred, y = self.get_buffer() + # compute final value and do metric reduction + if not isinstance(y_pred, torch.Tensor) or not isinstance(y, torch.Tensor): + raise ValueError("y_pred and y must be PyTorch Tensor.") + + return compute_average_precision(y_pred=y_pred, y=y, average=average or self.average) + + +def _calculate(y_pred: torch.Tensor, y: torch.Tensor) -> float: + if not (y.ndimension() == y_pred.ndimension() == 1 and len(y) == len(y_pred)): + raise AssertionError("y and y_pred must be 1 dimension data with same length.") + y_unique = y.unique() + if len(y_unique) == 1: + warnings.warn(f"y values can not be all {y_unique.item()}, skip AP computation and return `Nan`.") + return float("nan") + if not y_unique.equal(torch.tensor([0, 1], dtype=y.dtype, device=y.device)): + warnings.warn(f"y values must be 0 or 1, but in {y_unique.tolist()}, skip AP computation and return `Nan`.") + return float("nan") + + n = len(y) + indices = y_pred.argsort(descending=True) + y = y[indices].cpu().numpy() # type: ignore[assignment] + y_pred = y_pred[indices].cpu().numpy() # type: ignore[assignment] + npos = ap = tmp_pos = 0.0 + + for i in range(n): + y_i = cast(float, y[i]) + if i + 1 < n and y_pred[i] == y_pred[i + 1]: + tmp_pos += y_i + else: + tmp_pos += y_i + npos += tmp_pos + ap += tmp_pos * npos / (i + 1) + tmp_pos = 0 + + return ap / npos + + +def compute_average_precision( + y_pred: torch.Tensor, y: torch.Tensor, average: Average | str = Average.MACRO +) -> np.ndarray | float | npt.ArrayLike: + """Computes Average Precision (AP). AP is a useful metric to evaluate a classifier when the classes are + imbalanced. It summarizes a Precision-Recall according to equation :eq:`ap`. + Referring to: `sklearn.metrics.average_precision_score + `_. + + Args: + y_pred: input data to compute, typical classification model output. + the first dim must be batch, if multi-classes, it must be in One-Hot format. + for example: shape `[16]` or `[16, 1]` for a binary data, shape `[16, 2]` for 2 classes data. + y: ground truth to compute AP metric, the first dim must be batch. + if multi-classes, it must be in One-Hot format. + for example: shape `[16]` or `[16, 1]` for a binary data, shape `[16, 2]` for 2 classes data. + average: {``"macro"``, ``"weighted"``, ``"micro"``, ``"none"``} + Type of averaging performed if not binary classification. + Defaults to ``"macro"``. + + - ``"macro"``: calculate metrics for each label, and find their unweighted mean. + This does not take label imbalance into account. + - ``"weighted"``: calculate metrics for each label, and find their average, + weighted by support (the number of true instances for each label). + - ``"micro"``: calculate metrics globally by considering each element of the label + indicator matrix as a label. + - ``"none"``: the scores for each class are returned. 
+ + Raises: + ValueError: When ``y_pred`` dimension is not one of [1, 2]. + ValueError: When ``y`` dimension is not one of [1, 2]. + ValueError: When ``average`` is not one of ["macro", "weighted", "micro", "none"]. + + Note: + Average Precision expects y to be comprised of 0's and 1's. `y_pred` must be either prob. estimates or confidence values. + + """ + y_pred_ndim = y_pred.ndimension() + y_ndim = y.ndimension() + if y_pred_ndim not in (1, 2): + raise ValueError( + f"Predictions should be of shape (batch_size, num_classes) or (batch_size, ), got {y_pred.shape}." + ) + if y_ndim not in (1, 2): + raise ValueError(f"Targets should be of shape (batch_size, num_classes) or (batch_size, ), got {y.shape}.") + if y_pred_ndim == 2 and y_pred.shape[1] == 1: + y_pred = y_pred.squeeze(dim=-1) + y_pred_ndim = 1 + if y_ndim == 2 and y.shape[1] == 1: + y = y.squeeze(dim=-1) + + if y_pred_ndim == 1: + return _calculate(y_pred, y) + + if y.shape != y_pred.shape: + raise ValueError(f"data shapes of y_pred and y do not match, got {y_pred.shape} and {y.shape}.") + + average = look_up_option(average, Average) + if average == Average.MICRO: + return _calculate(y_pred.flatten(), y.flatten()) + y, y_pred = y.transpose(0, 1), y_pred.transpose(0, 1) + ap_values = [_calculate(y_pred_, y_) for y_pred_, y_ in zip(y_pred, y)] + if average == Average.NONE: + return ap_values + if average == Average.MACRO: + return np.mean(ap_values) + if average == Average.WEIGHTED: + weights = [sum(y_) for y_ in y] + return np.average(ap_values, weights=weights) # type: ignore[no-any-return] + raise ValueError(f'Unsupported average: {average}, available options are ["macro", "weighted", "micro", "none"].') diff --git a/monai/utils/enums.py b/monai/utils/enums.py index ac14134acc..3463a92e4b 100644 --- a/monai/utils/enums.py +++ b/monai/utils/enums.py @@ -213,7 +213,8 @@ class GridSamplePadMode(StrEnum): class Average(StrEnum): """ - See also: :py:class:`monai.metrics.rocauc.compute_roc_auc` + See also: :py:class:`monai.metrics.rocauc.compute_roc_auc` or + :py:class:`monai.metrics.average_precision.compute_average_precision` """ MACRO = "macro" diff --git a/tests/handlers/test_handler_average_precision.py b/tests/handlers/test_handler_average_precision.py new file mode 100644 index 0000000000..7f52a5ee9c --- /dev/null +++ b/tests/handlers/test_handler_average_precision.py @@ -0,0 +1,79 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations + +import unittest + +import numpy as np +import torch +import torch.distributed as dist + +from monai.handlers import AveragePrecision +from monai.transforms import Activations, AsDiscrete +from tests.test_utils import DistCall, DistTestCase + + +class TestHandlerAveragePrecision(unittest.TestCase): + + def test_compute(self): + ap_metric = AveragePrecision() + act = Activations(softmax=True) + to_onehot = AsDiscrete(to_onehot=2) + + y_pred = [torch.Tensor([0.1, 0.9]), torch.Tensor([0.3, 1.4])] + y = [torch.Tensor([0]), torch.Tensor([1])] + y_pred = [act(p) for p in y_pred] + y = [to_onehot(y_) for y_ in y] + ap_metric.update([y_pred, y]) + + y_pred = [torch.Tensor([0.2, 0.1]), torch.Tensor([0.1, 0.5])] + y = [torch.Tensor([0]), torch.Tensor([1])] + y_pred = [act(p) for p in y_pred] + y = [to_onehot(y_) for y_ in y] + + ap_metric.update([y_pred, y]) + + ap = ap_metric.compute() + np.testing.assert_allclose(0.8333333, ap) + + +class DistributedAveragePrecision(DistTestCase): + + @DistCall(nnodes=1, nproc_per_node=2, node_rank=0) + def test_compute(self): + ap_metric = AveragePrecision() + act = Activations(softmax=True) + to_onehot = AsDiscrete(to_onehot=2) + + device = f"cuda:{dist.get_rank()}" if torch.cuda.is_available() else "cpu" + if dist.get_rank() == 0: + y_pred = [torch.tensor([0.1, 0.9], device=device), torch.tensor([0.3, 1.4], device=device)] + y = [torch.tensor([0], device=device), torch.tensor([1], device=device)] + + if dist.get_rank() == 1: + y_pred = [ + torch.tensor([0.2, 0.1], device=device), + torch.tensor([0.1, 0.5], device=device), + torch.tensor([0.3, 0.4], device=device), + ] + y = [torch.tensor([0], device=device), torch.tensor([1], device=device), torch.tensor([1], device=device)] + + y_pred = [act(p) for p in y_pred] + y = [to_onehot(y_) for y_ in y] + ap_metric.update([y_pred, y]) + + result = ap_metric.compute() + np.testing.assert_allclose(0.7778, result, rtol=1e-4) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/metrics/test_compute_average_precision.py b/tests/metrics/test_compute_average_precision.py new file mode 100644 index 0000000000..819bb61a42 --- /dev/null +++ b/tests/metrics/test_compute_average_precision.py @@ -0,0 +1,162 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations + +import unittest + +import numpy as np +import torch +from parameterized import parameterized + +from monai.data import decollate_batch +from monai.metrics import AveragePrecisionMetric, compute_average_precision +from monai.transforms import Activations, AsDiscrete, Compose, ToTensor + +_device = "cuda:0" if torch.cuda.is_available() else "cpu" +TEST_CASE_1 = [ + torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5]], device=_device), + torch.tensor([[0], [0], [1], [1]], device=_device), + True, + 2, + "macro", + 0.41667, +] + +TEST_CASE_2 = [ + torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5]], device=_device), + torch.tensor([[1], [1], [0], [0]], device=_device), + True, + 2, + "micro", + 0.85417, +] + +TEST_CASE_3 = [ + torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5]], device=_device), + torch.tensor([[0], [1], [0], [1]], device=_device), + True, + 2, + "macro", + 0.83333, +] + +TEST_CASE_4 = [ + torch.tensor([[0.5], [0.5], [0.2], [8.3]]), + torch.tensor([[0], [1], [0], [1]]), + False, + None, + "macro", + 0.83333, +] + +TEST_CASE_5 = [torch.tensor([[0.5], [0.5], [0.2], [8.3]]), torch.tensor([0, 1, 0, 1]), False, None, "macro", 0.83333] + +TEST_CASE_6 = [torch.tensor([0.5, 0.5, 0.2, 8.3]), torch.tensor([0, 1, 0, 1]), False, None, "macro", 0.83333] + +TEST_CASE_7 = [ + torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5]]), + torch.tensor([[0], [1], [0], [1]]), + True, + 2, + "none", + [0.83333, 0.83333], +] + +TEST_CASE_8 = [ + torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5], [0.1, 0.5]]), + torch.tensor([[1, 0], [0, 1], [0, 0], [1, 1], [0, 1]]), + True, + None, + "weighted", + 0.66667, +] + +TEST_CASE_9 = [ + torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5], [0.1, 0.5]]), + torch.tensor([[1, 0], [0, 1], [0, 0], [1, 1], [0, 1]]), + True, + None, + "micro", + 0.71111, +] + +TEST_CASE_10 = [ + torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5]]), + torch.tensor([[0], [0], [0], [0]]), + True, + 2, + "macro", + float("nan"), +] + +TEST_CASE_11 = [ + torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5]]), + torch.tensor([[1], [1], [1], [1]]), + True, + 2, + "macro", + float("nan"), +] + +TEST_CASE_12 = [ + torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5]]), + torch.tensor([[0, 0], [1, 1], [2, 2], [3, 3]]), + True, + None, + "macro", + float("nan"), +] + +ALL_TESTS = [ + TEST_CASE_1, + TEST_CASE_2, + TEST_CASE_3, + TEST_CASE_4, + TEST_CASE_5, + TEST_CASE_6, + TEST_CASE_7, + TEST_CASE_8, + TEST_CASE_9, + TEST_CASE_10, + TEST_CASE_11, + TEST_CASE_12, +] + + +class TestComputeAveragePrecision(unittest.TestCase): + + @parameterized.expand(ALL_TESTS) + def test_value(self, y_pred, y, softmax, to_onehot, average, expected_value): + y_pred_trans = Compose([ToTensor(), Activations(softmax=softmax)]) + y_trans = Compose([ToTensor(), AsDiscrete(to_onehot=to_onehot)]) + y_pred = torch.stack([y_pred_trans(i) for i in decollate_batch(y_pred)], dim=0) + y = torch.stack([y_trans(i) for i in decollate_batch(y)], dim=0) + result = compute_average_precision(y_pred=y_pred, y=y, average=average) + np.testing.assert_allclose(expected_value, result, rtol=1e-5) + + @parameterized.expand(ALL_TESTS) + def test_class_value(self, y_pred, y, softmax, to_onehot, average, expected_value): + y_pred_trans = Compose([ToTensor(), Activations(softmax=softmax)]) + y_trans = Compose([ToTensor(), AsDiscrete(to_onehot=to_onehot)]) + y_pred = [y_pred_trans(i) for i in decollate_batch(y_pred)] + y = 
[y_trans(i) for i in decollate_batch(y)] + metric = AveragePrecisionMetric(average=average) + metric(y_pred=y_pred, y=y) + result = metric.aggregate() + np.testing.assert_allclose(expected_value, result, rtol=1e-5) + result = metric.aggregate(average=average) # test optional argument + metric.reset() + np.testing.assert_allclose(expected_value, result, rtol=1e-5) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/min_tests.py b/tests/min_tests.py index 1fc3da4a19..12f494be9c 100644 --- a/tests/min_tests.py +++ b/tests/min_tests.py @@ -76,6 +76,7 @@ def run_testsuit(): "test_grid_patch", "test_gmm", "test_handler_metrics_reloaded", + "test_handler_average_precision", "test_handler_checkpoint_loader", "test_handler_checkpoint_saver", "test_handler_classification_saver", From ab46efc19b73d8f8df89d275d9f8432e1e2e9cac Mon Sep 17 00:00:00 2001 From: Rafael Garcia-Dias Date: Thu, 20 Feb 2025 14:11:20 +0000 Subject: [PATCH 20/55] Solves path problem in test_bundle_trt_export.py (#8357) Fixes #8354 ### Description Fixes path on test that is only run on special conditions. ### Types of changes - [x] Non-breaking change (fix or new feature that would not break existing functionality). - [ ] Breaking change (fix or new feature that would cause existing functionality to change). - [ ] New tests added to cover the changes. - [ ] Integration tests passed locally by running `./runtests.sh -f -u --net --coverage`. - [x] Quick tests passed locally by running `./runtests.sh --quick --unittests --disttests`. - [ ] In-line docstrings updated. - [ ] Documentation updated, tested `make html` command in the `docs/` folder. --------- Signed-off-by: R. Garcia-Dias Co-authored-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> Signed-off-by: Can-Zhao --- README.md | 3 ++- tests/bundle/test_bundle_trt_export.py | 2 +- tests/networks/test_convert_to_onnx.py | 2 +- tests/test_utils.py | 18 +++++++++++++++++- tests/transforms/test_gibbs_noise.py | 8 +++----- 5 files changed, 24 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index e5607ccb02..69cd1c657f 100644 --- a/README.md +++ b/README.md @@ -18,12 +18,13 @@ MONAI is a [PyTorch](https://pytorch.org/)-based, [open-source](https://github.com/Project-MONAI/MONAI/blob/dev/LICENSE) framework for deep learning in healthcare imaging, part of the [PyTorch Ecosystem](https://pytorch.org/ecosystem/). Its ambitions are as follows: + - Developing a community of academic, industrial and clinical researchers collaborating on a common foundation; - Creating state-of-the-art, end-to-end training workflows for healthcare imaging; - Providing researchers with the optimized and standardized way to create and evaluate deep learning models. 
- ## Features + > _Please see [the technical highlights](https://docs.monai.io/en/latest/highlights.html) and [What's New](https://docs.monai.io/en/latest/whatsnew.html) of the milestone releases._ - flexible pre-processing for multi-dimensional medical imaging data; diff --git a/tests/bundle/test_bundle_trt_export.py b/tests/bundle/test_bundle_trt_export.py index a7c570438d..5168fcfdb5 100644 --- a/tests/bundle/test_bundle_trt_export.py +++ b/tests/bundle/test_bundle_trt_export.py @@ -70,7 +70,7 @@ def tearDown(self): @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4]) @unittest.skipUnless(has_torchtrt and has_tensorrt, "Torch-TensorRT is required for conversion!") def test_trt_export(self, convert_precision, input_shape, dynamic_batch): - tests_dir = Path(__file__).resolve().parent + tests_dir = Path(__file__).resolve().parents[1] meta_file = os.path.join(tests_dir, "testing_data", "metadata.json") config_file = os.path.join(tests_dir, "testing_data", "inference.json") with tempfile.TemporaryDirectory() as tempdir: diff --git a/tests/networks/test_convert_to_onnx.py b/tests/networks/test_convert_to_onnx.py index cfc356d5a4..106f15dc9d 100644 --- a/tests/networks/test_convert_to_onnx.py +++ b/tests/networks/test_convert_to_onnx.py @@ -64,7 +64,7 @@ def test_unet(self, device, use_trace, use_ort): rtol=rtol, atol=atol, ) - self.assertTrue(isinstance(onnx_model, onnx.ModelProto)) + self.assertTrue(isinstance(onnx_model, onnx.ModelProto)) @parameterized.expand(TESTS_ORT) @SkipIfBeforePyTorchVersion((1, 12)) diff --git a/tests/test_utils.py b/tests/test_utils.py index c494bb547c..97a3181c44 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -30,9 +30,10 @@ import warnings from contextlib import contextmanager from functools import partial, reduce +from itertools import product from pathlib import Path from subprocess import PIPE, Popen -from typing import Callable +from typing import Callable, Literal from urllib.error import ContentTooShortError, HTTPError import numpy as np @@ -862,6 +863,21 @@ def equal_state_dict(st_1, st_2): if torch.cuda.is_available(): TEST_DEVICES.append([torch.device("cuda")]) + +def dict_product(trailing=False, format: Literal["list", "dict"] = "dict", **items): + keys = items.keys() + values = items.values() + for pvalues in product(*values): + dict_comb = dict(zip(keys, pvalues)) + if format == "dict": + if trailing: + yield [dict_comb] + list(pvalues) + else: + yield dict_comb + else: + yield pvalues + + if __name__ == "__main__": parser = argparse.ArgumentParser(prog="util") parser.add_argument("-c", "--count", default=2, help="max number of gpus") diff --git a/tests/transforms/test_gibbs_noise.py b/tests/transforms/test_gibbs_noise.py index 2aa2a44d10..1f96595a26 100644 --- a/tests/transforms/test_gibbs_noise.py +++ b/tests/transforms/test_gibbs_noise.py @@ -21,14 +21,12 @@ from monai.transforms import GibbsNoise from monai.utils.misc import set_determinism from monai.utils.module import optional_import -from tests.test_utils import TEST_NDARRAYS, assert_allclose +from tests.test_utils import TEST_NDARRAYS, assert_allclose, dict_product _, has_torch_fft = optional_import("torch.fft", name="fftshift") -TEST_CASES = [] -for shape in ((128, 64), (64, 48, 80)): - for input_type in TEST_NDARRAYS if has_torch_fft else [np.array]: - TEST_CASES.append((shape, input_type)) +params = {"shape": ((128, 64), (64, 48, 80)), "input_type": TEST_NDARRAYS if has_torch_fft else [np.array]} +TEST_CASES = list(dict_product(format="list", 
**params)) class TestGibbsNoise(unittest.TestCase): From a9a7082c7cd72b729df121fa71bfbd6cb85cb4a7 Mon Sep 17 00:00:00 2001 From: Rafael Garcia-Dias Date: Fri, 21 Feb 2025 16:02:52 +0000 Subject: [PATCH 21/55] 8354 fix path at test onnx trt export (#8361) Fixes #8354 ### Description A few sentences describing the changes proposed in this pull request. ### Types of changes - [x] Non-breaking change (fix or new feature that would not break existing functionality). - [ ] Breaking change (fix or new feature that would cause existing functionality to change). - [ ] New tests added to cover the changes. - [ ] Integration tests passed locally by running `./runtests.sh -f -u --net --coverage`. - [ ] Quick tests passed locally by running `./runtests.sh --quick --unittests --disttests`. - [ ] In-line docstrings updated. - [ ] Documentation updated, tested `make html` command in the `docs/` folder. --------- Signed-off-by: R. Garcia-Dias Signed-off-by: Can-Zhao --- README.md | 31 ++++++++++++++------------ tests/bundle/test_bundle_trt_export.py | 2 +- 2 files changed, 18 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index 69cd1c657f..5e006f5d64 100644 --- a/README.md +++ b/README.md @@ -33,7 +33,6 @@ Its ambitions are as follows: - customizable design for varying user expertise; - multi-GPU multi-node data parallelism support. - ## Installation To install [the current release](https://pypi.org/project/monai/), you can simply run: @@ -54,30 +53,34 @@ Technical documentation is available at [docs.monai.io](https://docs.monai.io). ## Citation -If you have used MONAI in your research, please cite us! The citation can be exported from: https://arxiv.org/abs/2211.02701. +If you have used MONAI in your research, please cite us! The citation can be exported from: . ## Model Zoo + [The MONAI Model Zoo](https://github.com/Project-MONAI/model-zoo) is a place for researchers and data scientists to share the latest and great models from the community. Utilizing [the MONAI Bundle format](https://docs.monai.io/en/latest/bundle_intro.html) makes it easy to [get started](https://github.com/Project-MONAI/tutorials/tree/main/model_zoo) building workflows with MONAI. ## Contributing + For guidance on making a contribution to MONAI, see the [contributing guidelines](https://github.com/Project-MONAI/MONAI/blob/dev/CONTRIBUTING.md). ## Community + Join the conversation on Twitter/X [@ProjectMONAI](https://twitter.com/ProjectMONAI) or join our [Slack channel](https://forms.gle/QTxJq3hFictp31UM9). Ask and answer questions over on [MONAI's GitHub Discussions tab](https://github.com/Project-MONAI/MONAI/discussions). 
## Links -- Website: https://monai.io/ -- API documentation (milestone): https://docs.monai.io/ -- API documentation (latest dev): https://docs.monai.io/en/latest/ -- Code: https://github.com/Project-MONAI/MONAI -- Project tracker: https://github.com/Project-MONAI/MONAI/projects -- Issue tracker: https://github.com/Project-MONAI/MONAI/issues -- Wiki: https://github.com/Project-MONAI/MONAI/wiki -- Test status: https://github.com/Project-MONAI/MONAI/actions -- PyPI package: https://pypi.org/project/monai/ -- conda-forge: https://anaconda.org/conda-forge/monai -- Weekly previews: https://pypi.org/project/monai-weekly/ -- Docker Hub: https://hub.docker.com/r/projectmonai/monai + +- Website: +- API documentation (milestone): +- API documentation (latest dev): +- Code: +- Project tracker: +- Issue tracker: +- Wiki: +- Test status: +- PyPI package: +- conda-forge: +- Weekly previews: +- Docker Hub: diff --git a/tests/bundle/test_bundle_trt_export.py b/tests/bundle/test_bundle_trt_export.py index 5168fcfdb5..730338ad4e 100644 --- a/tests/bundle/test_bundle_trt_export.py +++ b/tests/bundle/test_bundle_trt_export.py @@ -108,7 +108,7 @@ def test_trt_export(self, convert_precision, input_shape, dynamic_batch): has_onnx and has_torchtrt and has_tensorrt, "Onnx and TensorRT are required for onnx-trt conversion!" ) def test_onnx_trt_export(self, convert_precision, input_shape, dynamic_batch): - tests_dir = Path(__file__).resolve().parent + tests_dir = Path(__file__).resolve().parents[1] meta_file = os.path.join(tests_dir, "testing_data", "metadata.json") config_file = os.path.join(tests_dir, "testing_data", "inference.json") with tempfile.TemporaryDirectory() as tempdir: From cf9fb59b368bcd02e0547426b79d6b074efae806 Mon Sep 17 00:00:00 2001 From: Virginia Fernandez <61539159+virginiafdez@users.noreply.github.com> Date: Mon, 24 Feb 2025 10:11:58 +0000 Subject: [PATCH 22/55] =?UTF-8?q?Modify=20ControlNet=20inferer=20so=20that?= =?UTF-8?q?=20it=20takes=20in=20context=20when=20the=20diffus=E2=80=A6=20(?= =?UTF-8?q?#8360)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes #8344 ### Description The ControlNet inferers (latent and not latent) work in such a way that, when conditioning is used, the ControlNet does not take in the conditioning. It should, in theory, exhibit the same behaviour as the diffusion model. I've changed this behaviour, which has included modifying ControlNetDiffusionInferer and ControlNetLatentDiffusionInferer; the methods call, sample and get_likelihood. I've also modified the tests to take this into account. ### Types of changes - [x] Non-breaking change (fix or new feature that would not break existing functionality). - [x] New tests added to cover the changes (modified, rather than new) - [x] Quick tests passed locally by running `./runtests.sh --quick --unittests --disttests`. 
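For clarity, a minimal sketch of the dispatch this change introduces (simplified: `controlnet`, `diffuse`, `image`, `cn_cond` and `conditioning` stand in for the inferer's internal state, and scheduler stepping is omitted): in `"concat"` mode the conditioning is stacked onto the model input and no cross-attention context is passed, otherwise both the ControlNet and the diffusion model receive the same `context`.

```python
import torch


def controlnet_step(controlnet, diffuse, image, t, cn_cond, conditioning=None, mode="crossattn"):
    """One denoising step where the ControlNet sees the same conditioning as the UNet."""
    if mode == "concat" and conditioning is not None:
        model_input = torch.cat([image, conditioning], dim=1)
        context = None  # conditioning already lives in the input channels
    else:
        model_input = image
        context = conditioning
    down_res, mid_res = controlnet(x=model_input, timesteps=t, controlnet_cond=cn_cond, context=context)
    return diffuse(
        model_input,
        timesteps=t,
        context=context,
        down_block_additional_residuals=down_res,
        mid_block_additional_residual=mid_res,
    )
```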
Signed-off-by: Virginia Fernandez Co-authored-by: Virginia Fernandez Co-authored-by: Eric Kerfoot <17726042+ericspod@users.noreply.github.com> Co-authored-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> Signed-off-by: Can-Zhao --- monai/inferers/inferer.py | 40 ++++++++++++++++------ tests/inferers/test_controlnet_inferers.py | 9 +++++ 2 files changed, 38 insertions(+), 11 deletions(-) diff --git a/monai/inferers/inferer.py b/monai/inferers/inferer.py index 810afa0be3..f779f279d6 100644 --- a/monai/inferers/inferer.py +++ b/monai/inferers/inferer.py @@ -1345,13 +1345,15 @@ def __call__( # type: ignore[override] raise NotImplementedError(f"{mode} condition is not supported") noisy_image = self.scheduler.add_noise(original_samples=inputs, noise=noise, timesteps=timesteps) - down_block_res_samples, mid_block_res_sample = controlnet( - x=noisy_image, timesteps=timesteps, controlnet_cond=cn_cond - ) + if mode == "concat" and condition is not None: noisy_image = torch.cat([noisy_image, condition], dim=1) condition = None + down_block_res_samples, mid_block_res_sample = controlnet( + x=noisy_image, timesteps=timesteps, controlnet_cond=cn_cond, context=condition + ) + diffuse = diffusion_model if isinstance(diffusion_model, SPADEDiffusionModelUNet): diffuse = partial(diffusion_model, seg=seg) @@ -1407,17 +1409,21 @@ def sample( # type: ignore[override] progress_bar = iter(scheduler.timesteps) intermediates = [] for t in progress_bar: - # 1. ControlNet forward - down_block_res_samples, mid_block_res_sample = controlnet( - x=image, timesteps=torch.Tensor((t,)).to(input_noise.device), controlnet_cond=cn_cond - ) - # 2. predict noise model_output diffuse = diffusion_model if isinstance(diffusion_model, SPADEDiffusionModelUNet): diffuse = partial(diffusion_model, seg=seg) if mode == "concat" and conditioning is not None: + # 1. Conditioning model_input = torch.cat([image, conditioning], dim=1) + # 2. ControlNet forward + down_block_res_samples, mid_block_res_sample = controlnet( + x=model_input, + timesteps=torch.Tensor((t,)).to(input_noise.device), + controlnet_cond=cn_cond, + context=None, + ) + # 3. 
predict noise model_output model_output = diffuse( model_input, timesteps=torch.Tensor((t,)).to(input_noise.device), @@ -1426,6 +1432,12 @@ def sample( # type: ignore[override] mid_block_additional_residual=mid_block_res_sample, ) else: + down_block_res_samples, mid_block_res_sample = controlnet( + x=image, + timesteps=torch.Tensor((t,)).to(input_noise.device), + controlnet_cond=cn_cond, + context=conditioning, + ) model_output = diffuse( image, timesteps=torch.Tensor((t,)).to(input_noise.device), @@ -1496,9 +1508,6 @@ def get_likelihood( # type: ignore[override] for t in progress_bar: timesteps = torch.full(inputs.shape[:1], t, device=inputs.device).long() noisy_image = self.scheduler.add_noise(original_samples=inputs, noise=noise, timesteps=timesteps) - down_block_res_samples, mid_block_res_sample = controlnet( - x=noisy_image, timesteps=torch.Tensor((t,)).to(inputs.device), controlnet_cond=cn_cond - ) diffuse = diffusion_model if isinstance(diffusion_model, SPADEDiffusionModelUNet): @@ -1506,6 +1515,9 @@ def get_likelihood( # type: ignore[override] if mode == "concat" and conditioning is not None: noisy_image = torch.cat([noisy_image, conditioning], dim=1) + down_block_res_samples, mid_block_res_sample = controlnet( + x=noisy_image, timesteps=torch.Tensor((t,)).to(inputs.device), controlnet_cond=cn_cond, context=None + ) model_output = diffuse( noisy_image, timesteps=timesteps, @@ -1514,6 +1526,12 @@ def get_likelihood( # type: ignore[override] mid_block_additional_residual=mid_block_res_sample, ) else: + down_block_res_samples, mid_block_res_sample = controlnet( + x=noisy_image, + timesteps=torch.Tensor((t,)).to(inputs.device), + controlnet_cond=cn_cond, + context=conditioning, + ) model_output = diffuse( x=noisy_image, timesteps=timesteps, diff --git a/tests/inferers/test_controlnet_inferers.py b/tests/inferers/test_controlnet_inferers.py index 2ab5cec335..909f2cf398 100644 --- a/tests/inferers/test_controlnet_inferers.py +++ b/tests/inferers/test_controlnet_inferers.py @@ -550,6 +550,8 @@ def test_ddim_sampler(self, model_params, controlnet_params, input_shape): def test_sampler_conditioned(self, model_params, controlnet_params, input_shape): model_params["with_conditioning"] = True model_params["cross_attention_dim"] = 3 + controlnet_params["with_conditioning"] = True + controlnet_params["cross_attention_dim"] = 3 model = DiffusionModelUNet(**model_params) controlnet = ControlNet(**controlnet_params) device = "cuda:0" if torch.cuda.is_available() else "cpu" @@ -619,8 +621,11 @@ def test_sampler_conditioned_concat(self, model_params, controlnet_params, input model_params = model_params.copy() n_concat_channel = 2 model_params["in_channels"] = model_params["in_channels"] + n_concat_channel + controlnet_params["in_channels"] = controlnet_params["in_channels"] + n_concat_channel model_params["cross_attention_dim"] = None + controlnet_params["cross_attention_dim"] = None model_params["with_conditioning"] = False + controlnet_params["with_conditioning"] = False model = DiffusionModelUNet(**model_params) device = "cuda:0" if torch.cuda.is_available() else "cpu" model.to(device) @@ -1023,8 +1028,10 @@ def test_prediction_shape_conditioned_concat( if ae_model_type == "SPADEAutoencoderKL": stage_1 = SPADEAutoencoderKL(**autoencoder_params) stage_2_params = stage_2_params.copy() + controlnet_params = controlnet_params.copy() n_concat_channel = 3 stage_2_params["in_channels"] = stage_2_params["in_channels"] + n_concat_channel + controlnet_params["in_channels"] = controlnet_params["in_channels"] 
+ n_concat_channel if dm_model_type == "SPADEDiffusionModelUNet": stage_2 = SPADEDiffusionModelUNet(**stage_2_params) else: @@ -1106,8 +1113,10 @@ def test_sample_shape_conditioned_concat( if ae_model_type == "SPADEAutoencoderKL": stage_1 = SPADEAutoencoderKL(**autoencoder_params) stage_2_params = stage_2_params.copy() + controlnet_params = controlnet_params.copy() n_concat_channel = 3 stage_2_params["in_channels"] = stage_2_params["in_channels"] + n_concat_channel + controlnet_params["in_channels"] = controlnet_params["in_channels"] + n_concat_channel if dm_model_type == "SPADEDiffusionModelUNet": stage_2 = SPADEDiffusionModelUNet(**stage_2_params) else: From 4b4d92cf79b3a9a7486088499f0800ae145822f5 Mon Sep 17 00:00:00 2001 From: Yiheng Wang <68361391+yiheng-wang-nv@users.noreply.github.com> Date: Tue, 25 Feb 2025 22:59:12 +0800 Subject: [PATCH 23/55] Update monaihosting download method (#8364) Related to https://github.com/Project-MONAI/model-zoo/pull/723. ### Description Currently, bundle download on source "monaihosting" uses fixed download url according to the function `_get_monaihosting_bundle_url`. A possible enhancement if to support on bundles that are hosted in different places. ### Types of changes - [x] Non-breaking change (fix or new feature that would not break existing functionality). - [ ] Breaking change (fix or new feature that would cause existing functionality to change). - [ ] New tests added to cover the changes. - [ ] Integration tests passed locally by running `./runtests.sh -f -u --net --coverage`. - [ ] Quick tests passed locally by running `./runtests.sh --quick --unittests --disttests`. - [ ] In-line docstrings updated. - [ ] Documentation updated, tested `make html` command in the `docs/` folder. --------- Signed-off-by: Yiheng Wang Co-authored-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> Signed-off-by: Can-Zhao --- monai/bundle/scripts.py | 55 +++++++++++++++++++++++++++++------------ 1 file changed, 39 insertions(+), 16 deletions(-) diff --git a/monai/bundle/scripts.py b/monai/bundle/scripts.py index 5089f0c045..b43f7e0fa0 100644 --- a/monai/bundle/scripts.py +++ b/monai/bundle/scripts.py @@ -15,6 +15,7 @@ import json import os import re +import urllib import warnings import zipfile from collections.abc import Mapping, Sequence @@ -58,7 +59,7 @@ validate, _ = optional_import("jsonschema", name="validate") ValidationError, _ = optional_import("jsonschema.exceptions", name="ValidationError") Checkpoint, has_ignite = optional_import("ignite.handlers", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Checkpoint") -requests_get, has_requests = optional_import("requests", name="get") +requests, has_requests = optional_import("requests") onnx, _ = optional_import("onnx") huggingface_hub, _ = optional_import("huggingface_hub") @@ -206,6 +207,16 @@ def _download_from_monaihosting(download_path: Path, filename: str, version: str extractall(filepath=filepath, output_dir=download_path, has_base=True) +def _download_from_bundle_info(download_path: Path, filename: str, version: str, progress: bool) -> None: + bundle_info = get_bundle_info(bundle_name=filename, version=version) + if not bundle_info: + raise ValueError(f"Bundle info not found for {filename} v{version}.") + url = bundle_info["browser_download_url"] + filepath = download_path / f"{filename}_v{version}.zip" + download_url(url=url, filepath=filepath, hash_val=None, progress=progress) + extractall(filepath=filepath, output_dir=download_path, has_base=True) + + def _add_ngc_prefix(name: str, prefix: str = 
"monai_") -> str: if name.startswith(prefix): return name @@ -222,7 +233,7 @@ def _get_all_download_files(request_url: str, headers: dict | None = None) -> li if not has_requests: raise ValueError("requests package is required, please install it.") headers = {} if headers is None else headers - response = requests_get(request_url, headers=headers) + response = requests.get(request_url, headers=headers) response.raise_for_status() model_info = json.loads(response.text) @@ -266,7 +277,7 @@ def _download_from_ngc_private( request_url = _get_ngc_private_bundle_url(model_name=filename, version=version, repo=repo) if has_requests: headers = {} if headers is None else headers - response = requests_get(request_url, headers=headers) + response = requests.get(request_url, headers=headers) response.raise_for_status() else: raise ValueError("NGC API requires requests package. Please install it.") @@ -289,7 +300,7 @@ def _get_ngc_token(api_key, retry=0): url = "https://authn.nvidia.com/token?service=ngc" headers = {"Accept": "application/json", "Authorization": "ApiKey " + api_key} if has_requests: - response = requests_get(url, headers=headers) + response = requests.get(url, headers=headers) if not response.ok: # retry 3 times, if failed, raise an error. if retry < 3: @@ -303,14 +314,17 @@ def _get_ngc_token(api_key, retry=0): def _get_latest_bundle_version_monaihosting(name): full_url = f"{MONAI_HOSTING_BASE_URL}/{name.lower()}" - requests_get, has_requests = optional_import("requests", name="get") if has_requests: - resp = requests_get(full_url) - resp.raise_for_status() - else: - raise ValueError("NGC API requires requests package. Please install it.") - model_info = json.loads(resp.text) - return model_info["model"]["latestVersionIdStr"] + resp = requests.get(full_url) + try: + resp.raise_for_status() + model_info = json.loads(resp.text) + return model_info["model"]["latestVersionIdStr"] + except requests.exceptions.HTTPError: + # for monaihosting bundles, if cannot find the version, get from model zoo model_info.json + return get_bundle_versions(name)["latest_version"] + + raise ValueError("NGC API requires requests package. 
Please install it.") def _examine_monai_version(monai_version: str) -> tuple[bool, str]: @@ -388,14 +402,14 @@ def _get_latest_bundle_version_ngc(name: str, repo: str | None = None, headers: version_header = {"Accept-Encoding": "gzip, deflate"} # Excluding 'zstd' to fit NGC requirements if headers: version_header.update(headers) - resp = requests_get(version_endpoint, headers=version_header) + resp = requests.get(version_endpoint, headers=version_header) resp.raise_for_status() model_info = json.loads(resp.text) latest_versions = _list_latest_versions(model_info) for version in latest_versions: file_endpoint = base_url + f"/{name.lower()}/versions/{version}/files/configs/metadata.json" - resp = requests_get(file_endpoint, headers=headers) + resp = requests.get(file_endpoint, headers=headers) metadata = json.loads(resp.text) resp.raise_for_status() # if the package version is not available or the model is compatible with the package version @@ -585,7 +599,16 @@ def download( name_ver = "_v".join([name_, version_]) if version_ is not None else name_ _download_from_github(repo=repo_, download_path=bundle_dir_, filename=name_ver, progress=progress_) elif source_ == "monaihosting": - _download_from_monaihosting(download_path=bundle_dir_, filename=name_, version=version_, progress=progress_) + try: + _download_from_monaihosting( + download_path=bundle_dir_, filename=name_, version=version_, progress=progress_ + ) + except urllib.error.HTTPError: + # for monaihosting bundles, if cannot download from default host, download according to bundle_info + _download_from_bundle_info( + download_path=bundle_dir_, filename=name_, version=version_, progress=progress_ + ) + elif source_ == "ngc": _download_from_ngc( download_path=bundle_dir_, @@ -792,9 +815,9 @@ def _get_all_bundles_info( if auth_token is not None: headers = {"Authorization": f"Bearer {auth_token}"} - resp = requests_get(request_url, headers=headers) + resp = requests.get(request_url, headers=headers) else: - resp = requests_get(request_url) + resp = requests.get(request_url) resp.raise_for_status() else: raise ValueError("requests package is required, please install it.") From 092978c9445dd01c707a82c009dfc875eb56797c Mon Sep 17 00:00:00 2001 From: James Butler Date: Tue, 4 Mar 2025 10:33:31 -0500 Subject: [PATCH 24/55] Bump torch minimum to mitigate CVE-2024-31580 & CVE-2024-31583 and enable numpy 2 compatibility (#8368) This is a follow-up to the comments made in https://github.com/Project-MONAI/MONAI/pull/8296#issuecomment-2587338931. ### Description This bumps the minimum required `torch` version from 1.13.1 to 2.2.0 in the first commit. See https://github.com/advisories/GHSA-5pcm-hx3q-hm94 and https://github.com/advisories/GHSA-pg7h-5qx3-wjr3 for more details regarding the "High" severity scoring. - https://nvd.nist.gov/vuln/detail/CVE-2024-31580 - https://nvd.nist.gov/vuln/detail/CVE-2024-31583 Additionally, PyTorch added support for numpy 2 starting with PyTorch 2.3.0. The second commit in this PR allows for numpy 1 or numpy 2 to be used with torch>=2.3.0. I have included this commit in this PR as upgrading to torch 2.2 means you might as well update to 2.3 to get the numpy 2 compatibility. A special case is being handled on Windows as PyTorch Windows binaries had compatibilities issues with numpy 2 that were fixed in torch 2.4.1 (see https://github.com/pytorch/pytorch/issues/131668#issuecomment-2307447045). 
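For illustration only (this snippet is not part of the patch), the resulting constraint can be expressed as a small runtime check; the authoritative pins are the `requirements.txt` and `setup.cfg` changes below.

```python
import sys

import numpy as np
import torch
from packaging.version import Version

# torch>=2.3.0 in general, torch>=2.4.1 on Windows (for numpy 2 compatibility); numpy stays below 3.0.
min_torch = Version("2.4.1") if sys.platform == "win32" else Version("2.3.0")
assert Version(torch.__version__.split("+")[0]) >= min_torch, f"torch>={min_torch} is required"
assert Version(np.__version__) < Version("3.0"), "numpy must stay below 3.0"
```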
Maintainers will need to update the required status checks for the [`dev`](https://github.com/Project-MONAI/MONAI/tree/dev) branch to: - Remove min-dep-pytorch (2.0.1) ### Types of changes - [X] Breaking change (fix or new feature that would cause existing functionality to change). - [ ] Integration tests passed locally by running `./runtests.sh -f -u --net --coverage`. - [ ] Quick tests passed locally by running `./runtests.sh --quick --unittests --disttests`. --------- Signed-off-by: James Butler Signed-off-by: Can-Zhao --- .github/workflows/cron.yml | 10 +++---- .github/workflows/pythonapp-gpu.yml | 26 ++++++++++--------- .github/workflows/pythonapp-min.yml | 2 +- .github/workflows/pythonapp.yml | 6 ++--- docs/requirements.txt | 4 +-- environment-dev.yml | 4 +-- monai/engines/evaluator.py | 11 +++----- monai/engines/trainer.py | 10 ++----- monai/networks/blocks/crossattention.py | 7 +---- monai/networks/blocks/selfattention.py | 7 +---- monai/networks/blocks/upsample.py | 14 +++------- pyproject.toml | 2 +- requirements.txt | 5 ++-- setup.cfg | 5 ++-- .../test_integration_bundle_run.py | 6 ++--- tests/metrics/test_surface_dice.py | 6 ++--- tests/nonconfig_workflow.py | 2 +- 17 files changed, 48 insertions(+), 79 deletions(-) diff --git a/.github/workflows/cron.yml b/.github/workflows/cron.yml index 2e7921ec94..77fe9ca3a2 100644 --- a/.github/workflows/cron.yml +++ b/.github/workflows/cron.yml @@ -13,17 +13,13 @@ jobs: strategy: matrix: environment: - - "PT113+CUDA118" - - "PT210+CUDA121" + - "PT230+CUDA121" - "PT240+CUDA126" - "PTLATEST+CUDA126" include: # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes - - environment: PT113+CUDA118 - pytorch: "torch==1.13.1 torchvision==0.14.1 --extra-index-url https://download.pytorch.org/whl/cu121" - base: "nvcr.io/nvidia/pytorch:22.10-py3" # CUDA 11.8 - - environment: PT210+CUDA121 - pytorch: "pytorch==2.1.0 torchvision==0.16.0 --extra-index-url https://download.pytorch.org/whl/cu121" + - environment: PT230+CUDA121 + pytorch: "pytorch==2.3.0 torchvision==0.18.0 --extra-index-url https://download.pytorch.org/whl/cu121" base: "nvcr.io/nvidia/pytorch:23.08-py3" # CUDA 12.1 - environment: PT240+CUDA126 pytorch: "pytorch==2.4.0 torchvision==0.19.0 --extra-index-url https://download.pytorch.org/whl/cu121" diff --git a/.github/workflows/pythonapp-gpu.yml b/.github/workflows/pythonapp-gpu.yml index cd916f2ebb..6b0a5084a2 100644 --- a/.github/workflows/pythonapp-gpu.yml +++ b/.github/workflows/pythonapp-gpu.yml @@ -22,19 +22,21 @@ jobs: strategy: matrix: environment: - - "PT113+CUDA116" - - "PT210+CUDA121DOCKER" + - "PT230+CUDA124DOCKER" + - "PT240+CUDA125DOCKER" + - "PT250+CUDA126DOCKER" include: # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes - - environment: PT113+CUDA116 - pytorch: "torch==1.13.1 torchvision==0.14.1" - base: "nvcr.io/nvidia/cuda:11.6.1-devel-ubuntu18.04" - - environment: PT210+CUDA121DOCKER - # 23.08: 2.1.0a0+29c30b1 + - environment: PT230+CUDA124DOCKER + # 24.04: 2.3.0a0+6ddf5cf85e pytorch: "-h" # we explicitly set pytorch to -h to avoid pip install error - base: "nvcr.io/nvidia/pytorch:23.08-py3" - - environment: PT210+CUDA121DOCKER - # 24.08: 2.3.0a0+40ec155e58.nv24.3 + base: "nvcr.io/nvidia/pytorch:24.04-py3" + - environment: PT240+CUDA125DOCKER + # 24.06: 2.4.0a0+f70bd71a48 + pytorch: "-h" # we explicitly set pytorch to -h to avoid pip install error + base: "nvcr.io/nvidia/pytorch:24.06-py3" + - environment: PT250+CUDA126DOCKER + # 24.08: 2.5.0a0+872d972e41 pytorch: "-h" # we explicitly 
set pytorch to -h to avoid pip install error base: "nvcr.io/nvidia/pytorch:24.08-py3" container: @@ -49,7 +51,7 @@ jobs: apt-get update apt-get install -y wget - if [ ${{ matrix.environment }} = "PT113+CUDA116" ] + if [ ${{ matrix.environment }} = "PT230+CUDA124" ] then PYVER=3.9 PYSFX=3 DISTUTILS=python3-distutils && \ apt-get update && apt-get install -y --no-install-recommends \ @@ -114,7 +116,7 @@ jobs: # build for the current self-hosted CI Tesla V100 BUILD_MONAI=1 TORCH_CUDA_ARCH_LIST="7.0" ./runtests.sh --build --disttests ./runtests.sh --quick --unittests - if [ ${{ matrix.environment }} = "PT113+CUDA116" ]; then + if [ ${{ matrix.environment }} = "PT230+CUDA124" ]; then # test the clang-format tool downloading once coverage run -m tests.clang_format_utils fi diff --git a/.github/workflows/pythonapp-min.yml b/.github/workflows/pythonapp-min.yml index 19e30f86bb..afc9f6f6d4 100644 --- a/.github/workflows/pythonapp-min.yml +++ b/.github/workflows/pythonapp-min.yml @@ -124,7 +124,7 @@ jobs: strategy: fail-fast: false matrix: - pytorch-version: ['1.13.1', '2.0.1', '2.2.2', '2.3.1', '2.4.1', 'latest'] + pytorch-version: ['2.3.1', '2.4.1', '2.5.1', 'latest'] timeout-minutes: 40 steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml index f175cc3f7c..5d6fd06afa 100644 --- a/.github/workflows/pythonapp.yml +++ b/.github/workflows/pythonapp.yml @@ -94,7 +94,7 @@ jobs: - if: runner.os == 'windows' name: Install torch cpu from pytorch.org (Windows only) run: | - python -m pip install torch==1.13.1+cpu torchvision==0.14.1+cpu -f https://download.pytorch.org/whl/torch_stable.html + python -m pip install torch==2.4.1 torchvision==0.19.1+cpu --index-url https://download.pytorch.org/whl/cpu - if: runner.os == 'Linux' name: Install itk pre-release (Linux only) run: | @@ -103,7 +103,7 @@ jobs: - name: Install the dependencies run: | python -m pip install --user --upgrade pip wheel - python -m pip install torch==1.13.1 torchvision==0.14.1 + python -m pip install torch==2.4.1 torchvision==0.19.1 cat "requirements-dev.txt" python -m pip install -r requirements-dev.txt python -m pip list @@ -155,7 +155,7 @@ jobs: # install the latest pytorch for testing # however, "pip install monai*.tar.gz" will build cpp/cuda with an isolated # fresh torch installation according to pyproject.toml - python -m pip install torch>=1.13.1 torchvision + python -m pip install torch>=2.3.0 torchvision - name: Check packages run: | pip uninstall monai diff --git a/docs/requirements.txt b/docs/requirements.txt index d657580743..b314e10640 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,5 +1,5 @@ --f https://download.pytorch.org/whl/cpu/torch-1.13.1%2Bcpu-cp39-cp39-linux_x86_64.whl -torch>=1.13.1 +-f https://download.pytorch.org/whl/cpu/torch-2.3.0%2Bcpu-cp39-cp39-linux_x86_64.whl +torch>=2.3.0 pytorch-ignite==0.4.11 numpy>=1.20 itk>=5.2 diff --git a/environment-dev.yml b/environment-dev.yml index 8617a3b9cb..9358cdc83b 100644 --- a/environment-dev.yml +++ b/environment-dev.yml @@ -5,8 +5,8 @@ channels: - nvidia - conda-forge dependencies: - - numpy>=1.24,<2.0 - - pytorch>=1.13.1 + - numpy>=1.24,<3.0 + - pytorch>=2.3.0 - torchio - torchvision - pytorch-cuda>=11.6 diff --git a/monai/engines/evaluator.py b/monai/engines/evaluator.py index d70a39726b..35d4928465 100644 --- a/monai/engines/evaluator.py +++ b/monai/engines/evaluator.py @@ -28,7 +28,7 @@ from monai.utils import ForwardMode, IgniteInfo, ensure_tuple, min_version, optional_import from 
monai.utils.enums import CommonKeys as Keys from monai.utils.enums import EngineStatsKeys as ESKeys -from monai.utils.module import look_up_option, pytorch_after +from monai.utils.module import look_up_option if TYPE_CHECKING: from ignite.engine import Engine, EventEnum @@ -269,13 +269,8 @@ def __init__( amp_kwargs=amp_kwargs, ) if compile: - if pytorch_after(2, 1): - compile_kwargs = {} if compile_kwargs is None else compile_kwargs - network = torch.compile(network, **compile_kwargs) # type: ignore[assignment] - else: - warnings.warn( - "Network compilation (compile=True) not supported for Pytorch versions before 2.1, no compilation done" - ) + compile_kwargs = {} if compile_kwargs is None else compile_kwargs + network = torch.compile(network, **compile_kwargs) # type: ignore[assignment] self.network = network self.compile = compile self.inferer = SimpleInferer() if inferer is None else inferer diff --git a/monai/engines/trainer.py b/monai/engines/trainer.py index a0be86bae5..fdb45fbab8 100644 --- a/monai/engines/trainer.py +++ b/monai/engines/trainer.py @@ -27,7 +27,6 @@ from monai.utils import AdversarialIterationEvents, AdversarialKeys, GanKeys, IgniteInfo, min_version, optional_import from monai.utils.enums import CommonKeys as Keys from monai.utils.enums import EngineStatsKeys as ESKeys -from monai.utils.module import pytorch_after if TYPE_CHECKING: from ignite.engine import Engine, EventEnum @@ -183,13 +182,8 @@ def __init__( amp_kwargs=amp_kwargs, ) if compile: - if pytorch_after(2, 1): - compile_kwargs = {} if compile_kwargs is None else compile_kwargs - network = torch.compile(network, **compile_kwargs) # type: ignore[assignment] - else: - warnings.warn( - "Network compilation (compile=True) not supported for Pytorch versions before 2.1, no compilation done" - ) + compile_kwargs = {} if compile_kwargs is None else compile_kwargs + network = torch.compile(network, **compile_kwargs) # type: ignore[assignment] self.network = network self.compile = compile self.optimizer = optimizer diff --git a/monai/networks/blocks/crossattention.py b/monai/networks/blocks/crossattention.py index bdecf63168..be31d2d8fb 100644 --- a/monai/networks/blocks/crossattention.py +++ b/monai/networks/blocks/crossattention.py @@ -17,7 +17,7 @@ import torch.nn as nn from monai.networks.layers.utils import get_rel_pos_embedding_layer -from monai.utils import optional_import, pytorch_after +from monai.utils import optional_import Rearrange, _ = optional_import("einops.layers.torch", name="Rearrange") @@ -84,11 +84,6 @@ def __init__( if causal and sequence_length is None: raise ValueError("sequence_length is necessary for causal attention.") - if use_flash_attention and not pytorch_after(minor=13, major=1, patch=0): - raise ValueError( - "use_flash_attention is only supported for PyTorch versions >= 2.0." - "Upgrade your PyTorch or set the flag to False." 
- ) if use_flash_attention and save_attn: raise ValueError( "save_attn has been set to True, but use_flash_attention is also set" diff --git a/monai/networks/blocks/selfattention.py b/monai/networks/blocks/selfattention.py index 86e1b1d3ae..360579f3df 100644 --- a/monai/networks/blocks/selfattention.py +++ b/monai/networks/blocks/selfattention.py @@ -18,7 +18,7 @@ import torch.nn.functional as F from monai.networks.layers.utils import get_rel_pos_embedding_layer -from monai.utils import optional_import, pytorch_after +from monai.utils import optional_import Rearrange, _ = optional_import("einops.layers.torch", name="Rearrange") @@ -90,11 +90,6 @@ def __init__( if causal and sequence_length is None: raise ValueError("sequence_length is necessary for causal attention.") - if use_flash_attention and not pytorch_after(minor=13, major=1, patch=0): - raise ValueError( - "use_flash_attention is only supported for PyTorch versions >= 2.0." - "Upgrade your PyTorch or set the flag to False." - ) if use_flash_attention and save_attn: raise ValueError( "save_attn has been set to True, but use_flash_attention is also set" diff --git a/monai/networks/blocks/upsample.py b/monai/networks/blocks/upsample.py index 50fd39a70b..62908e9825 100644 --- a/monai/networks/blocks/upsample.py +++ b/monai/networks/blocks/upsample.py @@ -17,8 +17,8 @@ import torch.nn as nn from monai.networks.layers.factories import Conv, Pad, Pool -from monai.networks.utils import CastTempType, icnr_init, pixelshuffle -from monai.utils import InterpolateMode, UpsampleMode, ensure_tuple_rep, look_up_option, pytorch_after +from monai.networks.utils import icnr_init, pixelshuffle +from monai.utils import InterpolateMode, UpsampleMode, ensure_tuple_rep, look_up_option __all__ = ["Upsample", "UpSample", "SubpixelUpsample", "Subpixelupsample", "SubpixelUpSample"] @@ -164,15 +164,7 @@ def __init__( align_corners=align_corners, ) - # Cast to float32 as 'upsample_nearest2d_out_frame' op does not support bfloat16 - # https://github.com/pytorch/pytorch/issues/86679. 
This issue is solved in PyTorch 2.1 - if pytorch_after(major=2, minor=1): - self.add_module("upsample_non_trainable", upsample) - else: - self.add_module( - "upsample_non_trainable", - CastTempType(initial_type=torch.bfloat16, temporary_type=torch.float32, submodule=upsample), - ) + self.add_module("upsample_non_trainable", upsample) if post_conv: self.add_module("postconv", post_conv) elif up_mode == UpsampleMode.PIXELSHUFFLE: diff --git a/pyproject.toml b/pyproject.toml index 8ad55b1c2c..588d6d22d8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,7 @@ requires = [ "wheel", "setuptools", - "torch>=1.13.1", + "torch>=2.3.0", "ninja", "packaging" ] diff --git a/requirements.txt b/requirements.txt index 5203b43128..452a62adda 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,3 @@ -torch>=1.13.1,<2.6 -numpy>=1.24,<2.0 +torch>=2.3.0,<2.6; sys_platform != 'win32' +torch>=2.4.1,<2.6; sys_platform == 'win32' +numpy>=1.24,<3.0 diff --git a/setup.cfg b/setup.cfg index 66d9e19609..2b06df64de 100644 --- a/setup.cfg +++ b/setup.cfg @@ -42,8 +42,9 @@ setup_requires = ninja packaging install_requires = - torch>=1.13.1 - numpy>=1.24,<2.0 + torch>=2.3.0; sys_platform != 'win32' + torch>=2.4.1; sys_platform == 'win32' + numpy>=1.24,<3.0 [options.extras_require] all = diff --git a/tests/integration/test_integration_bundle_run.py b/tests/integration/test_integration_bundle_run.py index cfbbcfe154..7f366d4745 100644 --- a/tests/integration/test_integration_bundle_run.py +++ b/tests/integration/test_integration_bundle_run.py @@ -76,8 +76,7 @@ def test_tiny(self): ) with open(meta_file, "w") as f: json.dump( - {"version": "0.1.0", "monai_version": "1.1.0", "pytorch_version": "1.13.1", "numpy_version": "1.22.2"}, - f, + {"version": "0.1.0", "monai_version": "1.1.0", "pytorch_version": "2.3.0", "numpy_version": "1.22.2"}, f ) cmd = ["coverage", "run", "-m", "monai.bundle"] # test both CLI entry "run" and "run_workflow" @@ -114,8 +113,7 @@ def test_scripts_fold(self): ) with open(meta_file, "w") as f: json.dump( - {"version": "0.1.0", "monai_version": "1.1.0", "pytorch_version": "1.13.1", "numpy_version": "1.22.2"}, - f, + {"version": "0.1.0", "monai_version": "1.1.0", "pytorch_version": "2.3.0", "numpy_version": "1.22.2"}, f ) os.mkdir(scripts_dir) diff --git a/tests/metrics/test_surface_dice.py b/tests/metrics/test_surface_dice.py index 01f80bd01e..a3d03e9937 100644 --- a/tests/metrics/test_surface_dice.py +++ b/tests/metrics/test_surface_dice.py @@ -82,7 +82,7 @@ def test_tolerance_euclidean_distance_with_spacing(self): expected_res0[1, 1] = np.nan for b, c in np.ndindex(batch_size, n_class): np.testing.assert_allclose(expected_res0[b, c], res0[b, c].cpu()) - np.testing.assert_array_equal(agg0.cpu(), np.nanmean(np.nanmean(expected_res0, axis=1), axis=0)) + np.testing.assert_allclose(agg0.cpu(), np.nanmean(np.nanmean(expected_res0, axis=1), axis=0)) np.testing.assert_equal(not_nans.cpu(), torch.tensor(2)) def test_tolerance_euclidean_distance(self): @@ -126,7 +126,7 @@ def test_tolerance_euclidean_distance(self): expected_res0[1, 1] = np.nan for b, c in np.ndindex(batch_size, n_class): np.testing.assert_allclose(expected_res0[b, c], res0[b, c].cpu()) - np.testing.assert_array_equal(agg0.cpu(), np.nanmean(np.nanmean(expected_res0, axis=1), axis=0)) + np.testing.assert_allclose(agg0.cpu(), np.nanmean(np.nanmean(expected_res0, axis=1), axis=0)) np.testing.assert_equal(not_nans.cpu(), torch.tensor(2)) def test_tolerance_euclidean_distance_3d(self): @@ -173,7 +173,7 @@ def 
test_tolerance_euclidean_distance_3d(self): expected_res0[1, 1] = np.nan for b, c in np.ndindex(batch_size, n_class): np.testing.assert_allclose(expected_res0[b, c], res0[b, c].cpu()) - np.testing.assert_array_equal(agg0.cpu(), np.nanmean(np.nanmean(expected_res0, axis=1), axis=0)) + np.testing.assert_allclose(agg0.cpu(), np.nanmean(np.nanmean(expected_res0, axis=1), axis=0)) np.testing.assert_equal(not_nans.cpu(), torch.tensor(2)) def test_tolerance_all_distances(self): diff --git a/tests/nonconfig_workflow.py b/tests/nonconfig_workflow.py index fcfc5b2951..bcbdc67b71 100644 --- a/tests/nonconfig_workflow.py +++ b/tests/nonconfig_workflow.py @@ -65,7 +65,7 @@ def initialize(self): self._monai_version = "1.1.0" if self._pytorch_version is None: - self._pytorch_version = "1.13.1" + self._pytorch_version = "2.3.0" if self._numpy_version is None: self._numpy_version = "1.22.2" From 784b19fd9c5d77585659d891637e66bc380fd4f9 Mon Sep 17 00:00:00 2001 From: Can-Zhao Date: Wed, 5 Mar 2025 16:51:25 +0000 Subject: [PATCH 25/55] add rectified flow for accelerated diffusion model Signed-off-by: Can-Zhao --- monai/networks/schedulers/rectified_flow.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/monai/networks/schedulers/rectified_flow.py b/monai/networks/schedulers/rectified_flow.py index 6a848f0762..d0657c54c7 100644 --- a/monai/networks/schedulers/rectified_flow.py +++ b/monai/networks/schedulers/rectified_flow.py @@ -97,7 +97,7 @@ class RFlowScheduler(Scheduler): ) # during training - inputs = torch.ones(2,4,64,64,64) + inputs = torch.ones(2,4,64,64,32) noise = torch.randn_like(inputs) timesteps = noise_scheduler.sample_timesteps(inputs) noisy_inputs = noise_scheduler.add_noise(original_samples=inputs, noise=noise, timesteps=timesteps) @@ -108,7 +108,7 @@ class RFlowScheduler(Scheduler): loss = loss_l1(predicted_velocity, (inputs - noise)) # during inference - noisy_inputs = torch.randn(2,4,64,64,64) + noisy_inputs = torch.randn(2,4,64,64,32) input_img_size_numel = torch.prod(torch.tensor(noisy_inputs.shape[-3:]) noise_scheduler.set_timesteps( num_inference_steps=30, input_img_size_numel=input_img_size_numel) From 28c3d6888fb4d1799a2b84e6b1de01a2101d918b Mon Sep 17 00:00:00 2001 From: Can-Zhao Date: Wed, 5 Mar 2025 18:29:05 +0000 Subject: [PATCH 26/55] reformat Signed-off-by: Can-Zhao --- monai/utils/jupyter_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monai/utils/jupyter_utils.py b/monai/utils/jupyter_utils.py index b1b43a6767..c93e93dcb9 100644 --- a/monai/utils/jupyter_utils.py +++ b/monai/utils/jupyter_utils.py @@ -234,7 +234,7 @@ def plot_engine_status( def _get_loss_from_output( - output: list[torch.Tensor | dict[str, torch.Tensor]] | dict[str, torch.Tensor] | torch.Tensor + output: list[torch.Tensor | dict[str, torch.Tensor]] | dict[str, torch.Tensor] | torch.Tensor, ) -> torch.Tensor: """Returns a single value from the network output, which is a dict or tensor.""" From dc7b8a63c05082d3253d34fb57a845c0ae2c47de Mon Sep 17 00:00:00 2001 From: Can-Zhao Date: Wed, 5 Mar 2025 18:54:00 +0000 Subject: [PATCH 27/55] reformat Signed-off-by: Can-Zhao --- monai/networks/schedulers/rectified_flow.py | 22 ++++++++++----------- 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/monai/networks/schedulers/rectified_flow.py b/monai/networks/schedulers/rectified_flow.py index d0657c54c7..995739c2aa 100644 --- a/monai/networks/schedulers/rectified_flow.py +++ b/monai/networks/schedulers/rectified_flow.py @@ -159,19 +159,17 @@ def __init__( 
self.transform_scale = transform_scale self.steps_offset = steps_offset - def add_noise( - self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor - ) -> torch.FloatTensor: + def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.Tensor) -> torch.Tensor: """ - Adds noise to the original samples based on the given timesteps. + Add noise to the original samples. Args: - original_samples (torch.FloatTensor): The original sample tensor. - noise (torch.FloatTensor): Noise tensor to be added. - timesteps (torch.IntTensor): Timesteps corresponding to each sample. + original_samples: original samples + noise: noise to add to samples + timesteps: timesteps tensor indicating the timestep to be computed for each sample. Returns: - torch.FloatTensor: The noisy sample tensor. + noisy_samples: sample with added noise """ timepoints = timesteps.float() / self.num_train_timesteps timepoints = 1 - timepoints # [1,1/1000] @@ -221,10 +219,10 @@ def set_timesteps( ) for t in timesteps ] - timesteps = np.array(timesteps).astype(np.float16) + timesteps_np = np.array(timesteps).astype(np.float16) if self.use_discrete_timesteps: - timesteps = timesteps.astype(np.int64) - self.timesteps = torch.from_numpy(timesteps).to(device) + timesteps_np = timesteps_np.astype(np.int64) + self.timesteps = torch.from_numpy(timesteps_np).to(device) self.timesteps += self.steps_offset def sample_timesteps(self, x_start): @@ -257,7 +255,7 @@ def sample_timesteps(self, x_start): return t def step( - self, model_output: torch.Tensor, timestep: int, sample: torch.Tensor, next_timestep=None + self, model_output: torch.Tensor, timestep: int, sample: torch.Tensor, next_timestep: int | None = None ) -> tuple[torch.Tensor, Any]: """ Predict the sample at the previous timestep. 
Core function to propagate the diffusion From 0bbc0dde9f84858e80d0989a1b16e6a70ff39fa0 Mon Sep 17 00:00:00 2001 From: Can-Zhao Date: Wed, 5 Mar 2025 19:58:43 +0000 Subject: [PATCH 28/55] reformat Signed-off-by: Can-Zhao --- monai/networks/schedulers/rectified_flow.py | 48 +++++++++++++-------- 1 file changed, 30 insertions(+), 18 deletions(-) diff --git a/monai/networks/schedulers/rectified_flow.py b/monai/networks/schedulers/rectified_flow.py index 995739c2aa..1e961ac847 100644 --- a/monai/networks/schedulers/rectified_flow.py +++ b/monai/networks/schedulers/rectified_flow.py @@ -28,7 +28,7 @@ from __future__ import annotations -from typing import Any +from typing import Any, Union import numpy as np import torch @@ -171,15 +171,16 @@ def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timeste Returns: noisy_samples: sample with added noise """ - timepoints = timesteps.float() / self.num_train_timesteps + timepoints: torch.Tensor = timesteps.float() / self.num_train_timesteps timepoints = 1 - timepoints # [1,1/1000] # timepoint (bsz) noise: (bsz, 4, frame, w ,h) # expand timepoint to noise shape timepoints = timepoints.unsqueeze(1).unsqueeze(1).unsqueeze(1).unsqueeze(1) timepoints = timepoints.repeat(1, noise.shape[1], noise.shape[2], noise.shape[3], noise.shape[4]) + noisy_samples: torch.Tensor = timepoints * original_samples + (1 - timepoints) * noise - return timepoints * original_samples + (1 - timepoints) * noise + return noisy_samples def set_timesteps( self, @@ -255,27 +256,38 @@ def sample_timesteps(self, x_start): return t def step( - self, model_output: torch.Tensor, timestep: int, sample: torch.Tensor, next_timestep: int | None = None - ) -> tuple[torch.Tensor, Any]: + self, model_output: torch.Tensor, timestep: int, sample: torch.Tensor, next_timestep: Union[int, None] = None + ) -> tuple[torch.Tensor, None]: """ - Predict the sample at the previous timestep. Core function to propagate the diffusion - process from the learned model outputs. + Predicts the next sample in the diffusion process. Args: - model_output: direct output from learned diffusion model. - timestep: current discrete timestep in the diffusion chain. - sample: current instance of sample being created by diffusion process. - next_timestep: next discrete timestep in the diffusion chain. + model_output (torch.Tensor): Output from the trained diffusion model. + timestep (int): Current timestep in the diffusion chain. + sample (torch.Tensor): Current sample in the process. + next_timestep (Union[int, None]): Optional next timestep. + Returns: - pred_prev_sample: Predicted previous sample - None + tuple[torch.Tensor, None]: Predicted sample at the next step and additional info. """ + # Ensure num_inference_steps exists and is a valid integer + if not hasattr(self, "num_inference_steps") or not isinstance(self.num_inference_steps, int): + raise AttributeError( + "num_inference_steps is missing or not an integer in the class." + "Please run self.set_timesteps(num_inference_steps,device,input_img_size_numel) to set it." 
+ ) + v_pred = model_output - if next_timestep is None: - dt = 1.0 / self.num_inference_steps + + if next_timestep is not None: + next_timestep = int(next_timestep) + dt: float = ( + float(timestep - next_timestep) / self.num_train_timesteps + ) # Now next_timestep is guaranteed to be int else: - dt = timestep - next_timestep - dt = dt / self.num_train_timesteps - z = sample + v_pred * dt + dt = ( + 1.0 / float(self.num_inference_steps) if self.num_inference_steps > 0 else 0.0 + ) # Avoid division by zero + z = sample + v_pred * dt return z, None From c070581e1202f62fa3bc78db345f416485139acd Mon Sep 17 00:00:00 2001 From: Can-Zhao Date: Wed, 5 Mar 2025 20:01:10 +0000 Subject: [PATCH 29/55] reformat Signed-off-by: Can-Zhao --- monai/networks/schedulers/rectified_flow.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/monai/networks/schedulers/rectified_flow.py b/monai/networks/schedulers/rectified_flow.py index 1e961ac847..795ca3d85b 100644 --- a/monai/networks/schedulers/rectified_flow.py +++ b/monai/networks/schedulers/rectified_flow.py @@ -196,9 +196,10 @@ def set_timesteps( device: target device to put the data. input_img_size_numel: int, H*W*D of the image, used with self.use_timestep_transform is True. """ - if num_inference_steps > self.num_train_timesteps: + if num_inference_steps > self.num_train_timesteps or num_inference_steps < 1: raise ValueError( - f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.num_train_timesteps`:" + f"`num_inference_steps`: {num_inference_steps} should be at least 1, " + "and cannot be larger than `self.num_train_timesteps`:" f" {self.num_train_timesteps} as the unet model trained with this scheduler can only handle" f" maximal {self.num_train_timesteps} timesteps." ) From b036450ded7f4449f7db2f8be8d9df5a332116e1 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 5 Mar 2025 19:59:18 +0000 Subject: [PATCH 30/55] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Can-Zhao --- monai/networks/schedulers/rectified_flow.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monai/networks/schedulers/rectified_flow.py b/monai/networks/schedulers/rectified_flow.py index 795ca3d85b..26b908ce4c 100644 --- a/monai/networks/schedulers/rectified_flow.py +++ b/monai/networks/schedulers/rectified_flow.py @@ -28,7 +28,7 @@ from __future__ import annotations -from typing import Any, Union +from typing import Union import numpy as np import torch From 81663dbc269c79df0d503605c2bc874648202b28 Mon Sep 17 00:00:00 2001 From: Can-Zhao Date: Wed, 5 Mar 2025 21:10:29 +0000 Subject: [PATCH 31/55] add prev_original Signed-off-by: Can-Zhao --- monai/networks/schedulers/rectified_flow.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/monai/networks/schedulers/rectified_flow.py b/monai/networks/schedulers/rectified_flow.py index 26b908ce4c..27a4473d9f 100644 --- a/monai/networks/schedulers/rectified_flow.py +++ b/monai/networks/schedulers/rectified_flow.py @@ -258,7 +258,7 @@ def sample_timesteps(self, x_start): def step( self, model_output: torch.Tensor, timestep: int, sample: torch.Tensor, next_timestep: Union[int, None] = None - ) -> tuple[torch.Tensor, None]: + ) -> tuple[torch.Tensor, torch.Tensor]: """ Predicts the next sample in the diffusion process. 
@@ -290,5 +290,7 @@ def step( 1.0 / float(self.num_inference_steps) if self.num_inference_steps > 0 else 0.0 ) # Avoid division by zero - z = sample + v_pred * dt - return z, None + pred_post_sample = sample + v_pred * dt + pred_original_sample = sample + v_pred * timestep/self.num_train_timesteps + + return pred_post_sample, pred_original_sample From c314dbf13344254659279f6324f0ea05b8cf34e2 Mon Sep 17 00:00:00 2001 From: Can-Zhao Date: Wed, 5 Mar 2025 21:31:36 +0000 Subject: [PATCH 32/55] black Signed-off-by: Can-Zhao --- monai/networks/schedulers/rectified_flow.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/monai/networks/schedulers/rectified_flow.py b/monai/networks/schedulers/rectified_flow.py index 27a4473d9f..858f1c183a 100644 --- a/monai/networks/schedulers/rectified_flow.py +++ b/monai/networks/schedulers/rectified_flow.py @@ -291,6 +291,6 @@ def step( ) # Avoid division by zero pred_post_sample = sample + v_pred * dt - pred_original_sample = sample + v_pred * timestep/self.num_train_timesteps - + pred_original_sample = sample + v_pred * timestep / self.num_train_timesteps + return pred_post_sample, pred_original_sample From e7bb70d8b36433e36e754dcef4a04f1a2a7bf3b8 Mon Sep 17 00:00:00 2001 From: Can-Zhao Date: Wed, 5 Mar 2025 22:29:09 +0000 Subject: [PATCH 33/55] add doc Signed-off-by: Can-Zhao --- docs/source/networks.rst | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/docs/source/networks.rst b/docs/source/networks.rst index e2e509a99b..6ba7577955 100644 --- a/docs/source/networks.rst +++ b/docs/source/networks.rst @@ -750,3 +750,27 @@ Utilities .. automodule:: monai.apps.reconstruction.networks.nets.utils :members: + +Noise Schedulers +---------------- +.. currentmodule:: monai.networks.schedulers + +`AHNet` +~~~~~~~ +.. autoclass:: Scheduler + :members: + +.. autoclass:: NoiseSchedules + :members: + +.. autoclass:: DDPMScheduler + :members: + +.. autoclass:: DDIMScheduler + :members: + +.. autoclass:: PNDMScheduler + :members: + +.. autoclass:: RFlowScheduler + :members: From b24af7033af851a565f3fdf7278e80693cc3bc93 Mon Sep 17 00:00:00 2001 From: Can-Zhao Date: Wed, 5 Mar 2025 22:36:29 +0000 Subject: [PATCH 34/55] add doc Signed-off-by: Can-Zhao --- docs/source/networks.rst | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/docs/source/networks.rst b/docs/source/networks.rst index 6ba7577955..11e8ed1fb5 100644 --- a/docs/source/networks.rst +++ b/docs/source/networks.rst @@ -755,22 +755,32 @@ Noise Schedulers ---------------- .. currentmodule:: monai.networks.schedulers -`AHNet` -~~~~~~~ +`Scheduler` +~~~~~~~~~~~ .. autoclass:: Scheduler :members: +`NoiseSchedules` +~~~~~~~~~~~~~~~~ .. autoclass:: NoiseSchedules :members: +`DDPMScheduler` +~~~~~~~~~~~~~~~ .. autoclass:: DDPMScheduler :members: +`DDIMScheduler` +~~~~~~~~~~~~~~~ .. autoclass:: DDIMScheduler :members: +`PNDMScheduler` +~~~~~~~~~~~~~~~ .. autoclass:: PNDMScheduler :members: +`RFlowScheduler` +~~~~~~~~~~~~~~~~ .. 
autoclass:: RFlowScheduler :members: From 44997802a9f89f0256f15179aa624f78eae9f1ca Mon Sep 17 00:00:00 2001 From: Can-Zhao Date: Wed, 5 Mar 2025 22:53:17 +0000 Subject: [PATCH 35/55] add doc Signed-off-by: Can-Zhao --- docs/source/networks.rst | 1 + monai/networks/schedulers/rectified_flow.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/source/networks.rst b/docs/source/networks.rst index 11e8ed1fb5..0119c6db4d 100644 --- a/docs/source/networks.rst +++ b/docs/source/networks.rst @@ -753,6 +753,7 @@ Utilities Noise Schedulers ---------------- +.. automodule:: monai.networks.schedulers .. currentmodule:: monai.networks.schedulers `Scheduler` diff --git a/monai/networks/schedulers/rectified_flow.py b/monai/networks/schedulers/rectified_flow.py index 858f1c183a..a3002f59b8 100644 --- a/monai/networks/schedulers/rectified_flow.py +++ b/monai/networks/schedulers/rectified_flow.py @@ -71,7 +71,7 @@ class RFlowScheduler(Scheduler): Supports uniform and logit-normal sampling methods, timestep transformation for different resolutions, and noise addition during diffusion. - Attributes: + Args: num_train_timesteps (int): Total number of training timesteps. use_discrete_timesteps (bool): Whether to use discrete timesteps. sample_method (str): Training time step sampling method ('uniform' or 'logit-normal'). From 74e0a9b8744014aa34e623236e70898f1c34324d Mon Sep 17 00:00:00 2001 From: Can-Zhao Date: Thu, 6 Mar 2025 17:18:04 +0000 Subject: [PATCH 36/55] update doc Signed-off-by: Can-Zhao --- monai/networks/schedulers/rectified_flow.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monai/networks/schedulers/rectified_flow.py b/monai/networks/schedulers/rectified_flow.py index a3002f59b8..5bdeae0931 100644 --- a/monai/networks/schedulers/rectified_flow.py +++ b/monai/networks/schedulers/rectified_flow.py @@ -269,7 +269,7 @@ def step( next_timestep (Union[int, None]): Optional next timestep. Returns: - tuple[torch.Tensor, None]: Predicted sample at the next step and additional info. + tuple[torch.Tensor, torch.Tensor]: Predicted sample at the next step and additional info. 
""" # Ensure num_inference_steps exists and is a valid integer if not hasattr(self, "num_inference_steps") or not isinstance(self.num_inference_steps, int): From fd8d7f5ad0fb087242014d71b761b3bf25b00940 Mon Sep 17 00:00:00 2001 From: Can Zhao <69829124+Can-Zhao@users.noreply.github.com> Date: Thu, 6 Feb 2025 12:03:20 -0800 Subject: [PATCH 37/55] Update autoencoderkl_maisi.py When there is no need to perform tensor parallel, skip all the unnecessary splitting steps Signed-off-by: Can-Zhao --- monai/apps/generation/maisi/networks/autoencoderkl_maisi.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/monai/apps/generation/maisi/networks/autoencoderkl_maisi.py b/monai/apps/generation/maisi/networks/autoencoderkl_maisi.py index 6251ea8e83..3ed9c03188 100644 --- a/monai/apps/generation/maisi/networks/autoencoderkl_maisi.py +++ b/monai/apps/generation/maisi/networks/autoencoderkl_maisi.py @@ -231,6 +231,10 @@ def _concatenate_tensors(self, outputs: list[torch.Tensor], split_size: int, pad def forward(self, x: torch.Tensor) -> torch.Tensor: if self.print_info: logger.info(f"Number of splits: {self.num_splits}") + + if self.dim_split<=1 and self.num_splits<=1: + x = self.conv(x) + return x # compute size of splits l = x.size(self.dim_split + 2) From ecdb8121e56ce17c5b2aa101d0870b4de669471f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 6 Feb 2025 20:05:03 +0000 Subject: [PATCH 38/55] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Can-Zhao --- monai/apps/generation/maisi/networks/autoencoderkl_maisi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monai/apps/generation/maisi/networks/autoencoderkl_maisi.py b/monai/apps/generation/maisi/networks/autoencoderkl_maisi.py index 3ed9c03188..141f77231e 100644 --- a/monai/apps/generation/maisi/networks/autoencoderkl_maisi.py +++ b/monai/apps/generation/maisi/networks/autoencoderkl_maisi.py @@ -231,7 +231,7 @@ def _concatenate_tensors(self, outputs: list[torch.Tensor], split_size: int, pad def forward(self, x: torch.Tensor) -> torch.Tensor: if self.print_info: logger.info(f"Number of splits: {self.num_splits}") - + if self.dim_split<=1 and self.num_splits<=1: x = self.conv(x) return x From 990985938a25dc629e0d609acdca9fe7e1867e2d Mon Sep 17 00:00:00 2001 From: Can Zhao <69829124+Can-Zhao@users.noreply.github.com> Date: Thu, 6 Feb 2025 22:34:25 -0800 Subject: [PATCH 39/55] Update autoencoderkl_maisi.py Signed-off-by: Can-Zhao --- monai/apps/generation/maisi/networks/autoencoderkl_maisi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monai/apps/generation/maisi/networks/autoencoderkl_maisi.py b/monai/apps/generation/maisi/networks/autoencoderkl_maisi.py index 141f77231e..86b4e68864 100644 --- a/monai/apps/generation/maisi/networks/autoencoderkl_maisi.py +++ b/monai/apps/generation/maisi/networks/autoencoderkl_maisi.py @@ -232,7 +232,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: if self.print_info: logger.info(f"Number of splits: {self.num_splits}") - if self.dim_split<=1 and self.num_splits<=1: + if self.dim_split <= 1 and self.num_splits <= 1: x = self.conv(x) return x From 672674767547f0c649eb3f4c72aadf0484ee14cc Mon Sep 17 00:00:00 2001 From: Can Zhao <69829124+Can-Zhao@users.noreply.github.com> Date: Mon, 10 Feb 2025 20:29:50 -0800 Subject: [PATCH 40/55] DCO Remediation Commit for Can Zhao <69829124+Can-Zhao@users.noreply.github.com> I, Can Zhao 
<69829124+Can-Zhao@users.noreply.github.com>, hereby add my Signed-off-by to this commit: efdc623d2b15ae1b7610b47436c0843b0fd58a61 I, Can Zhao <69829124+Can-Zhao@users.noreply.github.com>, hereby add my Signed-off-by to this commit: 131f7e650fb85b0cf36f5eb8509ec3b242495357 Signed-off-by: Can Zhao <69829124+Can-Zhao@users.noreply.github.com> Signed-off-by: Can-Zhao --- monai/apps/generation/maisi/networks/autoencoderkl_maisi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monai/apps/generation/maisi/networks/autoencoderkl_maisi.py b/monai/apps/generation/maisi/networks/autoencoderkl_maisi.py index 86b4e68864..aa25adc50b 100644 --- a/monai/apps/generation/maisi/networks/autoencoderkl_maisi.py +++ b/monai/apps/generation/maisi/networks/autoencoderkl_maisi.py @@ -234,7 +234,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: if self.dim_split <= 1 and self.num_splits <= 1: x = self.conv(x) - return x + return x # compute size of splits l = x.size(self.dim_split + 2) From 0ff30349159096c5d0b701c9cd6017e9ad638430 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 11 Feb 2025 04:30:12 +0000 Subject: [PATCH 41/55] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Can-Zhao --- monai/apps/generation/maisi/networks/autoencoderkl_maisi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monai/apps/generation/maisi/networks/autoencoderkl_maisi.py b/monai/apps/generation/maisi/networks/autoencoderkl_maisi.py index aa25adc50b..86b4e68864 100644 --- a/monai/apps/generation/maisi/networks/autoencoderkl_maisi.py +++ b/monai/apps/generation/maisi/networks/autoencoderkl_maisi.py @@ -234,7 +234,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: if self.dim_split <= 1 and self.num_splits <= 1: x = self.conv(x) - return x + return x # compute size of splits l = x.size(self.dim_split + 2) From 454496f2c4312910eca389d94aa86b9d3176f4af Mon Sep 17 00:00:00 2001 From: monai-bot <64792179+monai-bot@users.noreply.github.com> Date: Fri, 7 Mar 2025 17:16:37 +0000 Subject: [PATCH 42/55] Auto3DSeg algo_template hash update (#8378) Signed-off-by: monai-bot Signed-off-by: monai-bot Signed-off-by: Can-Zhao --- monai/utils/misc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monai/utils/misc.py b/monai/utils/misc.py index b96a48ad7e..3df549c362 100644 --- a/monai/utils/misc.py +++ b/monai/utils/misc.py @@ -546,7 +546,7 @@ def doc_images() -> str | None: @staticmethod def algo_hash() -> str | None: - return os.environ.get("MONAI_ALGO_HASH", "e4cf5a1") + return os.environ.get("MONAI_ALGO_HASH", "c970bdf") @staticmethod def trace_transform() -> str | None: From 2df46378bd308a7f95cf4735b1e7555feaabb683 Mon Sep 17 00:00:00 2001 From: Can-Zhao Date: Mon, 10 Mar 2025 14:32:23 +0000 Subject: [PATCH 43/55] rm redundant line Signed-off-by: Can-Zhao --- monai/inferers/inferer.py | 1 - 1 file changed, 1 deletion(-) diff --git a/monai/inferers/inferer.py b/monai/inferers/inferer.py index f779f279d6..d35e777bcf 100644 --- a/monai/inferers/inferer.py +++ b/monai/inferers/inferer.py @@ -867,7 +867,6 @@ def sample( total=min(len(scheduler.timesteps), len(all_next_timesteps)), ) else: - progress_bar = iter(scheduler.timesteps) progress_bar = iter(zip(scheduler.timesteps, all_next_timesteps)) intermediates = [] From e428c38f30542ada93fc188837e6ca4b2b549806 Mon Sep 17 00:00:00 2001 From: Eric Kerfoot <17726042+ericspod@users.noreply.github.com> 
Date: Sat, 8 Mar 2025 00:14:24 +0000 Subject: [PATCH 44/55] Enable Pytorch 2.6 (#8309) Partially addresses #8303. This changes the maximum Numpy version to be below 3.0 for testing with 2.x compatibility. This appears to be resolved with newer versions of dependencies. This will also include fixes for Pytorch 2.6 mostly relating to `torch.load` and `autocast` usage. - [x] Non-breaking change (fix or new feature that would not break existing functionality). - [ ] Breaking change (fix or new feature that would cause existing functionality to change). - [ ] New tests added to cover the changes. - [ ] Integration tests passed locally by running `./runtests.sh -f -u --net --coverage`. - [ ] Quick tests passed locally by running `./runtests.sh --quick --unittests --disttests`. - [ ] In-line docstrings updated. - [ ] Documentation updated, tested `make html` command in the `docs/` folder. --------- Signed-off-by: Eric Kerfoot Signed-off-by: Eric Kerfoot <17726042+ericspod@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- monai/apps/deepedit/interaction.py | 2 +- monai/apps/deepgrow/interaction.py | 2 +- .../detection/networks/retinanet_detector.py | 2 +- .../detection/networks/retinanet_network.py | 10 +- monai/apps/detection/utils/box_coder.py | 4 +- monai/apps/mmars/mmars.py | 2 +- .../networks/blocks/varnetblock.py | 2 +- monai/bundle/scripts.py | 7 +- monai/data/dataset.py | 11 +-- monai/data/utils.py | 2 +- monai/data/video_dataset.py | 2 +- monai/engines/evaluator.py | 16 ++-- monai/engines/trainer.py | 18 ++-- monai/engines/utils.py | 2 +- monai/engines/workflow.py | 4 +- monai/fl/client/monai_algo.py | 2 +- monai/handlers/checkpoint_loader.py | 2 +- monai/inferers/inferer.py | 10 +- monai/inferers/merger.py | 29 +++--- monai/losses/perceptual.py | 2 +- monai/losses/sure_loss.py | 2 +- .../blocks/feature_pyramid_network.py | 6 +- monai/networks/layers/vector_quantizer.py | 4 +- monai/networks/nets/hovernet.py | 9 +- monai/networks/nets/resnet.py | 4 +- monai/networks/nets/senet.py | 2 +- monai/networks/nets/swin_unetr.py | 95 +++++++++---------- monai/networks/nets/transchex.py | 5 +- monai/networks/nets/vista3d.py | 14 +-- monai/networks/utils.py | 9 +- monai/transforms/intensity/array.py | 2 +- monai/transforms/spatial/array.py | 12 +-- monai/utils/state_cacher.py | 2 +- requirements.txt | 4 +- runtests.sh | 2 +- tests/bundle/test_bundle_download.py | 22 +++-- tests/config/test_cv2_dist.py | 3 +- tests/data/meta_tensor/test_meta_tensor.py | 4 +- .../test_integration_classification_2d.py | 2 +- .../test_integration_fast_train.py | 4 +- .../test_integration_segmentation_3d.py | 2 +- .../test_compute_multiscalessim_metric.py | 6 +- tests/networks/nets/test_autoencoderkl.py | 2 +- tests/networks/nets/test_controlnet.py | 2 +- .../nets/test_diffusion_model_unet.py | 2 +- .../networks/nets/test_network_consistency.py | 2 +- tests/networks/nets/test_swin_unetr.py | 2 +- tests/networks/nets/test_transformer.py | 2 +- tests/networks/test_save_state.py | 2 +- 49 files changed, 183 insertions(+), 177 deletions(-) diff --git a/monai/apps/deepedit/interaction.py b/monai/apps/deepedit/interaction.py index 07302575c6..33e50700ca 100644 --- a/monai/apps/deepedit/interaction.py +++ b/monai/apps/deepedit/interaction.py @@ -72,7 +72,7 @@ def __call__(self, engine: SupervisedTrainer | SupervisedEvaluator, batchdata: d with torch.no_grad(): if engine.amp: - with torch.cuda.amp.autocast(): + with torch.autocast("cuda"): predictions = 
engine.inferer(inputs, engine.network) else: predictions = engine.inferer(inputs, engine.network) diff --git a/monai/apps/deepgrow/interaction.py b/monai/apps/deepgrow/interaction.py index fa3a28bfef..287f2d607c 100644 --- a/monai/apps/deepgrow/interaction.py +++ b/monai/apps/deepgrow/interaction.py @@ -67,7 +67,7 @@ def __call__(self, engine: SupervisedTrainer | SupervisedEvaluator, batchdata: d engine.network.eval() with torch.no_grad(): if engine.amp: - with torch.cuda.amp.autocast(): + with torch.autocast("cuda"): predictions = engine.inferer(inputs, engine.network) else: predictions = engine.inferer(inputs, engine.network) diff --git a/monai/apps/detection/networks/retinanet_detector.py b/monai/apps/detection/networks/retinanet_detector.py index a0573d6cd1..e996ae81bc 100644 --- a/monai/apps/detection/networks/retinanet_detector.py +++ b/monai/apps/detection/networks/retinanet_detector.py @@ -180,7 +180,7 @@ def forward(self, images: torch.Tensor): nesterov=True, ) torch.save(detector.network.state_dict(), 'model.pt') # save model - detector.network.load_state_dict(torch.load('model.pt')) # load model + detector.network.load_state_dict(torch.load('model.pt', weights_only=True)) # load model """ def __init__( diff --git a/monai/apps/detection/networks/retinanet_network.py b/monai/apps/detection/networks/retinanet_network.py index ca6a8f5c19..ead57d74c2 100644 --- a/monai/apps/detection/networks/retinanet_network.py +++ b/monai/apps/detection/networks/retinanet_network.py @@ -88,8 +88,8 @@ def __init__( for layer in self.conv.children(): if isinstance(layer, conv_type): # type: ignore - torch.nn.init.normal_(layer.weight, std=0.01) - torch.nn.init.constant_(layer.bias, 0) + torch.nn.init.normal_(layer.weight, std=0.01) # type: ignore[arg-type] + torch.nn.init.constant_(layer.bias, 0) # type: ignore[arg-type] self.cls_logits = conv_type(in_channels, num_anchors * num_classes, kernel_size=3, stride=1, padding=1) torch.nn.init.normal_(self.cls_logits.weight, std=0.01) @@ -167,8 +167,8 @@ def __init__(self, in_channels: int, num_anchors: int, spatial_dims: int): for layer in self.conv.children(): if isinstance(layer, conv_type): # type: ignore - torch.nn.init.normal_(layer.weight, std=0.01) - torch.nn.init.zeros_(layer.bias) + torch.nn.init.normal_(layer.weight, std=0.01) # type: ignore[arg-type] + torch.nn.init.zeros_(layer.bias) # type: ignore[arg-type] def forward(self, x: list[Tensor]) -> list[Tensor]: """ @@ -297,7 +297,7 @@ def __init__( ) self.feature_extractor = feature_extractor - self.feature_map_channels: int = self.feature_extractor.out_channels + self.feature_map_channels: int = self.feature_extractor.out_channels # type: ignore[assignment] self.num_anchors = num_anchors self.classification_head = RetinaNetClassificationHead( self.feature_map_channels, self.num_anchors, self.num_classes, spatial_dims=self.spatial_dims diff --git a/monai/apps/detection/utils/box_coder.py b/monai/apps/detection/utils/box_coder.py index 504ae21d0f..d0f3adf71d 100644 --- a/monai/apps/detection/utils/box_coder.py +++ b/monai/apps/detection/utils/box_coder.py @@ -221,7 +221,7 @@ def decode_single(self, rel_codes: Tensor, reference_boxes: Tensor) -> Tensor: pred_ctr_xyx_axis = dxyz_axis * whd_axis[:, None] + ctr_xyz_axis[:, None] pred_whd_axis = torch.exp(dwhd_axis) * whd_axis[:, None] - pred_whd_axis = pred_whd_axis.to(dxyz_axis.dtype) + pred_whd_axis = pred_whd_axis.to(dxyz_axis.dtype) # type: ignore[union-attr] # When convert float32 to float16, Inf or Nan may occur if 
torch.isnan(pred_whd_axis).any() or torch.isinf(pred_whd_axis).any(): @@ -229,7 +229,7 @@ def decode_single(self, rel_codes: Tensor, reference_boxes: Tensor) -> Tensor: # Distance from center to box's corner. c_to_c_whd_axis = ( - torch.tensor(0.5, dtype=pred_ctr_xyx_axis.dtype, device=pred_whd_axis.device) * pred_whd_axis + torch.tensor(0.5, dtype=pred_ctr_xyx_axis.dtype, device=pred_whd_axis.device) * pred_whd_axis # type: ignore[arg-type] ) pred_boxes.append(pred_ctr_xyx_axis - c_to_c_whd_axis) diff --git a/monai/apps/mmars/mmars.py b/monai/apps/mmars/mmars.py index 31c88a17be..1fc0690cc9 100644 --- a/monai/apps/mmars/mmars.py +++ b/monai/apps/mmars/mmars.py @@ -241,7 +241,7 @@ def load_from_mmar( return torch.jit.load(_model_file, map_location=map_location) # loading with `torch.load` - model_dict = torch.load(_model_file, map_location=map_location) + model_dict = torch.load(_model_file, map_location=map_location, weights_only=True) if weights_only: return model_dict.get(model_key, model_dict) # model_dict[model_key] or model_dict directly diff --git a/monai/apps/reconstruction/networks/blocks/varnetblock.py b/monai/apps/reconstruction/networks/blocks/varnetblock.py index 75dc7e15ce..289505a057 100644 --- a/monai/apps/reconstruction/networks/blocks/varnetblock.py +++ b/monai/apps/reconstruction/networks/blocks/varnetblock.py @@ -55,7 +55,7 @@ def soft_dc(self, x: Tensor, ref_kspace: Tensor, mask: Tensor) -> Tensor: Returns: Output of DC block with the same shape as x """ - return torch.where(mask, x - ref_kspace, self.zeros) * self.dc_weight + return torch.where(mask, x - ref_kspace, self.zeros) * self.dc_weight # type: ignore def forward(self, current_kspace: Tensor, ref_kspace: Tensor, mask: Tensor, sens_maps: Tensor) -> Tensor: """ diff --git a/monai/bundle/scripts.py b/monai/bundle/scripts.py index b43f7e0fa0..6f35179e96 100644 --- a/monai/bundle/scripts.py +++ b/monai/bundle/scripts.py @@ -760,7 +760,7 @@ def load( if load_ts_module is True: return load_net_with_metadata(full_path, map_location=torch.device(device), more_extra_files=config_files) # loading with `torch.load` - model_dict = torch.load(full_path, map_location=torch.device(device)) + model_dict = torch.load(full_path, map_location=torch.device(device), weights_only=True) if not isinstance(model_dict, Mapping): warnings.warn(f"the state dictionary from {full_path} should be a dictionary but got {type(model_dict)}.") @@ -1279,9 +1279,8 @@ def verify_net_in_out( if input_dtype == torch.float16: # fp16 can only be executed in gpu mode net.to("cuda") - from torch.cuda.amp import autocast - with autocast(): + with torch.autocast("cuda"): output = net(test_data.cuda(), **extra_forward_args_) net.to(device_) else: @@ -1330,7 +1329,7 @@ def _export( # here we use ignite Checkpoint to support nested weights and be compatible with MONAI CheckpointSaver Checkpoint.load_objects(to_load={key_in_ckpt: net}, checkpoint=ckpt_file) else: - ckpt = torch.load(ckpt_file) + ckpt = torch.load(ckpt_file, weights_only=True) copy_model_state(dst=net, src=ckpt if key_in_ckpt == "" else ckpt[key_in_ckpt]) # Use the given converter to convert a model and save with metadata, config content diff --git a/monai/data/dataset.py b/monai/data/dataset.py index 8c53338d66..691425994d 100644 --- a/monai/data/dataset.py +++ b/monai/data/dataset.py @@ -22,7 +22,6 @@ import warnings from collections.abc import Callable, Sequence from copy import copy, deepcopy -from inspect import signature from multiprocessing.managers import ListProxy from 
multiprocessing.pool import ThreadPool from pathlib import Path @@ -372,10 +371,7 @@ def _cachecheck(self, item_transformed): if hashfile is not None and hashfile.is_file(): # cache hit try: - if "weights_only" in signature(torch.load).parameters: - return torch.load(hashfile, weights_only=False) - else: - return torch.load(hashfile) + return torch.load(hashfile, weights_only=False) except PermissionError as e: if sys.platform != "win32": raise e @@ -1674,7 +1670,4 @@ def _load_meta_cache(self, meta_hash_file_name): if meta_hash_file_name in self._meta_cache: return self._meta_cache[meta_hash_file_name] else: - if "weights_only" in signature(torch.load).parameters: - return torch.load(self.cache_dir / meta_hash_file_name, weights_only=False) - else: - return torch.load(self.cache_dir / meta_hash_file_name) + return torch.load(self.cache_dir / meta_hash_file_name, weights_only=False) diff --git a/monai/data/utils.py b/monai/data/utils.py index d03dbd3234..988b813272 100644 --- a/monai/data/utils.py +++ b/monai/data/utils.py @@ -753,7 +753,7 @@ def affine_to_spacing(affine: NdarrayTensor, r: int = 3, dtype=float, suppress_z if isinstance(_affine, torch.Tensor): spacing = torch.sqrt(torch.sum(_affine * _affine, dim=0)) else: - spacing = np.sqrt(np.sum(_affine * _affine, axis=0)) + spacing = np.sqrt(np.sum(_affine * _affine, axis=0)) # type: ignore[operator] if suppress_zeros: spacing[spacing == 0] = 1.0 spacing_, *_ = convert_to_dst_type(spacing, dst=affine, dtype=dtype) diff --git a/monai/data/video_dataset.py b/monai/data/video_dataset.py index 031e85db26..9ff23ebeff 100644 --- a/monai/data/video_dataset.py +++ b/monai/data/video_dataset.py @@ -177,7 +177,7 @@ def get_available_codecs() -> dict[str, str]: for codec, ext in all_codecs.items(): writer = cv2.VideoWriter() fname = os.path.join(tmp_dir, f"test{ext}") - fourcc = cv2.VideoWriter_fourcc(*codec) + fourcc = cv2.VideoWriter_fourcc(*codec) # type: ignore[attr-defined] noviderr = writer.open(fname, fourcc, 1, (10, 10)) if noviderr: codecs[codec] = ext diff --git a/monai/engines/evaluator.py b/monai/engines/evaluator.py index 35d4928465..836b407ac5 100644 --- a/monai/engines/evaluator.py +++ b/monai/engines/evaluator.py @@ -82,8 +82,8 @@ class Evaluator(Workflow): default to `True`. to_kwargs: dict of other args for `prepare_batch` API when converting the input data, except for `device`, `non_blocking`. - amp_kwargs: dict of the args for `torch.cuda.amp.autocast()` API, for more details: - https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.autocast. + amp_kwargs: dict of the args for `torch.autocast("cuda")` API, for more details: + https://pytorch.org/docs/stable/amp.html#torch.autocast. """ @@ -214,8 +214,8 @@ class SupervisedEvaluator(Evaluator): default to `True`. to_kwargs: dict of other args for `prepare_batch` API when converting the input data, except for `device`, `non_blocking`. - amp_kwargs: dict of the args for `torch.cuda.amp.autocast()` API, for more details: - https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.autocast. + amp_kwargs: dict of the args for `torch.autocast("cuda")` API, for more details: + https://pytorch.org/docs/stable/amp.html#torch.autocast. compile: whether to use `torch.compile`, default is False. If True, MetaTensor inputs will be converted to `torch.Tensor` before forward pass, then converted back afterward with copied meta information. 
compile_kwargs: dict of the args for `torch.compile()` API, for more details: @@ -324,7 +324,7 @@ def _iteration(self, engine: SupervisedEvaluator, batchdata: dict[str, torch.Ten # execute forward computation with engine.mode(engine.network): if engine.amp: - with torch.cuda.amp.autocast(**engine.amp_kwargs): + with torch.autocast("cuda", **engine.amp_kwargs): engine.state.output[Keys.PRED] = engine.inferer(inputs, engine.network, *args, **kwargs) else: engine.state.output[Keys.PRED] = engine.inferer(inputs, engine.network, *args, **kwargs) @@ -394,8 +394,8 @@ class EnsembleEvaluator(Evaluator): default to `True`. to_kwargs: dict of other args for `prepare_batch` API when converting the input data, except for `device`, `non_blocking`. - amp_kwargs: dict of the args for `torch.cuda.amp.autocast()` API, for more details: - https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.autocast. + amp_kwargs: dict of the args for `torch.autocast("cuda")` API, for more details: + https://pytorch.org/docs/stable/amp.html#torch.autocast. """ @@ -487,7 +487,7 @@ def _iteration(self, engine: EnsembleEvaluator, batchdata: dict[str, torch.Tenso for idx, network in enumerate(engine.networks): with engine.mode(network): if engine.amp: - with torch.cuda.amp.autocast(**engine.amp_kwargs): + with torch.autocast("cuda", **engine.amp_kwargs): if isinstance(engine.state.output, dict): engine.state.output.update( {engine.pred_keys[idx]: engine.inferer(inputs, network, *args, **kwargs)} diff --git a/monai/engines/trainer.py b/monai/engines/trainer.py index fdb45fbab8..b69a5015bb 100644 --- a/monai/engines/trainer.py +++ b/monai/engines/trainer.py @@ -125,8 +125,8 @@ class SupervisedTrainer(Trainer): more details: https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html. to_kwargs: dict of other args for `prepare_batch` API when converting the input data, except for `device`, `non_blocking`. - amp_kwargs: dict of the args for `torch.cuda.amp.autocast()` API, for more details: - https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.autocast. + amp_kwargs: dict of the args for `torch.autocast("cuda")` API, for more details: + https://pytorch.org/docs/stable/amp.html#torch.autocast. compile: whether to use `torch.compile`, default is False. If True, MetaTensor inputs will be converted to `torch.Tensor` before forward pass, then converted back afterward with copied meta information. compile_kwargs: dict of the args for `torch.compile()` API, for more details: @@ -249,7 +249,7 @@ def _compute_pred_loss(): engine.optimizer.zero_grad(set_to_none=engine.optim_set_to_none) if engine.amp and engine.scaler is not None: - with torch.cuda.amp.autocast(**engine.amp_kwargs): + with torch.autocast("cuda", **engine.amp_kwargs): _compute_pred_loss() engine.scaler.scale(engine.state.output[Keys.LOSS]).backward() engine.fire_event(IterationEvents.BACKWARD_COMPLETED) @@ -335,8 +335,8 @@ class GanTrainer(Trainer): more details: https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html. to_kwargs: dict of other args for `prepare_batch` API when converting the input data, except for `device`, `non_blocking`. - amp_kwargs: dict of the args for `torch.cuda.amp.autocast()` API, for more details: - https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.autocast. + amp_kwargs: dict of the args for `torch.autocast("cuda")` API, for more details: + https://pytorch.org/docs/stable/amp.html#torch.autocast. 
""" @@ -512,8 +512,8 @@ class AdversarialTrainer(Trainer): more details: https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html. to_kwargs: dict of other args for `prepare_batch` API when converting the input data, except for `device`, `non_blocking`. - amp_kwargs: dict of the args for `torch.cuda.amp.autocast()` API, for more details: - https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.autocast. + amp_kwargs: dict of the args for `torch.autocast("cuda")` API, for more details: + https://pytorch.org/docs/stable/amp.html#torch.autocast. """ def __init__( @@ -683,7 +683,7 @@ def _compute_generator_loss() -> None: engine.state.g_optimizer.zero_grad(set_to_none=engine.optim_set_to_none) if engine.amp and engine.state.g_scaler is not None: - with torch.cuda.amp.autocast(**engine.amp_kwargs): + with torch.autocast("cuda", **engine.amp_kwargs): _compute_generator_loss() engine.state.output[Keys.LOSS] = ( @@ -731,7 +731,7 @@ def _compute_discriminator_loss() -> None: engine.state.d_network.zero_grad(set_to_none=engine.optim_set_to_none) if engine.amp and engine.state.d_scaler is not None: - with torch.cuda.amp.autocast(**engine.amp_kwargs): + with torch.autocast("cuda", **engine.amp_kwargs): _compute_discriminator_loss() engine.state.d_scaler.scale(engine.state.output[AdversarialKeys.DISCRIMINATOR_LOSS]).backward() diff --git a/monai/engines/utils.py b/monai/engines/utils.py index 8e19a18601..9095f8d943 100644 --- a/monai/engines/utils.py +++ b/monai/engines/utils.py @@ -309,7 +309,7 @@ def __init__(self, scheduler: nn.Module, num_train_timesteps: int, condition_nam self.scheduler = scheduler def get_target(self, images, noise, timesteps): - return self.scheduler.get_velocity(images, noise, timesteps) + return self.scheduler.get_velocity(images, noise, timesteps) # type: ignore[operator] def default_make_latent( diff --git a/monai/engines/workflow.py b/monai/engines/workflow.py index 0c36da6d3d..ecb0c4a070 100644 --- a/monai/engines/workflow.py +++ b/monai/engines/workflow.py @@ -90,8 +90,8 @@ class Workflow(Engine): default to `True`. to_kwargs: dict of other args for `prepare_batch` API when converting the input data, except for `device`, `non_blocking`. - amp_kwargs: dict of the args for `torch.cuda.amp.autocast()` API, for more details: - https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.autocast. + amp_kwargs: dict of the args for `torch.autocast("cuda")` API, for more details: + https://pytorch.org/docs/stable/amp.html#torch.autocast. Raises: TypeError: When ``data_loader`` is not a ``torch.utils.data.DataLoader``. 
diff --git a/monai/fl/client/monai_algo.py b/monai/fl/client/monai_algo.py index a3ac58c221..6e9a6fd1fe 100644 --- a/monai/fl/client/monai_algo.py +++ b/monai/fl/client/monai_algo.py @@ -574,7 +574,7 @@ def get_weights(self, extra=None): model_path = os.path.join(self.bundle_root, cast(str, self.model_filepaths[model_type])) if not os.path.isfile(model_path): raise ValueError(f"No best model checkpoint exists at {model_path}") - weights = torch.load(model_path, map_location="cpu") + weights = torch.load(model_path, map_location="cpu", weights_only=True) # if weights contain several state dicts, use the one defined by `save_dict_key` if isinstance(weights, dict) and self.save_dict_key in weights: weights = weights.get(self.save_dict_key) diff --git a/monai/handlers/checkpoint_loader.py b/monai/handlers/checkpoint_loader.py index f48968ecfd..16cb875d03 100644 --- a/monai/handlers/checkpoint_loader.py +++ b/monai/handlers/checkpoint_loader.py @@ -122,7 +122,7 @@ def __call__(self, engine: Engine) -> None: Args: engine: Ignite Engine, it can be a trainer, validator or evaluator. """ - checkpoint = torch.load(self.load_path, map_location=self.map_location) + checkpoint = torch.load(self.load_path, map_location=self.map_location, weights_only=False) k, _ = list(self.load_dict.items())[0] # single object and checkpoint is directly a state_dict diff --git a/monai/inferers/inferer.py b/monai/inferers/inferer.py index d35e777bcf..3607c6d57e 100644 --- a/monai/inferers/inferer.py +++ b/monai/inferers/inferer.py @@ -996,8 +996,8 @@ def get_likelihood( predicted_mean = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * noisy_image # get the posterior mean and variance - posterior_mean = scheduler._get_mean(timestep=t, x_0=inputs, x_t=noisy_image) - posterior_variance = scheduler._get_variance(timestep=t, predicted_variance=predicted_variance) + posterior_mean = scheduler._get_mean(timestep=t, x_0=inputs, x_t=noisy_image) # type: ignore[operator] + posterior_variance = scheduler._get_variance(timestep=t, predicted_variance=predicted_variance) # type: ignore[operator] log_posterior_variance = torch.log(posterior_variance) log_predicted_variance = torch.log(predicted_variance) if predicted_variance else log_posterior_variance @@ -1446,7 +1446,7 @@ def sample( # type: ignore[override] ) # 3. 
compute previous image: x_t -> x_t-1 - image, _ = scheduler.step(model_output, t, image) + image, _ = scheduler.step(model_output, t, image) # type: ignore[operator] if save_intermediates and t % intermediate_steps == 0: intermediates.append(image) if save_intermediates: @@ -1572,8 +1572,8 @@ def get_likelihood( # type: ignore[override] predicted_mean = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * noisy_image # get the posterior mean and variance - posterior_mean = scheduler._get_mean(timestep=t, x_0=inputs, x_t=noisy_image) - posterior_variance = scheduler._get_variance(timestep=t, predicted_variance=predicted_variance) + posterior_mean = scheduler._get_mean(timestep=t, x_0=inputs, x_t=noisy_image) # type: ignore[operator] + posterior_variance = scheduler._get_variance(timestep=t, predicted_variance=predicted_variance) # type: ignore[operator] log_posterior_variance = torch.log(posterior_variance) log_predicted_variance = torch.log(predicted_variance) if predicted_variance else log_posterior_variance diff --git a/monai/inferers/merger.py b/monai/inferers/merger.py index 1344207e18..a1ab8e8a56 100644 --- a/monai/inferers/merger.py +++ b/monai/inferers/merger.py @@ -53,8 +53,11 @@ def __init__( cropped_shape: Sequence[int] | None = None, device: torch.device | str | None = None, ) -> None: - self.merged_shape = merged_shape - self.cropped_shape = self.merged_shape if cropped_shape is None else cropped_shape + if merged_shape is None: + raise ValueError("Argument `merged_shape` must be provided") + + self.merged_shape: tuple[int, ...] = tuple(merged_shape) + self.cropped_shape: tuple[int, ...] = tuple(self.merged_shape if cropped_shape is None else cropped_shape) self.device = device self.is_finalized = False @@ -231,9 +234,9 @@ def __init__( dtype: np.dtype | str = "float32", value_dtype: np.dtype | str = "float32", count_dtype: np.dtype | str = "uint8", - store: zarr.storage.Store | str = "merged.zarr", - value_store: zarr.storage.Store | str | None = None, - count_store: zarr.storage.Store | str | None = None, + store: zarr.storage.Store | str = "merged.zarr", # type: ignore + value_store: zarr.storage.Store | str | None = None, # type: ignore + count_store: zarr.storage.Store | str | None = None, # type: ignore compressor: str | None = None, value_compressor: str | None = None, count_compressor: str | None = None, @@ -251,18 +254,18 @@ def __init__( if version_geq(get_package_version("zarr"), "3.0.0"): if value_store is None: self.tmpdir = TemporaryDirectory() - self.value_store = zarr.storage.LocalStore(self.tmpdir.name) + self.value_store = zarr.storage.LocalStore(self.tmpdir.name) # type: ignore else: - self.value_store = value_store + self.value_store = value_store # type: ignore if count_store is None: self.tmpdir = TemporaryDirectory() - self.count_store = zarr.storage.LocalStore(self.tmpdir.name) + self.count_store = zarr.storage.LocalStore(self.tmpdir.name) # type: ignore else: - self.count_store = count_store + self.count_store = count_store # type: ignore else: self.tmpdir = None - self.value_store = zarr.storage.TempStore() if value_store is None else value_store - self.count_store = zarr.storage.TempStore() if count_store is None else count_store + self.value_store = zarr.storage.TempStore() if value_store is None else value_store # type: ignore + self.count_store = zarr.storage.TempStore() if count_store is None else count_store # type: ignore self.chunks = chunks self.compressor = compressor self.value_compressor = value_compressor @@ -314,7 
+317,7 @@ def aggregate(self, values: torch.Tensor, location: Sequence[int]) -> None: map_slice = ensure_tuple_size(map_slice, values.ndim, pad_val=slice(None), pad_from_start=True) with self.lock: self.values[map_slice] += values.numpy() - self.counts[map_slice] += 1 + self.counts[map_slice] += 1 # type: ignore[operator] def finalize(self) -> zarr.Array: """ @@ -332,7 +335,7 @@ def finalize(self) -> zarr.Array: if not self.is_finalized: # use chunks for division to fit into memory for chunk in iterate_over_chunks(self.values.chunks, self.values.cdata_shape): - self.output[chunk] = self.values[chunk] / self.counts[chunk] + self.output[chunk] = self.values[chunk] / self.counts[chunk] # type: ignore[operator] # finalize the shape self.output.resize(self.cropped_shape) # set finalize flag to protect performing in-place division again diff --git a/monai/losses/perceptual.py b/monai/losses/perceptual.py index a8ae90993a..ee653fac9d 100644 --- a/monai/losses/perceptual.py +++ b/monai/losses/perceptual.py @@ -374,7 +374,7 @@ def __init__( else: network = torchvision.models.resnet50(weights=None) if pretrained is True: - state_dict = torch.load(pretrained_path) + state_dict = torch.load(pretrained_path, weights_only=True) if pretrained_state_dict_key is not None: state_dict = state_dict[pretrained_state_dict_key] network.load_state_dict(state_dict) diff --git a/monai/losses/sure_loss.py b/monai/losses/sure_loss.py index ebf25613a6..fa8820885d 100644 --- a/monai/losses/sure_loss.py +++ b/monai/losses/sure_loss.py @@ -92,7 +92,7 @@ def sure_loss_function( y_ref = operator(x) # get perturbed output - x_perturbed = x + eps * perturb_noise + x_perturbed = x + eps * perturb_noise # type: ignore y_perturbed = operator(x_perturbed) # divergence divergence = torch.sum(1.0 / eps * torch.matmul(perturb_noise.permute(0, 1, 3, 2), y_perturbed - y_ref)) # type: ignore diff --git a/monai/networks/blocks/feature_pyramid_network.py b/monai/networks/blocks/feature_pyramid_network.py index 7de899803c..759a4efe0d 100644 --- a/monai/networks/blocks/feature_pyramid_network.py +++ b/monai/networks/blocks/feature_pyramid_network.py @@ -54,7 +54,9 @@ from collections import OrderedDict from collections.abc import Callable +from typing import cast +import torch import torch.nn.functional as F from torch import Tensor, nn @@ -194,8 +196,8 @@ def __init__( conv_type_: type[nn.Module] = Conv[Conv.CONV, spatial_dims] for m in self.modules(): if isinstance(m, conv_type_): - nn.init.kaiming_uniform_(m.weight, a=1) - nn.init.constant_(m.bias, 0.0) + nn.init.kaiming_uniform_(cast(torch.Tensor, m.weight), a=1) + nn.init.constant_(cast(torch.Tensor, m.bias), 0.0) if extra_blocks is not None: if not isinstance(extra_blocks, ExtraFPNBlock): diff --git a/monai/networks/layers/vector_quantizer.py b/monai/networks/layers/vector_quantizer.py index 9c354e1009..0ff7143b69 100644 --- a/monai/networks/layers/vector_quantizer.py +++ b/monai/networks/layers/vector_quantizer.py @@ -100,7 +100,7 @@ def quantize(self, inputs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, to torch.Tensor: Quantization indices of shape [B,H,W,D,1] """ - with torch.cuda.amp.autocast(enabled=False): + with torch.autocast("cuda", enabled=False): encoding_indices_view = list(inputs.shape) del encoding_indices_view[1] @@ -138,7 +138,7 @@ def embed(self, embedding_indices: torch.Tensor) -> torch.Tensor: Returns: torch.Tensor: Quantize space representation of encoding_indices in channel first format. 
""" - with torch.cuda.amp.autocast(enabled=False): + with torch.autocast("cuda", enabled=False): embedding: torch.Tensor = ( self.embedding(embedding_indices).permute(self.quantization_permutation).contiguous() ) diff --git a/monai/networks/nets/hovernet.py b/monai/networks/nets/hovernet.py index 3745b66bb5..b773af91d4 100644 --- a/monai/networks/nets/hovernet.py +++ b/monai/networks/nets/hovernet.py @@ -633,9 +633,9 @@ def _remap_preact_resnet_model(model_url: str): # download the pretrained weights into torch hub's default dir weights_dir = os.path.join(torch.hub.get_dir(), "preact-resnet50.pth") download_url(model_url, fuzzy=True, filepath=weights_dir, progress=False) - state_dict = torch.load(weights_dir, map_location=None if torch.cuda.is_available() else torch.device("cpu"))[ - "desc" - ] + map_location = None if torch.cuda.is_available() else torch.device("cpu") + state_dict = torch.load(weights_dir, map_location=map_location, weights_only=True)["desc"] + for key in list(state_dict.keys()): new_key = None if pattern_conv0.match(key): @@ -668,7 +668,8 @@ def _remap_standard_resnet_model(model_url: str, state_dict_key: str | None = No # download the pretrained weights into torch hub's default dir weights_dir = os.path.join(torch.hub.get_dir(), "resnet50.pth") download_url(model_url, fuzzy=True, filepath=weights_dir, progress=False) - state_dict = torch.load(weights_dir, map_location=None if torch.cuda.is_available() else torch.device("cpu")) + map_location = None if torch.cuda.is_available() else torch.device("cpu") + state_dict = torch.load(weights_dir, map_location=map_location, weights_only=True) if state_dict_key is not None: state_dict = state_dict[state_dict_key] diff --git a/monai/networks/nets/resnet.py b/monai/networks/nets/resnet.py index d62722478e..d24b86d27d 100644 --- a/monai/networks/nets/resnet.py +++ b/monai/networks/nets/resnet.py @@ -493,7 +493,7 @@ def _resnet( if isinstance(pretrained, str): if Path(pretrained).exists(): logger.info(f"Loading weights from {pretrained}...") - model_state_dict = torch.load(pretrained, map_location=device) + model_state_dict = torch.load(pretrained, map_location=device, weights_only=True) else: # Throw error raise FileNotFoundError("The pretrained checkpoint file is not found") @@ -665,7 +665,7 @@ def get_pretrained_resnet_medicalnet(resnet_depth: int, device: str = "cpu", dat raise EntryNotFoundError( f"{filename} not found on {medicalnet_huggingface_repo_basename}{resnet_depth}" ) from None - checkpoint = torch.load(pretrained_path, map_location=torch.device(device)) + checkpoint = torch.load(pretrained_path, map_location=torch.device(device), weights_only=True) else: raise NotImplementedError("Supported resnet_depth are: [10, 18, 34, 50, 101, 152, 200]") logger.info(f"{filename} downloaded") diff --git a/monai/networks/nets/senet.py b/monai/networks/nets/senet.py index 51435a9ea2..c14118ad20 100644 --- a/monai/networks/nets/senet.py +++ b/monai/networks/nets/senet.py @@ -302,7 +302,7 @@ def _load_state_dict(model: nn.Module, arch: str, progress: bool): if isinstance(model_url, dict): download_url(model_url["url"], filepath=model_url["filename"]) - state_dict = torch.load(model_url["filename"], map_location=None) + state_dict = torch.load(model_url["filename"], map_location=None, weights_only=True) else: state_dict = load_state_dict_from_url(model_url, progress=progress) for key in list(state_dict.keys()): diff --git a/monai/networks/nets/swin_unetr.py b/monai/networks/nets/swin_unetr.py index cfc5dda41f..22e1e6f659 100644 --- 
a/monai/networks/nets/swin_unetr.py +++ b/monai/networks/nets/swin_unetr.py @@ -272,53 +272,50 @@ def __init__( self.out = UnetOutBlock(spatial_dims=spatial_dims, in_channels=feature_size, out_channels=out_channels) def load_from(self, weights): + layers1_0: BasicLayer = self.swinViT.layers1[0] # type: ignore[assignment] + layers2_0: BasicLayer = self.swinViT.layers2[0] # type: ignore[assignment] + layers3_0: BasicLayer = self.swinViT.layers3[0] # type: ignore[assignment] + layers4_0: BasicLayer = self.swinViT.layers4[0] # type: ignore[assignment] + wstate = weights["state_dict"] + with torch.no_grad(): - self.swinViT.patch_embed.proj.weight.copy_(weights["state_dict"]["module.patch_embed.proj.weight"]) - self.swinViT.patch_embed.proj.bias.copy_(weights["state_dict"]["module.patch_embed.proj.bias"]) - for bname, block in self.swinViT.layers1[0].blocks.named_children(): - block.load_from(weights, n_block=bname, layer="layers1") - self.swinViT.layers1[0].downsample.reduction.weight.copy_( - weights["state_dict"]["module.layers1.0.downsample.reduction.weight"] - ) - self.swinViT.layers1[0].downsample.norm.weight.copy_( - weights["state_dict"]["module.layers1.0.downsample.norm.weight"] - ) - self.swinViT.layers1[0].downsample.norm.bias.copy_( - weights["state_dict"]["module.layers1.0.downsample.norm.bias"] - ) - for bname, block in self.swinViT.layers2[0].blocks.named_children(): - block.load_from(weights, n_block=bname, layer="layers2") - self.swinViT.layers2[0].downsample.reduction.weight.copy_( - weights["state_dict"]["module.layers2.0.downsample.reduction.weight"] - ) - self.swinViT.layers2[0].downsample.norm.weight.copy_( - weights["state_dict"]["module.layers2.0.downsample.norm.weight"] - ) - self.swinViT.layers2[0].downsample.norm.bias.copy_( - weights["state_dict"]["module.layers2.0.downsample.norm.bias"] - ) - for bname, block in self.swinViT.layers3[0].blocks.named_children(): - block.load_from(weights, n_block=bname, layer="layers3") - self.swinViT.layers3[0].downsample.reduction.weight.copy_( - weights["state_dict"]["module.layers3.0.downsample.reduction.weight"] - ) - self.swinViT.layers3[0].downsample.norm.weight.copy_( - weights["state_dict"]["module.layers3.0.downsample.norm.weight"] - ) - self.swinViT.layers3[0].downsample.norm.bias.copy_( - weights["state_dict"]["module.layers3.0.downsample.norm.bias"] - ) - for bname, block in self.swinViT.layers4[0].blocks.named_children(): - block.load_from(weights, n_block=bname, layer="layers4") - self.swinViT.layers4[0].downsample.reduction.weight.copy_( - weights["state_dict"]["module.layers4.0.downsample.reduction.weight"] - ) - self.swinViT.layers4[0].downsample.norm.weight.copy_( - weights["state_dict"]["module.layers4.0.downsample.norm.weight"] - ) - self.swinViT.layers4[0].downsample.norm.bias.copy_( - weights["state_dict"]["module.layers4.0.downsample.norm.bias"] - ) + self.swinViT.patch_embed.proj.weight.copy_(wstate["module.patch_embed.proj.weight"]) + self.swinViT.patch_embed.proj.bias.copy_(wstate["module.patch_embed.proj.bias"]) + for bname, block in layers1_0.blocks.named_children(): + block.load_from(weights, n_block=bname, layer="layers1") # type: ignore[operator] + + if layers1_0.downsample is not None: + d = layers1_0.downsample + d.reduction.weight.copy_(wstate["module.layers1.0.downsample.reduction.weight"]) # type: ignore + d.norm.weight.copy_(wstate["module.layers1.0.downsample.norm.weight"]) # type: ignore + d.norm.bias.copy_(wstate["module.layers1.0.downsample.norm.bias"]) # type: ignore + + for bname, block in 
layers2_0.blocks.named_children(): + block.load_from(weights, n_block=bname, layer="layers2") # type: ignore[operator] + + if layers2_0.downsample is not None: + d = layers2_0.downsample + d.reduction.weight.copy_(wstate["module.layers2.0.downsample.reduction.weight"]) # type: ignore + d.norm.weight.copy_(wstate["module.layers2.0.downsample.norm.weight"]) # type: ignore + d.norm.bias.copy_(wstate["module.layers2.0.downsample.norm.bias"]) # type: ignore + + for bname, block in layers3_0.blocks.named_children(): + block.load_from(weights, n_block=bname, layer="layers3") # type: ignore[operator] + + if layers3_0.downsample is not None: + d = layers3_0.downsample + d.reduction.weight.copy_(wstate["module.layers3.0.downsample.reduction.weight"]) # type: ignore + d.norm.weight.copy_(wstate["module.layers3.0.downsample.norm.weight"]) # type: ignore + d.norm.bias.copy_(wstate["module.layers3.0.downsample.norm.bias"]) # type: ignore + + for bname, block in layers4_0.blocks.named_children(): + block.load_from(weights, n_block=bname, layer="layers4") # type: ignore[operator] + + if layers4_0.downsample is not None: + d = layers4_0.downsample + d.reduction.weight.copy_(wstate["module.layers4.0.downsample.reduction.weight"]) # type: ignore + d.norm.weight.copy_(wstate["module.layers4.0.downsample.norm.weight"]) # type: ignore + d.norm.bias.copy_(wstate["module.layers4.0.downsample.norm.bias"]) # type: ignore @torch.jit.unused def _check_input_size(self, spatial_shape): @@ -532,7 +529,7 @@ def forward(self, x, mask): q = q * self.scale attn = q @ k.transpose(-2, -1) relative_position_bias = self.relative_position_bias_table[ - self.relative_position_index.clone()[:n, :n].reshape(-1) + self.relative_position_index.clone()[:n, :n].reshape(-1) # type: ignore[operator] ].reshape(n, n, -1) relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() attn = attn + relative_position_bias.unsqueeze(0) @@ -691,7 +688,7 @@ def load_from(self, weights, n_block, layer): self.norm1.weight.copy_(weights["state_dict"][root + block_names[0]]) self.norm1.bias.copy_(weights["state_dict"][root + block_names[1]]) self.attn.relative_position_bias_table.copy_(weights["state_dict"][root + block_names[2]]) - self.attn.relative_position_index.copy_(weights["state_dict"][root + block_names[3]]) + self.attn.relative_position_index.copy_(weights["state_dict"][root + block_names[3]]) # type: ignore[operator] self.attn.qkv.weight.copy_(weights["state_dict"][root + block_names[4]]) self.attn.qkv.bias.copy_(weights["state_dict"][root + block_names[5]]) self.attn.proj.weight.copy_(weights["state_dict"][root + block_names[6]]) @@ -1118,7 +1115,7 @@ def filter_swinunetr(key, value): ) ssl_weights_path = "./ssl_pretrained_weights.pth" download_url(resource, ssl_weights_path) - ssl_weights = torch.load(ssl_weights_path)["model"] + ssl_weights = torch.load(ssl_weights_path, weights_only=True)["model"] dst_dict, loaded, not_loaded = copy_model_state(model, ssl_weights, filter_func=filter_swinunetr) diff --git a/monai/networks/nets/transchex.py b/monai/networks/nets/transchex.py index 6bfff3c956..bd756ec214 100644 --- a/monai/networks/nets/transchex.py +++ b/monai/networks/nets/transchex.py @@ -43,7 +43,7 @@ def __init__(self, *inputs, **kwargs) -> None: def init_bert_weights(self, module): if isinstance(module, (nn.Linear, nn.Embedding)): - module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) # type: ignore[union-attr,arg-type] elif 
isinstance(module, torch.nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) @@ -68,7 +68,8 @@ def from_pretrained( weights_path = cached_file(path_or_repo_id, filename, cache_dir=cache_dir) model = cls(num_language_layers, num_vision_layers, num_mixed_layers, bert_config, *inputs, **kwargs) if state_dict is None and not from_tf: - state_dict = torch.load(weights_path, map_location="cpu" if not torch.cuda.is_available() else None) + map_location = "cpu" if not torch.cuda.is_available() else None + state_dict = torch.load(weights_path, map_location=map_location, weights_only=True) if from_tf: return load_tf_weights_in_bert(model, weights_path) old_keys = [] diff --git a/monai/networks/nets/vista3d.py b/monai/networks/nets/vista3d.py index 6ecb664b85..a5c2cc13ef 100644 --- a/monai/networks/nets/vista3d.py +++ b/monai/networks/nets/vista3d.py @@ -315,7 +315,7 @@ def set_auto_grad(self, auto_freeze: bool = False, point_freeze: bool = False): """ if auto_freeze != self.auto_freeze: if hasattr(self.image_encoder, "set_auto_grad"): - self.image_encoder.set_auto_grad(auto_freeze=auto_freeze, point_freeze=point_freeze) + self.image_encoder.set_auto_grad(auto_freeze=auto_freeze, point_freeze=point_freeze) # type: ignore[operator] else: for param in self.image_encoder.parameters(): param.requires_grad = (not auto_freeze) and (not point_freeze) @@ -325,7 +325,7 @@ def set_auto_grad(self, auto_freeze: bool = False, point_freeze: bool = False): if point_freeze != self.point_freeze: if hasattr(self.image_encoder, "set_auto_grad"): - self.image_encoder.set_auto_grad(auto_freeze=auto_freeze, point_freeze=point_freeze) + self.image_encoder.set_auto_grad(auto_freeze=auto_freeze, point_freeze=point_freeze) # type: ignore[operator] else: for param in self.image_encoder.parameters(): param.requires_grad = (not auto_freeze) and (not point_freeze) @@ -543,10 +543,10 @@ def forward( point_embedding = self.pe_layer.forward_with_coords(points, out_shape) # type: ignore point_embedding[point_labels == -1] = 0.0 point_embedding[point_labels == -1] += self.not_a_point_embed.weight - point_embedding[point_labels == 0] += self.point_embeddings[0].weight - point_embedding[point_labels == 1] += self.point_embeddings[1].weight - point_embedding[point_labels == 2] += self.point_embeddings[0].weight + self.special_class_embed.weight - point_embedding[point_labels == 3] += self.point_embeddings[1].weight + self.special_class_embed.weight + point_embedding[point_labels == 0] += self.point_embeddings[0].weight # type: ignore[arg-type] + point_embedding[point_labels == 1] += self.point_embeddings[1].weight # type: ignore[arg-type] + point_embedding[point_labels == 2] += self.point_embeddings[0].weight + self.special_class_embed.weight # type: ignore[operator] + point_embedding[point_labels == 3] += self.point_embeddings[1].weight + self.special_class_embed.weight # type: ignore[operator] output_tokens = self.mask_tokens.weight output_tokens = output_tokens.unsqueeze(0).expand(point_embedding.size(0), -1, -1) @@ -884,7 +884,7 @@ def _pe_encoding(self, coords: torch.torch.Tensor) -> torch.torch.Tensor: coords = 2 * coords - 1 # [bs=1,N=2,2] @ [2,128] # [bs=1, N=2, 128] - coords = coords @ self.positional_encoding_gaussian_matrix + coords = coords @ self.positional_encoding_gaussian_matrix # type: ignore[operator] coords = 2 * np.pi * coords # outputs d_1 x ... 
x d_n x C shape # [bs=1, N=2, 128+128=256] diff --git a/monai/networks/utils.py b/monai/networks/utils.py index 2279bed0b4..a41d4b1e33 100644 --- a/monai/networks/utils.py +++ b/monai/networks/utils.py @@ -22,7 +22,7 @@ from collections.abc import Callable, Mapping, Sequence from contextlib import contextmanager from copy import deepcopy -from typing import Any +from typing import Any, Iterable import numpy as np import torch @@ -1238,7 +1238,7 @@ def __init__(self, mod): def forward(self, x): dtype = x.dtype - with torch.amp.autocast("cuda", enabled=False): + with torch.autocast("cuda", enabled=False): ret = self.mod.forward(x.to(torch.float32)).to(dtype) return ret @@ -1255,7 +1255,7 @@ def __init__(self, mod): def forward(self, *args): from_dtype = args[0].dtype - with torch.amp.autocast("cuda", enabled=False): + with torch.autocast("cuda", enabled=False): ret = self.mod.forward(*cast_all(args, from_dtype=from_dtype, to_dtype=torch.float32)) return cast_all(ret, from_dtype=torch.float32, to_dtype=from_dtype) @@ -1291,7 +1291,8 @@ def simple_replace(base_t: type[nn.Module], dest_t: type[nn.Module]) -> Callable def expansion_fn(mod: nn.Module) -> nn.Module | None: if not isinstance(mod, base_t): return None - args = [getattr(mod, name, None) for name in mod.__constants__] + constants: Iterable = mod.__constants__ # type: ignore[assignment] + args = [getattr(mod, name, None) for name in constants] out = dest_t(*args) return out diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py index 8fe658ad3e..ed0a1ad9ac 100644 --- a/monai/transforms/intensity/array.py +++ b/monai/transforms/intensity/array.py @@ -1856,7 +1856,7 @@ def interp(self, x: NdarrayOrTensor, xp: NdarrayOrTensor, fp: NdarrayOrTensor) - indices = ns.searchsorted(xp.reshape(-1), x.reshape(-1)) - 1 indices = ns.clip(indices, 0, len(m) - 1) - f = (m[indices] * x.reshape(-1) + b[indices]).reshape(x.shape) + f: NdarrayOrTensor = (m[indices] * x.reshape(-1) + b[indices]).reshape(x.shape) f[x < xp[0]] = fp[0] f[x > xp[-1]] = fp[-1] return f diff --git a/monai/transforms/spatial/array.py b/monai/transforms/spatial/array.py index e4ed196eff..a75bb390cd 100644 --- a/monai/transforms/spatial/array.py +++ b/monai/transforms/spatial/array.py @@ -1758,13 +1758,13 @@ def __call__( if self.affine is None: affine = torch.eye(spatial_dims + 1, device=_device) if self.rotate_params: - affine @= create_rotate(spatial_dims, self.rotate_params, device=_device, backend=_b) + affine @= create_rotate(spatial_dims, self.rotate_params, device=_device, backend=_b) # type: ignore[assignment] if self.shear_params: - affine @= create_shear(spatial_dims, self.shear_params, device=_device, backend=_b) + affine @= create_shear(spatial_dims, self.shear_params, device=_device, backend=_b) # type: ignore[assignment] if self.translate_params: - affine @= create_translate(spatial_dims, self.translate_params, device=_device, backend=_b) + affine @= create_translate(spatial_dims, self.translate_params, device=_device, backend=_b) # type: ignore[assignment] if self.scale_params: - affine @= create_scale(spatial_dims, self.scale_params, device=_device, backend=_b) + affine @= create_scale(spatial_dims, self.scale_params, device=_device, backend=_b) # type: ignore[assignment] else: affine = self.affine # type: ignore affine = to_affine_nd(spatial_dims, affine) @@ -1780,7 +1780,7 @@ def __call__( grid_ = ((affine @ sc) @ grid_.view((grid_.shape[0], -1))).view([-1] + list(grid_.shape[1:])) else: grid_ = (affine @ 
grid_.view((grid_.shape[0], -1))).view([-1] + list(grid_.shape[1:])) - return grid_, affine + return grid_, affine # type: ignore[return-value] class RandAffineGrid(Randomizable, LazyTransform): @@ -3257,7 +3257,7 @@ def filter_threshold(self, image_np: NdarrayOrTensor, locations: np.ndarray) -> tuple[NdarrayOrTensor, numpy.ndarray]: tuple of filtered patches and locations. """ n_dims = len(image_np.shape) - idx = argwhere(image_np.sum(tuple(range(1, n_dims))) < self.threshold).reshape(-1) + idx = argwhere(image_np.sum(tuple(range(1, n_dims))) < self.threshold).reshape(-1) # type: ignore[operator] idx_np = convert_data_type(idx, np.ndarray)[0] return image_np[idx], locations[idx_np] diff --git a/monai/utils/state_cacher.py b/monai/utils/state_cacher.py index 60a074544b..c59436525c 100644 --- a/monai/utils/state_cacher.py +++ b/monai/utils/state_cacher.py @@ -124,7 +124,7 @@ def retrieve(self, key: Hashable) -> Any: fn = self.cached[key]["obj"] # pytype: disable=attribute-error if not os.path.exists(fn): # pytype: disable=wrong-arg-types raise RuntimeError(f"Failed to load state in {fn}. File doesn't exist anymore.") - data_obj = torch.load(fn, map_location=lambda storage, location: storage) + data_obj = torch.load(fn, map_location=lambda storage, location: storage, weights_only=False) # copy back to device if necessary if "device" in self.cached[key]: data_obj = data_obj.to(self.cached[key]["device"]) diff --git a/requirements.txt b/requirements.txt index 452a62adda..ad394ce807 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,3 @@ -torch>=2.3.0,<2.6; sys_platform != 'win32' -torch>=2.4.1,<2.6; sys_platform == 'win32' +torch>=2.3.0; sys_platform != 'win32' +torch>=2.4.1; sys_platform == 'win32' numpy>=1.24,<3.0 diff --git a/runtests.sh b/runtests.sh index 2a399d5c3a..fd7df79722 100755 --- a/runtests.sh +++ b/runtests.sh @@ -120,7 +120,7 @@ function print_usage { # FIXME: https://github.com/Project-MONAI/MONAI/issues/4354 protobuf_major_version=$("${PY_EXE}" -m pip list | grep '^protobuf ' | tr -s ' ' | cut -d' ' -f2 | cut -d'.' -f1) -if [ "$protobuf_major_version" -ge "4" ] +if [ ! 
-z "$protobuf_major_version" ] && [ "$protobuf_major_version" -ge "4" ] then export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python fi diff --git a/tests/bundle/test_bundle_download.py b/tests/bundle/test_bundle_download.py index 38620d98ff..da58a6313e 100644 --- a/tests/bundle/test_bundle_download.py +++ b/tests/bundle/test_bundle_download.py @@ -266,6 +266,7 @@ def test_load_weights(self, bundle_files, bundle_name, repo, device, model_file) with skip_if_downloading_fails(): # download bundle, and load weights from the downloaded path with tempfile.TemporaryDirectory() as tempdir: + bundle_root = os.path.join(tempdir, bundle_name) # load weights weights = load( name=bundle_name, @@ -278,7 +279,7 @@ def test_load_weights(self, bundle_files, bundle_name, repo, device, model_file) return_state_dict=True, ) # prepare network - with open(os.path.join(tempdir, bundle_name, bundle_files[2])) as f: + with open(os.path.join(bundle_root, bundle_files[2])) as f: net_args = json.load(f)["network_def"] model_name = net_args["_target_"] del net_args["_target_"] @@ -288,9 +289,13 @@ def test_load_weights(self, bundle_files, bundle_name, repo, device, model_file) model.eval() # prepare data and test - input_tensor = torch.load(os.path.join(tempdir, bundle_name, bundle_files[4]), map_location=device) + input_tensor = torch.load( + os.path.join(bundle_root, bundle_files[4]), map_location=device, weights_only=True + ) output = model.forward(input_tensor) - expected_output = torch.load(os.path.join(tempdir, bundle_name, bundle_files[3]), map_location=device) + expected_output = torch.load( + os.path.join(bundle_root, bundle_files[3]), map_location=device, weights_only=True + ) assert_allclose(output, expected_output, atol=1e-4, rtol=1e-4, type_test=False) # load instantiated model directly and test, since the bundle has been downloaded, @@ -350,7 +355,7 @@ def test_load_weights_with_net_override(self, bundle_name, device, net_override) config_file=f"{tempdir}/spleen_ct_segmentation/configs/train.json", workflow_type="train" ) expected_model = workflow.network_def.to(device) - expected_model.load_state_dict(torch.load(model_path)) + expected_model.load_state_dict(torch.load(model_path, weights_only=True)) expected_output = expected_model(input_tensor) assert_allclose(output, expected_output, atol=1e-4, rtol=1e-4, type_test=False) @@ -378,6 +383,7 @@ def test_load_ts_module(self, bundle_files, bundle_name, version, repo, device, with skip_if_downloading_fails(): # load ts module with tempfile.TemporaryDirectory() as tempdir: + bundle_root = os.path.join(tempdir, bundle_name) # load ts module model_ts, metadata, extra_file_dict = load( name=bundle_name, @@ -393,9 +399,13 @@ def test_load_ts_module(self, bundle_files, bundle_name, version, repo, device, ) # prepare and test ts - input_tensor = torch.load(os.path.join(tempdir, bundle_name, bundle_files[1]), map_location=device) + input_tensor = torch.load( + os.path.join(bundle_root, bundle_files[1]), map_location=device, weights_only=True + ) output = model_ts.forward(input_tensor) - expected_output = torch.load(os.path.join(tempdir, bundle_name, bundle_files[0]), map_location=device) + expected_output = torch.load( + os.path.join(bundle_root, bundle_files[0]), map_location=device, weights_only=True + ) assert_allclose(output, expected_output, atol=1e-4, rtol=1e-4, type_test=False) # test metadata self.assertTrue(metadata["pytorch_version"] == "1.7.1") diff --git a/tests/config/test_cv2_dist.py b/tests/config/test_cv2_dist.py index 2ef8e5b10f..3bcb68e553 100644 
--- a/tests/config/test_cv2_dist.py +++ b/tests/config/test_cv2_dist.py @@ -16,7 +16,6 @@ import numpy as np import torch import torch.distributed as dist -from torch.cuda.amp import autocast # FIXME: test for the workaround of https://github.com/Project-MONAI/MONAI/issues/5291 from monai.config.deviceconfig import print_config @@ -33,7 +32,7 @@ def main_worker(rank, ngpus_per_node, port): model, device_ids=[rank], output_device=rank, find_unused_parameters=False ) x = torch.ones(1, 1, 12, 12, 12).to(rank) - with autocast(enabled=True): + with torch.autocast("cuda"): model(x) if dist.is_initialized(): diff --git a/tests/data/meta_tensor/test_meta_tensor.py b/tests/data/meta_tensor/test_meta_tensor.py index cd3def4de1..f52d70e7b6 100644 --- a/tests/data/meta_tensor/test_meta_tensor.py +++ b/tests/data/meta_tensor/test_meta_tensor.py @@ -245,7 +245,7 @@ def test_pickling(self): with tempfile.TemporaryDirectory() as tmp_dir: fname = os.path.join(tmp_dir, "im.pt") torch.save(m, fname) - m2 = torch.load(fname) + m2 = torch.load(fname, weights_only=False) self.check(m2, m, ids=False) @skip_if_no_cuda @@ -256,7 +256,7 @@ def test_amp(self): conv = torch.nn.Conv2d(im.shape[1], 5, 3) conv.to(device) im_conv = conv(im) - with torch.cuda.amp.autocast(): + with torch.autocast("cuda"): im_conv2 = conv(im) self.check(im_conv2, im_conv, ids=False, rtol=1e-2, atol=1e-2) diff --git a/tests/integration/test_integration_classification_2d.py b/tests/integration/test_integration_classification_2d.py index fd9e58aaf8..aecfa2efab 100644 --- a/tests/integration/test_integration_classification_2d.py +++ b/tests/integration/test_integration_classification_2d.py @@ -166,7 +166,7 @@ def run_inference_test(root_dir, test_x, test_y, device="cuda:0", num_workers=10 model = DenseNet121(spatial_dims=2, in_channels=1, out_channels=len(np.unique(test_y))).to(device) model_filename = os.path.join(root_dir, "best_metric_model.pth") - model.load_state_dict(torch.load(model_filename)) + model.load_state_dict(torch.load(model_filename, weights_only=True)) y_true = [] y_pred = [] with eval_mode(model): diff --git a/tests/integration/test_integration_fast_train.py b/tests/integration/test_integration_fast_train.py index f9beb5613d..814c4b182c 100644 --- a/tests/integration/test_integration_fast_train.py +++ b/tests/integration/test_integration_fast_train.py @@ -186,7 +186,7 @@ def test_train_timing(self): step += 1 optimizer.zero_grad() # set AMP for training - with torch.cuda.amp.autocast(): + with torch.autocast("cuda"): outputs = model(batch_data["image"]) loss = loss_function(outputs, batch_data["label"]) scaler.scale(loss).backward() @@ -207,7 +207,7 @@ def test_train_timing(self): roi_size = (96, 96, 96) sw_batch_size = 4 # set AMP for validation - with torch.cuda.amp.autocast(): + with torch.autocast("cuda"): val_outputs = sliding_window_inference(val_data["image"], roi_size, sw_batch_size, model) val_outputs = [post_pred(i) for i in decollate_batch(val_outputs)] diff --git a/tests/integration/test_integration_segmentation_3d.py b/tests/integration/test_integration_segmentation_3d.py index fb2937739f..7c30150505 100644 --- a/tests/integration/test_integration_segmentation_3d.py +++ b/tests/integration/test_integration_segmentation_3d.py @@ -216,7 +216,7 @@ def run_inference_test(root_dir, device="cuda:0"): ).to(device) model_filename = os.path.join(root_dir, "best_metric_model.pth") - model.load_state_dict(torch.load(model_filename)) + model.load_state_dict(torch.load(model_filename, weights_only=True)) with 
eval_mode(model): # resampling with align_corners=True or dtype=float64 will generate # slight different results between PyTorch 1.5 an 1.6 diff --git a/tests/metrics/test_compute_multiscalessim_metric.py b/tests/metrics/test_compute_multiscalessim_metric.py index 3df8026c2b..d85e6f7bf6 100644 --- a/tests/metrics/test_compute_multiscalessim_metric.py +++ b/tests/metrics/test_compute_multiscalessim_metric.py @@ -32,7 +32,7 @@ def test2d_gaussian(self): metric(preds, target) result = metric.aggregate() expected_value = 0.023176 - self.assertTrue(expected_value - result.item() < 0.000001) + self.assertAlmostEqual(expected_value, result.item(), 4) def test2d_uniform(self): set_determinism(0) @@ -45,7 +45,7 @@ def test2d_uniform(self): metric(preds, target) result = metric.aggregate() expected_value = 0.022655 - self.assertTrue(expected_value - result.item() < 0.000001) + self.assertAlmostEqual(expected_value, result.item(), 4) def test3d_gaussian(self): set_determinism(0) @@ -58,7 +58,7 @@ def test3d_gaussian(self): metric(preds, target) result = metric.aggregate() expected_value = 0.061796 - self.assertTrue(expected_value - result.item() < 0.000001) + self.assertAlmostEqual(expected_value, result.item(), 4) def input_ill_input_shape2d(self): metric = MultiScaleSSIMMetric(spatial_dims=3, weights=[0.5, 0.5]) diff --git a/tests/networks/nets/test_autoencoderkl.py b/tests/networks/nets/test_autoencoderkl.py index 0a3db60830..2d4c5b66ca 100644 --- a/tests/networks/nets/test_autoencoderkl.py +++ b/tests/networks/nets/test_autoencoderkl.py @@ -330,7 +330,7 @@ def test_compatibility_with_monai_generative(self): weight_path = os.path.join(tmpdir, filename) download_url(url=url, filepath=weight_path, hash_val=hash_val, hash_type=hash_type) - net.load_old_state_dict(torch.load(weight_path), verbose=False) + net.load_old_state_dict(torch.load(weight_path, weights_only=True), verbose=False) if __name__ == "__main__": diff --git a/tests/networks/nets/test_controlnet.py b/tests/networks/nets/test_controlnet.py index 9503518762..6158dc2eef 100644 --- a/tests/networks/nets/test_controlnet.py +++ b/tests/networks/nets/test_controlnet.py @@ -208,7 +208,7 @@ def test_compatibility_with_monai_generative(self): weight_path = os.path.join(tmpdir, filename) download_url(url=url, filepath=weight_path, hash_val=hash_val, hash_type=hash_type) - net.load_old_state_dict(torch.load(weight_path), verbose=False) + net.load_old_state_dict(torch.load(weight_path, weights_only=True), verbose=False) if __name__ == "__main__": diff --git a/tests/networks/nets/test_diffusion_model_unet.py b/tests/networks/nets/test_diffusion_model_unet.py index a7c823709d..3bca26882c 100644 --- a/tests/networks/nets/test_diffusion_model_unet.py +++ b/tests/networks/nets/test_diffusion_model_unet.py @@ -578,7 +578,7 @@ def test_compatibility_with_monai_generative(self): weight_path = os.path.join(tmpdir, filename) download_url(url=url, filepath=weight_path, hash_val=hash_val, hash_type=hash_type) - net.load_old_state_dict(torch.load(weight_path), verbose=False) + net.load_old_state_dict(torch.load(weight_path, weights_only=True), verbose=False) if __name__ == "__main__": diff --git a/tests/networks/nets/test_network_consistency.py b/tests/networks/nets/test_network_consistency.py index e09826de75..4ce198b92f 100644 --- a/tests/networks/nets/test_network_consistency.py +++ b/tests/networks/nets/test_network_consistency.py @@ -55,7 +55,7 @@ def test_network_consistency(self, net_name, data_path, json_path): print("JSON path: " + json_path) # Load 
data - loaded_data = torch.load(data_path) + loaded_data = torch.load(data_path, weights_only=True) # Load json from file json_file = open(json_path) diff --git a/tests/networks/nets/test_swin_unetr.py b/tests/networks/nets/test_swin_unetr.py index 4908907bfe..2c4532ecc4 100644 --- a/tests/networks/nets/test_swin_unetr.py +++ b/tests/networks/nets/test_swin_unetr.py @@ -128,7 +128,7 @@ def test_filter_swinunetr(self, input_param, key, value): data_spec["url"], weight_path, hash_val=data_spec["hash_val"], hash_type=data_spec["hash_type"] ) - ssl_weight = torch.load(weight_path)["model"] + ssl_weight = torch.load(weight_path, weights_only=True)["model"] net = SwinUNETR(**input_param) dst_dict, loaded, not_loaded = copy_model_state(net, ssl_weight, filter_func=filter_swinunetr) assert_allclose(dst_dict[key][:8], value, atol=1e-4, rtol=1e-4, type_test=False) diff --git a/tests/networks/nets/test_transformer.py b/tests/networks/nets/test_transformer.py index f9264ba153..daf424c174 100644 --- a/tests/networks/nets/test_transformer.py +++ b/tests/networks/nets/test_transformer.py @@ -101,7 +101,7 @@ def test_compatibility_with_monai_generative(self): weight_path = os.path.join(tmpdir, filename) download_url(url=url, filepath=weight_path, hash_val=hash_val, hash_type=hash_type) - net.load_old_state_dict(torch.load(weight_path), verbose=False) + net.load_old_state_dict(torch.load(weight_path, weights_only=True), verbose=False) if __name__ == "__main__": diff --git a/tests/networks/test_save_state.py b/tests/networks/test_save_state.py index 0581a3ce1f..329065da2b 100644 --- a/tests/networks/test_save_state.py +++ b/tests/networks/test_save_state.py @@ -64,7 +64,7 @@ def test_file(self, src, expected_keys, create_dir=True, atomic=True, func=None, if kwargs is None: kwargs = {} save_state(src=src, path=path, create_dir=create_dir, atomic=atomic, func=func, **kwargs) - ckpt = dict(torch.load(path)) + ckpt = dict(torch.load(path, weights_only=True)) for k in ckpt.keys(): self.assertIn(k, expected_keys) From 8555b674a4e0b55abcbc4a851b2faa3e9ac55871 Mon Sep 17 00:00:00 2001 From: Can-Zhao Date: Mon, 10 Mar 2025 14:56:52 +0000 Subject: [PATCH 45/55] make it 2D/3D compartible, rm a outdated comment Signed-off-by: Can-Zhao --- monai/networks/schedulers/rectified_flow.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/monai/networks/schedulers/rectified_flow.py b/monai/networks/schedulers/rectified_flow.py index 5bdeae0931..e86306a314 100644 --- a/monai/networks/schedulers/rectified_flow.py +++ b/monai/networks/schedulers/rectified_flow.py @@ -174,10 +174,15 @@ def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timeste timepoints: torch.Tensor = timesteps.float() / self.num_train_timesteps timepoints = 1 - timepoints # [1,1/1000] - # timepoint (bsz) noise: (bsz, 4, frame, w ,h) # expand timepoint to noise shape - timepoints = timepoints.unsqueeze(1).unsqueeze(1).unsqueeze(1).unsqueeze(1) - timepoints = timepoints.repeat(1, noise.shape[1], noise.shape[2], noise.shape[3], noise.shape[4]) + if len(noise.shape) == 5: + timepoints = timepoints.unsqueeze(1).unsqueeze(1).unsqueeze(1).unsqueeze(1) + timepoints = timepoints.repeat(1, noise.shape[1], noise.shape[2], noise.shape[3], noise.shape[4]) + elif len(noise.shape) == 4: + timepoints = timepoints.unsqueeze(1).unsqueeze(1).unsqueeze(1) + timepoints = timepoints.repeat(1, noise.shape[1], noise.shape[2], noise.shape[3]) + else: + raise ValueError(f"noise has to be 4D or 5D tensor. 
yet got shape of {noise.shape}.") noisy_samples: torch.Tensor = timepoints * original_samples + (1 - timepoints) * noise return noisy_samples @@ -246,7 +251,7 @@ def sample_timesteps(self, x_start): t = t.long() if self.use_timestep_transform: - input_img_size_numel = torch.prod(torch.tensor(x_start.shape[-3:])) + input_img_size_numel = torch.prod(torch.tensor(x_start.shape[2:])) t = timestep_transform( t, input_img_size_numel=input_img_size_numel, From 14664e808c14d69ca7867e38b1214093da66830d Mon Sep 17 00:00:00 2001 From: Can-Zhao Date: Mon, 10 Mar 2025 15:07:05 +0000 Subject: [PATCH 46/55] make it 2D/3D compartible, rm a outdated comment Signed-off-by: Can-Zhao --- monai/networks/schedulers/rectified_flow.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/monai/networks/schedulers/rectified_flow.py b/monai/networks/schedulers/rectified_flow.py index e86306a314..5596cfffcb 100644 --- a/monai/networks/schedulers/rectified_flow.py +++ b/monai/networks/schedulers/rectified_flow.py @@ -82,6 +82,7 @@ class RFlowScheduler(Scheduler): transform_scale (float): Scaling factor for timestep transformation, used only if use_timestep_transform=True. steps_offset (int): Offset added to computed timesteps, used only if use_timestep_transform=True. base_img_size_numel (int): Reference image volume size for scaling, used only if use_timestep_transform=True. + spatial_dim (int): 2 or 3, incidcating 2D or 3D images, used only if use_timestep_transform=True. Example: @@ -93,7 +94,8 @@ class RFlowScheduler(Scheduler): use_discrete_timesteps = True, sample_method = 'logit-normal', use_timestep_transform = True, - base_img_size_numel = 32 * 32 * 32 + base_img_size_numel = 32 * 32 * 32, + spatial_dim = 3 ) # during training @@ -139,10 +141,12 @@ def __init__( transform_scale: float = 1.0, steps_offset: int = 0, base_img_size_numel: int = 32 * 32 * 32, + spatial_dim: int = 3 ): self.num_train_timesteps = num_train_timesteps self.use_discrete_timesteps = use_discrete_timesteps self.base_img_size_numel = base_img_size_numel + self.spatial_dim = spatial_dim # sample method if sample_method not in ["uniform", "logit-normal"]: @@ -166,7 +170,7 @@ def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timeste Args: original_samples: original samples noise: noise to add to samples - timesteps: timesteps tensor indicating the timestep to be computed for each sample. + timesteps: timesteps tensor with shape of (N,), indicating the timestep to be computed for each sample. Returns: noisy_samples: sample with added noise @@ -175,14 +179,14 @@ def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timeste timepoints = 1 - timepoints # [1,1/1000] # expand timepoint to noise shape + # Just in case timepoints is not 1D or 2D tensor, make it to be same shape as noise if len(noise.shape) == 5: timepoints = timepoints.unsqueeze(1).unsqueeze(1).unsqueeze(1).unsqueeze(1) timepoints = timepoints.repeat(1, noise.shape[1], noise.shape[2], noise.shape[3], noise.shape[4]) elif len(noise.shape) == 4: timepoints = timepoints.unsqueeze(1).unsqueeze(1).unsqueeze(1) timepoints = timepoints.repeat(1, noise.shape[1], noise.shape[2], noise.shape[3]) - else: - raise ValueError(f"noise has to be 4D or 5D tensor. 
yet got shape of {noise.shape}.") + noisy_samples: torch.Tensor = timepoints * original_samples + (1 - timepoints) * noise return noisy_samples @@ -223,6 +227,7 @@ def set_timesteps( input_img_size_numel=input_img_size_numel, base_img_size_numel=self.base_img_size_numel, num_train_timesteps=self.num_train_timesteps, + spatial_dim = self.spatial_dim ) for t in timesteps ] @@ -257,6 +262,7 @@ def sample_timesteps(self, x_start): input_img_size_numel=input_img_size_numel, base_img_size_numel=self.base_img_size_numel, num_train_timesteps=self.num_train_timesteps, + spatial_dim = len(x_start.shape)-2 ) return t From 20aa7fd19c1c536804a20ec126a24d6c059953fc Mon Sep 17 00:00:00 2001 From: Can-Zhao Date: Mon, 10 Mar 2025 15:11:15 +0000 Subject: [PATCH 47/55] make it 2D/3D compartible, rm a outdated comment Signed-off-by: Can-Zhao --- monai/networks/schedulers/rectified_flow.py | 1 - 1 file changed, 1 deletion(-) diff --git a/monai/networks/schedulers/rectified_flow.py b/monai/networks/schedulers/rectified_flow.py index 5596cfffcb..840c6fd542 100644 --- a/monai/networks/schedulers/rectified_flow.py +++ b/monai/networks/schedulers/rectified_flow.py @@ -179,7 +179,6 @@ def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timeste timepoints = 1 - timepoints # [1,1/1000] # expand timepoint to noise shape - # Just in case timepoints is not 1D or 2D tensor, make it to be same shape as noise if len(noise.shape) == 5: timepoints = timepoints.unsqueeze(1).unsqueeze(1).unsqueeze(1).unsqueeze(1) timepoints = timepoints.repeat(1, noise.shape[1], noise.shape[2], noise.shape[3], noise.shape[4]) From 3144c8ae93dbd60f68b795c424106445e3183112 Mon Sep 17 00:00:00 2001 From: Can-Zhao Date: Mon, 10 Mar 2025 15:18:08 +0000 Subject: [PATCH 48/55] make it 2D/3D compartible Signed-off-by: Can-Zhao --- monai/networks/schedulers/rectified_flow.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/monai/networks/schedulers/rectified_flow.py b/monai/networks/schedulers/rectified_flow.py index 840c6fd542..40affb26ba 100644 --- a/monai/networks/schedulers/rectified_flow.py +++ b/monai/networks/schedulers/rectified_flow.py @@ -141,7 +141,7 @@ def __init__( transform_scale: float = 1.0, steps_offset: int = 0, base_img_size_numel: int = 32 * 32 * 32, - spatial_dim: int = 3 + spatial_dim: int = 3, ): self.num_train_timesteps = num_train_timesteps self.use_discrete_timesteps = use_discrete_timesteps @@ -179,12 +179,12 @@ def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timeste timepoints = 1 - timepoints # [1,1/1000] # expand timepoint to noise shape - if len(noise.shape) == 5: - timepoints = timepoints.unsqueeze(1).unsqueeze(1).unsqueeze(1).unsqueeze(1) - timepoints = timepoints.repeat(1, noise.shape[1], noise.shape[2], noise.shape[3], noise.shape[4]) - elif len(noise.shape) == 4: - timepoints = timepoints.unsqueeze(1).unsqueeze(1).unsqueeze(1) - timepoints = timepoints.repeat(1, noise.shape[1], noise.shape[2], noise.shape[3]) + if noise.ndim == 5: + timepoints = timepoints[..., None, None, None, None].expand(-1, *noise.shape[1:]) + elif noise.ndim == 4: + timepoints = timepoints[..., None, None, None].expand(-1, *noise.shape[1:]) + else: + raise ValueError(f"noise tensor has to be 4D or 5D tensor, yet got shape of {noise.shape}") noisy_samples: torch.Tensor = timepoints * original_samples + (1 - timepoints) * noise @@ -226,7 +226,7 @@ def set_timesteps( input_img_size_numel=input_img_size_numel, base_img_size_numel=self.base_img_size_numel, 
num_train_timesteps=self.num_train_timesteps, - spatial_dim = self.spatial_dim + spatial_dim=self.spatial_dim, ) for t in timesteps ] @@ -261,7 +261,7 @@ def sample_timesteps(self, x_start): input_img_size_numel=input_img_size_numel, base_img_size_numel=self.base_img_size_numel, num_train_timesteps=self.num_train_timesteps, - spatial_dim = len(x_start.shape)-2 + spatial_dim=len(x_start.shape) - 2, ) return t From 0bf00415ccfa794f89319f3cf8fa60432d9b4ec3 Mon Sep 17 00:00:00 2001 From: Can-Zhao Date: Mon, 10 Mar 2025 20:00:22 +0000 Subject: [PATCH 49/55] add more test Signed-off-by: Can-Zhao --- monai/networks/schedulers/rectified_flow.py | 2 +- tests/inferers/test_controlnet_inferers.py | 130 +++++++++++++----- tests/inferers/test_diffusion_inferer.py | 82 +++++++++++ .../schedulers/test_scheduler_rflow.py | 89 ++++++++++++ 4 files changed, 269 insertions(+), 34 deletions(-) create mode 100644 tests/networks/schedulers/test_scheduler_rflow.py diff --git a/monai/networks/schedulers/rectified_flow.py b/monai/networks/schedulers/rectified_flow.py index 40affb26ba..452160ae0c 100644 --- a/monai/networks/schedulers/rectified_flow.py +++ b/monai/networks/schedulers/rectified_flow.py @@ -55,7 +55,7 @@ def timestep_transform( torch.Tensor: Transformed timestep(s). """ t = t / num_train_timesteps - ratio_space = (input_img_size_numel / base_img_size_numel).pow(1.0 / spatial_dim) + ratio_space = (input_img_size_numel / base_img_size_numel) ** (1.0 / spatial_dim) ratio = ratio_space * scale new_t = ratio * t / (1 + (ratio - 1) * t) diff --git a/tests/inferers/test_controlnet_inferers.py b/tests/inferers/test_controlnet_inferers.py index 909f2cf398..dea0c10472 100644 --- a/tests/inferers/test_controlnet_inferers.py +++ b/tests/inferers/test_controlnet_inferers.py @@ -26,7 +26,7 @@ SPADEAutoencoderKL, SPADEDiffusionModelUNet, ) -from monai.networks.schedulers import DDIMScheduler, DDPMScheduler +from monai.networks.schedulers import DDIMScheduler, DDPMScheduler, RFlowScheduler from monai.utils import optional_import _, has_scipy = optional_import("scipy") @@ -545,6 +545,32 @@ def test_ddim_sampler(self, model_params, controlnet_params, input_shape): ) self.assertEqual(len(intermediates), 10) + @parameterized.expand(CNDM_TEST_CASES) + @skipUnless(has_einops, "Requires einops") + def test_rflow_sampler(self, model_params, controlnet_params, input_shape): + model = DiffusionModelUNet(**model_params) + controlnet = ControlNet(**controlnet_params) + device = "cuda:0" if torch.cuda.is_available() else "cpu" + model.to(device) + model.eval() + controlnet.to(device) + controlnet.eval() + mask = torch.randn(input_shape).to(device) + noise = torch.randn(input_shape).to(device) + scheduler = RFlowScheduler(num_train_timesteps=1000) + inferer = ControlNetDiffusionInferer(scheduler=scheduler) + scheduler.set_timesteps(num_inference_steps=10) + sample, intermediates = inferer.sample( + input_noise=noise, + diffusion_model=model, + scheduler=scheduler, + controlnet=controlnet, + cn_cond=mask, + save_intermediates=True, + intermediate_steps=1, + ) + self.assertEqual(len(intermediates), 10) + @parameterized.expand(CNDM_TEST_CASES) @skipUnless(has_einops, "Requires einops") def test_sampler_conditioned(self, model_params, controlnet_params, input_shape): @@ -561,6 +587,8 @@ def test_sampler_conditioned(self, model_params, controlnet_params, input_shape) controlnet.eval() mask = torch.randn(input_shape).to(device) noise = torch.randn(input_shape).to(device) + + # DDIM scheduler = DDIMScheduler(num_train_timesteps=1000) 
inferer = ControlNetDiffusionInferer(scheduler=scheduler) scheduler.set_timesteps(num_inference_steps=10) @@ -577,6 +605,23 @@ def test_sampler_conditioned(self, model_params, controlnet_params, input_shape) ) self.assertEqual(len(intermediates), 10) + # RFlow + scheduler = RFlowScheduler(num_train_timesteps=1000) + inferer = ControlNetDiffusionInferer(scheduler=scheduler) + scheduler.set_timesteps(num_inference_steps=10) + conditioning = torch.randn([input_shape[0], 1, 3]).to(device) + sample, intermediates = inferer.sample( + input_noise=noise, + diffusion_model=model, + controlnet=controlnet, + cn_cond=mask, + scheduler=scheduler, + save_intermediates=True, + intermediate_steps=1, + conditioning=conditioning, + ) + self.assertEqual(len(intermediates), 10) + @parameterized.expand(CNDM_TEST_CASES) @skipUnless(has_einops, "Requires einops") def test_get_likelihood(self, model_params, controlnet_params, input_shape): @@ -638,6 +683,8 @@ def test_sampler_conditioned_concat(self, model_params, controlnet_params, input conditioning_shape = list(input_shape) conditioning_shape[1] = n_concat_channel conditioning = torch.randn(conditioning_shape).to(device) + + # DDIM scheduler = DDIMScheduler(num_train_timesteps=1000) inferer = ControlNetDiffusionInferer(scheduler=scheduler) scheduler.set_timesteps(num_inference_steps=10) @@ -654,6 +701,23 @@ def test_sampler_conditioned_concat(self, model_params, controlnet_params, input ) self.assertEqual(len(intermediates), 10) + # RFlow + scheduler = RFlowScheduler(num_train_timesteps=1000) + inferer = ControlNetDiffusionInferer(scheduler=scheduler) + scheduler.set_timesteps(num_inference_steps=10) + sample, intermediates = inferer.sample( + input_noise=noise, + diffusion_model=model, + controlnet=controlnet, + cn_cond=mask, + scheduler=scheduler, + save_intermediates=True, + intermediate_steps=1, + conditioning=conditioning, + mode="concat", + ) + self.assertEqual(len(intermediates), 10) + class LatentControlNetTestDiffusionSamplingInferer(unittest.TestCase): @parameterized.expand(LATENT_CNDM_TEST_CASES) @@ -691,39 +755,39 @@ def test_prediction_shape( input = torch.randn(input_shape).to(device) mask = torch.randn(input_shape).to(device) noise = torch.randn(latent_shape).to(device) - scheduler = DDPMScheduler(num_train_timesteps=10) - inferer = ControlNetLatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0) - scheduler.set_timesteps(num_inference_steps=10) - timesteps = torch.randint(0, scheduler.num_train_timesteps, (input_shape[0],), device=input.device).long() - - if dm_model_type == "SPADEDiffusionModelUNet": - input_shape_seg = list(input_shape) - if "label_nc" in stage_2_params.keys(): - input_shape_seg[1] = stage_2_params["label_nc"] + for scheduler in [DDPMScheduler(num_train_timesteps=10), RFlowScheduler(num_train_timesteps=1000)]: + inferer = ControlNetLatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0) + scheduler.set_timesteps(num_inference_steps=10) + timesteps = torch.randint(0, scheduler.num_train_timesteps, (input_shape[0],), device=input.device).long() + + if dm_model_type == "SPADEDiffusionModelUNet": + input_shape_seg = list(input_shape) + if "label_nc" in stage_2_params.keys(): + input_shape_seg[1] = stage_2_params["label_nc"] + else: + input_shape_seg[1] = autoencoder_params["label_nc"] + input_seg = torch.randn(input_shape_seg).to(device) + prediction = inferer( + inputs=input, + autoencoder_model=stage_1, + diffusion_model=stage_2, + controlnet=controlnet, + cn_cond=mask, + seg=input_seg, + noise=noise, + 
timesteps=timesteps, + ) else: - input_shape_seg[1] = autoencoder_params["label_nc"] - input_seg = torch.randn(input_shape_seg).to(device) - prediction = inferer( - inputs=input, - autoencoder_model=stage_1, - diffusion_model=stage_2, - controlnet=controlnet, - cn_cond=mask, - seg=input_seg, - noise=noise, - timesteps=timesteps, - ) - else: - prediction = inferer( - inputs=input, - autoencoder_model=stage_1, - diffusion_model=stage_2, - noise=noise, - timesteps=timesteps, - controlnet=controlnet, - cn_cond=mask, - ) - self.assertEqual(prediction.shape, latent_shape) + prediction = inferer( + inputs=input, + autoencoder_model=stage_1, + diffusion_model=stage_2, + noise=noise, + timesteps=timesteps, + controlnet=controlnet, + cn_cond=mask, + ) + self.assertEqual(prediction.shape, latent_shape) @parameterized.expand(LATENT_CNDM_TEST_CASES) @skipUnless(has_einops, "Requires einops") diff --git a/tests/inferers/test_diffusion_inferer.py b/tests/inferers/test_diffusion_inferer.py index 6b74452288..59b320d8a7 100644 --- a/tests/inferers/test_diffusion_inferer.py +++ b/tests/inferers/test_diffusion_inferer.py @@ -160,6 +160,30 @@ def test_sampler_conditioned(self, model_params, input_shape): ) self.assertEqual(len(intermediates), 10) + @parameterized.expand(TEST_CASES) + @skipUnless(has_einops, "Requires einops") + def test_sampler_conditioned_rflow(self, model_params, input_shape): + model_params["with_conditioning"] = True + model_params["cross_attention_dim"] = 3 + model = DiffusionModelUNet(**model_params) + device = "cuda:0" if torch.cuda.is_available() else "cpu" + model.to(device) + model.eval() + noise = torch.randn(input_shape).to(device) + scheduler = RFlowScheduler(num_train_timesteps=1000) + inferer = DiffusionInferer(scheduler=scheduler) + scheduler.set_timesteps(num_inference_steps=10) + conditioning = torch.randn([input_shape[0], 1, 3]).to(device) + sample, intermediates = inferer.sample( + input_noise=noise, + diffusion_model=model, + scheduler=scheduler, + save_intermediates=True, + intermediate_steps=1, + conditioning=conditioning, + ) + self.assertEqual(len(intermediates), 10) + @parameterized.expand(TEST_CASES) @skipUnless(has_einops, "Requires einops") def test_get_likelihood(self, model_params, input_shape): @@ -220,6 +244,37 @@ def test_sampler_conditioned_concat(self, model_params, input_shape): ) self.assertEqual(len(intermediates), 10) + @parameterized.expand(TEST_CASES) + @skipUnless(has_einops, "Requires einops") + def test_sampler_conditioned_concat_rflow(self, model_params, input_shape): + # copy the model_params dict to prevent from modifying test cases + model_params = model_params.copy() + n_concat_channel = 2 + model_params["in_channels"] = model_params["in_channels"] + n_concat_channel + model_params["cross_attention_dim"] = None + model_params["with_conditioning"] = False + model = DiffusionModelUNet(**model_params) + device = "cuda:0" if torch.cuda.is_available() else "cpu" + model.to(device) + model.eval() + noise = torch.randn(input_shape).to(device) + conditioning_shape = list(input_shape) + conditioning_shape[1] = n_concat_channel + conditioning = torch.randn(conditioning_shape).to(device) + scheduler = RFlowScheduler(num_train_timesteps=1000) + inferer = DiffusionInferer(scheduler=scheduler) + scheduler.set_timesteps(num_inference_steps=10) + sample, intermediates = inferer.sample( + input_noise=noise, + diffusion_model=model, + scheduler=scheduler, + save_intermediates=True, + intermediate_steps=1, + conditioning=conditioning, + mode="concat", + ) + 
self.assertEqual(len(intermediates), 10) + @parameterized.expand(TEST_CASES) @skipUnless(has_einops, "Requires einops") def test_call_conditioned_concat(self, model_params, input_shape): @@ -247,6 +302,33 @@ def test_call_conditioned_concat(self, model_params, input_shape): ) self.assertEqual(sample.shape, input_shape) + @parameterized.expand(TEST_CASES) + @skipUnless(has_einops, "Requires einops") + def test_call_conditioned_concat_rflow(self, model_params, input_shape): + # copy the model_params dict to prevent from modifying test cases + model_params = model_params.copy() + n_concat_channel = 2 + model_params["in_channels"] = model_params["in_channels"] + n_concat_channel + model_params["cross_attention_dim"] = None + model_params["with_conditioning"] = False + model = DiffusionModelUNet(**model_params) + device = "cuda:0" if torch.cuda.is_available() else "cpu" + model.to(device) + model.eval() + input = torch.randn(input_shape).to(device) + noise = torch.randn(input_shape).to(device) + conditioning_shape = list(input_shape) + conditioning_shape[1] = n_concat_channel + conditioning = torch.randn(conditioning_shape).to(device) + scheduler = RFlowScheduler(num_train_timesteps=1000) + inferer = DiffusionInferer(scheduler=scheduler) + scheduler.set_timesteps(num_inference_steps=10) + timesteps = torch.randint(0, scheduler.num_train_timesteps, (input_shape[0],), device=input.device).long() + sample = inferer( + inputs=input, noise=noise, diffusion_model=model, timesteps=timesteps, condition=conditioning, mode="concat" + ) + self.assertEqual(sample.shape, input_shape) + if __name__ == "__main__": unittest.main() diff --git a/tests/networks/schedulers/test_scheduler_rflow.py b/tests/networks/schedulers/test_scheduler_rflow.py new file mode 100644 index 0000000000..ffddfa73fd --- /dev/null +++ b/tests/networks/schedulers/test_scheduler_rflow.py @@ -0,0 +1,89 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations + +import unittest + +import torch +from parameterized import parameterized + +from monai.networks.schedulers import RFlowScheduler +from tests.test_utils import assert_allclose + +TEST_2D_CASE = [] +for sample_method in ["uniform", "logit-normal"]: + TEST_2D_CASE.append([{"sample_method": sample_method, "use_timestep_transform": False}, (2, 6, 16, 16), (2, 6, 16, 16)]) + +for sample_method in ["uniform", "logit-normal"]: + TEST_2D_CASE.append([{"sample_method": sample_method, "use_timestep_transform": True, "spatial_dim": 2}, (2, 6, 16, 16), (2, 6, 16, 16)]) + + +TEST_3D_CASE = [] +for sample_method in ["uniform", "logit-normal"]: + TEST_3D_CASE.append([{"sample_method": sample_method, "use_timestep_transform": False}, (2, 6, 16, 16, 16), (2, 6, 16, 16, 16)]) + +for sample_method in ["uniform", "logit-normal"]: + TEST_3D_CASE.append([{"sample_method": sample_method, "use_timestep_transform": True, "spatial_dim": 3}, (2, 6, 16, 16, 16), (2, 6, 16, 16, 16)]) + +TEST_CASES = TEST_2D_CASE + TEST_3D_CASE + +TEST_FULl_LOOP = [ + [{"sample_method": "uniform"}, (1, 1, 2, 2), torch.Tensor([[[[-0.786166, -0.057519], [2.442662, -0.407664]]]])] +] + + +class TestRFlowScheduler(unittest.TestCase): + @parameterized.expand(TEST_CASES) + def test_add_noise(self, input_param, input_shape, expected_shape): + scheduler = RFlowScheduler(**input_param) + original_sample = torch.zeros(input_shape) + timesteps = scheduler.sample_timesteps(original_sample) + noise = torch.randn_like(original_sample) + timesteps = torch.randint(0, scheduler.num_train_timesteps, (original_sample.shape[0],)).long() + noisy = scheduler.add_noise(original_samples=original_sample, noise=noise, timesteps=timesteps) + self.assertEqual(noisy.shape, expected_shape) + + @parameterized.expand(TEST_CASES) + def test_step_shape(self, input_param, input_shape, expected_shape): + scheduler = RFlowScheduler(**input_param) + model_output = torch.randn(input_shape) + sample = torch.randn(input_shape) + scheduler.set_timesteps(num_inference_steps=100, input_img_size_numel=torch.numel(sample[0,0,...])) + output_step = scheduler.step(model_output=model_output, timestep=500, sample=sample) + self.assertEqual(output_step[0].shape, expected_shape) + self.assertEqual(output_step[1].shape, expected_shape) + + @parameterized.expand(TEST_FULl_LOOP) + def test_full_timestep_loop(self, input_param, input_shape, expected_output): + scheduler = RFlowScheduler(**input_param) + torch.manual_seed(42) + model_output = torch.randn(input_shape) + sample = torch.randn(input_shape) + scheduler.set_timesteps(50, input_img_size_numel=torch.numel(sample[0,0,...])) + for t in range(50): + sample, _ = scheduler.step(model_output=model_output, timestep=t, sample=sample) + assert_allclose(sample, expected_output, rtol=1e-3, atol=1e-3) + + def test_set_timesteps(self): + scheduler = RFlowScheduler(num_train_timesteps=1000) + scheduler.set_timesteps(num_inference_steps=100, input_img_size_numel=16*16*16) + self.assertEqual(scheduler.num_inference_steps, 100) + self.assertEqual(len(scheduler.timesteps), 100) + + def test_set_timesteps_with_num_inference_steps_bigger_than_num_train_timesteps(self): + scheduler = RFlowScheduler(num_train_timesteps=1000) + with self.assertRaises(ValueError): + scheduler.set_timesteps(num_inference_steps=2000, input_img_size_numel=16*16*16) + + +if __name__ == "__main__": + unittest.main() From acb5a5c318d6aa419d0b5f09768acbe2424c86dc Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" 
<66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 10 Mar 2025 20:01:06 +0000 Subject: [PATCH 50/55] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- tests/inferers/test_controlnet_inferers.py | 4 ++-- tests/networks/schedulers/test_scheduler_rflow.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/inferers/test_controlnet_inferers.py b/tests/inferers/test_controlnet_inferers.py index dea0c10472..a7e39dec19 100644 --- a/tests/inferers/test_controlnet_inferers.py +++ b/tests/inferers/test_controlnet_inferers.py @@ -587,7 +587,7 @@ def test_sampler_conditioned(self, model_params, controlnet_params, input_shape) controlnet.eval() mask = torch.randn(input_shape).to(device) noise = torch.randn(input_shape).to(device) - + # DDIM scheduler = DDIMScheduler(num_train_timesteps=1000) inferer = ControlNetDiffusionInferer(scheduler=scheduler) @@ -759,7 +759,7 @@ def test_prediction_shape( inferer = ControlNetLatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0) scheduler.set_timesteps(num_inference_steps=10) timesteps = torch.randint(0, scheduler.num_train_timesteps, (input_shape[0],), device=input.device).long() - + if dm_model_type == "SPADEDiffusionModelUNet": input_shape_seg = list(input_shape) if "label_nc" in stage_2_params.keys(): diff --git a/tests/networks/schedulers/test_scheduler_rflow.py b/tests/networks/schedulers/test_scheduler_rflow.py index ffddfa73fd..be17b8086d 100644 --- a/tests/networks/schedulers/test_scheduler_rflow.py +++ b/tests/networks/schedulers/test_scheduler_rflow.py @@ -54,7 +54,7 @@ def test_add_noise(self, input_param, input_shape, expected_shape): @parameterized.expand(TEST_CASES) def test_step_shape(self, input_param, input_shape, expected_shape): - scheduler = RFlowScheduler(**input_param) + scheduler = RFlowScheduler(**input_param) model_output = torch.randn(input_shape) sample = torch.randn(input_shape) scheduler.set_timesteps(num_inference_steps=100, input_img_size_numel=torch.numel(sample[0,0,...])) @@ -64,7 +64,7 @@ def test_step_shape(self, input_param, input_shape, expected_shape): @parameterized.expand(TEST_FULl_LOOP) def test_full_timestep_loop(self, input_param, input_shape, expected_output): - scheduler = RFlowScheduler(**input_param) + scheduler = RFlowScheduler(**input_param) torch.manual_seed(42) model_output = torch.randn(input_shape) sample = torch.randn(input_shape) From e320ecc920e33caa8acf3d6af28dbe015ffe6355 Mon Sep 17 00:00:00 2001 From: Can-Zhao Date: Mon, 10 Mar 2025 20:05:54 +0000 Subject: [PATCH 51/55] reformat Signed-off-by: Can-Zhao --- monai/inferers/inferer.py | 18 +++++++--- tests/inferers/test_controlnet_inferers.py | 4 +-- .../schedulers/test_scheduler_rflow.py | 36 +++++++++++++------ 3 files changed, 42 insertions(+), 16 deletions(-) diff --git a/monai/inferers/inferer.py b/monai/inferers/inferer.py index 3607c6d57e..2928db2256 100644 --- a/monai/inferers/inferer.py +++ b/monai/inferers/inferer.py @@ -1402,12 +1402,18 @@ def sample( # type: ignore[override] if not scheduler: scheduler = self.scheduler image = input_noise + + all_next_timesteps = torch.cat((scheduler.timesteps[1:], torch.tensor([0], dtype=scheduler.timesteps.dtype))) if verbose and has_tqdm: - progress_bar = tqdm(scheduler.timesteps) + progress_bar = tqdm( + zip(scheduler.timesteps, all_next_timesteps), + total=min(len(scheduler.timesteps), len(all_next_timesteps)), + ) else: - progress_bar = iter(scheduler.timesteps) + progress_bar = 
iter(zip(scheduler.timesteps, all_next_timesteps)) intermediates = [] - for t in progress_bar: + + for t, next_t in progress_bar: diffuse = diffusion_model if isinstance(diffusion_model, SPADEDiffusionModelUNet): diffuse = partial(diffusion_model, seg=seg) @@ -1446,7 +1452,11 @@ def sample( # type: ignore[override] ) # 3. compute previous image: x_t -> x_t-1 - image, _ = scheduler.step(model_output, t, image) # type: ignore[operator] + if not isinstance(scheduler, RFlowScheduler): + image, _ = scheduler.step(model_output, t, image) + else: + image, _ = scheduler.step(model_output, t, image, next_t) + if save_intermediates and t % intermediate_steps == 0: intermediates.append(image) if save_intermediates: diff --git a/tests/inferers/test_controlnet_inferers.py b/tests/inferers/test_controlnet_inferers.py index dea0c10472..1ce81a71d5 100644 --- a/tests/inferers/test_controlnet_inferers.py +++ b/tests/inferers/test_controlnet_inferers.py @@ -587,7 +587,7 @@ def test_sampler_conditioned(self, model_params, controlnet_params, input_shape) controlnet.eval() mask = torch.randn(input_shape).to(device) noise = torch.randn(input_shape).to(device) - + # DDIM scheduler = DDIMScheduler(num_train_timesteps=1000) inferer = ControlNetDiffusionInferer(scheduler=scheduler) @@ -755,11 +755,11 @@ def test_prediction_shape( input = torch.randn(input_shape).to(device) mask = torch.randn(input_shape).to(device) noise = torch.randn(latent_shape).to(device) + for scheduler in [DDPMScheduler(num_train_timesteps=10), RFlowScheduler(num_train_timesteps=1000)]: inferer = ControlNetLatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0) scheduler.set_timesteps(num_inference_steps=10) timesteps = torch.randint(0, scheduler.num_train_timesteps, (input_shape[0],), device=input.device).long() - if dm_model_type == "SPADEDiffusionModelUNet": input_shape_seg = list(input_shape) if "label_nc" in stage_2_params.keys(): diff --git a/tests/networks/schedulers/test_scheduler_rflow.py b/tests/networks/schedulers/test_scheduler_rflow.py index ffddfa73fd..08f4ed3730 100644 --- a/tests/networks/schedulers/test_scheduler_rflow.py +++ b/tests/networks/schedulers/test_scheduler_rflow.py @@ -21,18 +21,34 @@ TEST_2D_CASE = [] for sample_method in ["uniform", "logit-normal"]: - TEST_2D_CASE.append([{"sample_method": sample_method, "use_timestep_transform": False}, (2, 6, 16, 16), (2, 6, 16, 16)]) + TEST_2D_CASE.append( + [{"sample_method": sample_method, "use_timestep_transform": False}, (2, 6, 16, 16), (2, 6, 16, 16)] + ) for sample_method in ["uniform", "logit-normal"]: - TEST_2D_CASE.append([{"sample_method": sample_method, "use_timestep_transform": True, "spatial_dim": 2}, (2, 6, 16, 16), (2, 6, 16, 16)]) + TEST_2D_CASE.append( + [ + {"sample_method": sample_method, "use_timestep_transform": True, "spatial_dim": 2}, + (2, 6, 16, 16), + (2, 6, 16, 16), + ] + ) TEST_3D_CASE = [] for sample_method in ["uniform", "logit-normal"]: - TEST_3D_CASE.append([{"sample_method": sample_method, "use_timestep_transform": False}, (2, 6, 16, 16, 16), (2, 6, 16, 16, 16)]) + TEST_3D_CASE.append( + [{"sample_method": sample_method, "use_timestep_transform": False}, (2, 6, 16, 16, 16), (2, 6, 16, 16, 16)] + ) for sample_method in ["uniform", "logit-normal"]: - TEST_3D_CASE.append([{"sample_method": sample_method, "use_timestep_transform": True, "spatial_dim": 3}, (2, 6, 16, 16, 16), (2, 6, 16, 16, 16)]) + TEST_3D_CASE.append( + [ + {"sample_method": sample_method, "use_timestep_transform": True, "spatial_dim": 3}, + (2, 6, 16, 16, 16), + (2, 6, 16, 
16, 16), + ] + ) TEST_CASES = TEST_2D_CASE + TEST_3D_CASE @@ -54,35 +70,35 @@ def test_add_noise(self, input_param, input_shape, expected_shape): @parameterized.expand(TEST_CASES) def test_step_shape(self, input_param, input_shape, expected_shape): - scheduler = RFlowScheduler(**input_param) + scheduler = RFlowScheduler(**input_param) model_output = torch.randn(input_shape) sample = torch.randn(input_shape) - scheduler.set_timesteps(num_inference_steps=100, input_img_size_numel=torch.numel(sample[0,0,...])) + scheduler.set_timesteps(num_inference_steps=100, input_img_size_numel=torch.numel(sample[0, 0, ...])) output_step = scheduler.step(model_output=model_output, timestep=500, sample=sample) self.assertEqual(output_step[0].shape, expected_shape) self.assertEqual(output_step[1].shape, expected_shape) @parameterized.expand(TEST_FULl_LOOP) def test_full_timestep_loop(self, input_param, input_shape, expected_output): - scheduler = RFlowScheduler(**input_param) + scheduler = RFlowScheduler(**input_param) torch.manual_seed(42) model_output = torch.randn(input_shape) sample = torch.randn(input_shape) - scheduler.set_timesteps(50, input_img_size_numel=torch.numel(sample[0,0,...])) + scheduler.set_timesteps(50, input_img_size_numel=torch.numel(sample[0, 0, ...])) for t in range(50): sample, _ = scheduler.step(model_output=model_output, timestep=t, sample=sample) assert_allclose(sample, expected_output, rtol=1e-3, atol=1e-3) def test_set_timesteps(self): scheduler = RFlowScheduler(num_train_timesteps=1000) - scheduler.set_timesteps(num_inference_steps=100, input_img_size_numel=16*16*16) + scheduler.set_timesteps(num_inference_steps=100, input_img_size_numel=16 * 16 * 16) self.assertEqual(scheduler.num_inference_steps, 100) self.assertEqual(len(scheduler.timesteps), 100) def test_set_timesteps_with_num_inference_steps_bigger_than_num_train_timesteps(self): scheduler = RFlowScheduler(num_train_timesteps=1000) with self.assertRaises(ValueError): - scheduler.set_timesteps(num_inference_steps=2000, input_img_size_numel=16*16*16) + scheduler.set_timesteps(num_inference_steps=2000, input_img_size_numel=16 * 16 * 16) if __name__ == "__main__": From c2e3cb500b86e50ee39b0fce8bcb4688df7e9dd5 Mon Sep 17 00:00:00 2001 From: Can-Zhao Date: Mon, 10 Mar 2025 20:14:56 +0000 Subject: [PATCH 52/55] add more test Signed-off-by: Can-Zhao --- .../inferers/test_latent_diffusion_inferer.py | 428 +++++++++--------- 1 file changed, 216 insertions(+), 212 deletions(-) diff --git a/tests/inferers/test_latent_diffusion_inferer.py b/tests/inferers/test_latent_diffusion_inferer.py index 4f81b96ca1..e750229e67 100644 --- a/tests/inferers/test_latent_diffusion_inferer.py +++ b/tests/inferers/test_latent_diffusion_inferer.py @@ -19,7 +19,7 @@ from monai.inferers import LatentDiffusionInferer from monai.networks.nets import VQVAE, AutoencoderKL, DiffusionModelUNet, SPADEAutoencoderKL, SPADEDiffusionModelUNet -from monai.networks.schedulers import DDPMScheduler +from monai.networks.schedulers import DDPMScheduler, RFlowScheduler from monai.utils import optional_import _, has_einops = optional_import("einops") @@ -339,31 +339,32 @@ def test_prediction_shape( input = torch.randn(input_shape).to(device) noise = torch.randn(latent_shape).to(device) - scheduler = DDPMScheduler(num_train_timesteps=10) - inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0) - scheduler.set_timesteps(num_inference_steps=10) - timesteps = torch.randint(0, scheduler.num_train_timesteps, (input_shape[0],), device=input.device).long() - - if 
dm_model_type == "SPADEDiffusionModelUNet": - input_shape_seg = list(input_shape) - if "label_nc" in stage_2_params.keys(): - input_shape_seg[1] = stage_2_params["label_nc"] + + for scheduler in [DDPMScheduler(num_train_timesteps=10), RFlowScheduler(num_train_timesteps=1000)]: + inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0) + scheduler.set_timesteps(num_inference_steps=10) + timesteps = torch.randint(0, scheduler.num_train_timesteps, (input_shape[0],), device=input.device).long() + + if dm_model_type == "SPADEDiffusionModelUNet": + input_shape_seg = list(input_shape) + if "label_nc" in stage_2_params.keys(): + input_shape_seg[1] = stage_2_params["label_nc"] + else: + input_shape_seg[1] = autoencoder_params["label_nc"] + input_seg = torch.randn(input_shape_seg).to(device) + prediction = inferer( + inputs=input, + autoencoder_model=stage_1, + diffusion_model=stage_2, + seg=input_seg, + noise=noise, + timesteps=timesteps, + ) else: - input_shape_seg[1] = autoencoder_params["label_nc"] - input_seg = torch.randn(input_shape_seg).to(device) - prediction = inferer( - inputs=input, - autoencoder_model=stage_1, - diffusion_model=stage_2, - seg=input_seg, - noise=noise, - timesteps=timesteps, - ) - else: - prediction = inferer( - inputs=input, autoencoder_model=stage_1, diffusion_model=stage_2, noise=noise, timesteps=timesteps - ) - self.assertEqual(prediction.shape, latent_shape) + prediction = inferer( + inputs=input, autoencoder_model=stage_1, diffusion_model=stage_2, noise=noise, timesteps=timesteps + ) + self.assertEqual(prediction.shape, latent_shape) @parameterized.expand(TEST_CASES) @skipUnless(has_einops, "Requires einops") @@ -388,29 +389,30 @@ def test_sample_shape( stage_2.eval() noise = torch.randn(latent_shape).to(device) - scheduler = DDPMScheduler(num_train_timesteps=10) - inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0) - scheduler.set_timesteps(num_inference_steps=10) - if ae_model_type == "SPADEAutoencoderKL" or dm_model_type == "SPADEDiffusionModelUNet": - input_shape_seg = list(input_shape) - if "label_nc" in stage_2_params.keys(): - input_shape_seg[1] = stage_2_params["label_nc"] + for scheduler in [DDPMScheduler(num_train_timesteps=10), RFlowScheduler(num_train_timesteps=1000)]: + inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0) + scheduler.set_timesteps(num_inference_steps=10) + + if ae_model_type == "SPADEAutoencoderKL" or dm_model_type == "SPADEDiffusionModelUNet": + input_shape_seg = list(input_shape) + if "label_nc" in stage_2_params.keys(): + input_shape_seg[1] = stage_2_params["label_nc"] + else: + input_shape_seg[1] = autoencoder_params["label_nc"] + input_seg = torch.randn(input_shape_seg).to(device) + sample = inferer.sample( + input_noise=noise, + autoencoder_model=stage_1, + diffusion_model=stage_2, + scheduler=scheduler, + seg=input_seg, + ) else: - input_shape_seg[1] = autoencoder_params["label_nc"] - input_seg = torch.randn(input_shape_seg).to(device) - sample = inferer.sample( - input_noise=noise, - autoencoder_model=stage_1, - diffusion_model=stage_2, - scheduler=scheduler, - seg=input_seg, - ) - else: - sample = inferer.sample( - input_noise=noise, autoencoder_model=stage_1, diffusion_model=stage_2, scheduler=scheduler - ) - self.assertEqual(sample.shape, input_shape) + sample = inferer.sample( + input_noise=noise, autoencoder_model=stage_1, diffusion_model=stage_2, scheduler=scheduler + ) + self.assertEqual(sample.shape, input_shape) @parameterized.expand(TEST_CASES) 
@skipUnless(has_einops, "Requires einops") @@ -437,37 +439,38 @@ def test_sample_intermediates( stage_2.eval() noise = torch.randn(latent_shape).to(device) - scheduler = DDPMScheduler(num_train_timesteps=10) - inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0) - scheduler.set_timesteps(num_inference_steps=10) - if ae_model_type == "SPADEAutoencoderKL" or dm_model_type == "SPADEDiffusionModelUNet": - input_shape_seg = list(input_shape) - if "label_nc" in stage_2_params.keys(): - input_shape_seg[1] = stage_2_params["label_nc"] + for scheduler in [DDPMScheduler(num_train_timesteps=10), RFlowScheduler(num_train_timesteps=1000)]: + inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0) + scheduler.set_timesteps(num_inference_steps=10) + + if ae_model_type == "SPADEAutoencoderKL" or dm_model_type == "SPADEDiffusionModelUNet": + input_shape_seg = list(input_shape) + if "label_nc" in stage_2_params.keys(): + input_shape_seg[1] = stage_2_params["label_nc"] + else: + input_shape_seg[1] = autoencoder_params["label_nc"] + input_seg = torch.randn(input_shape_seg).to(device) + sample, intermediates = inferer.sample( + input_noise=noise, + autoencoder_model=stage_1, + diffusion_model=stage_2, + scheduler=scheduler, + seg=input_seg, + save_intermediates=True, + intermediate_steps=1, + ) else: - input_shape_seg[1] = autoencoder_params["label_nc"] - input_seg = torch.randn(input_shape_seg).to(device) - sample, intermediates = inferer.sample( - input_noise=noise, - autoencoder_model=stage_1, - diffusion_model=stage_2, - scheduler=scheduler, - seg=input_seg, - save_intermediates=True, - intermediate_steps=1, - ) - else: - sample, intermediates = inferer.sample( - input_noise=noise, - autoencoder_model=stage_1, - diffusion_model=stage_2, - scheduler=scheduler, - save_intermediates=True, - intermediate_steps=1, - ) - self.assertEqual(len(intermediates), 10) - self.assertEqual(intermediates[0].shape, input_shape) + sample, intermediates = inferer.sample( + input_noise=noise, + autoencoder_model=stage_1, + diffusion_model=stage_2, + scheduler=scheduler, + save_intermediates=True, + intermediate_steps=1, + ) + self.assertEqual(len(intermediates), 10) + self.assertEqual(intermediates[0].shape, input_shape) @parameterized.expand(TEST_CASES) @skipUnless(has_einops, "Requires einops") @@ -614,40 +617,40 @@ def test_prediction_shape_conditioned_concat( conditioning_shape[1] = n_concat_channel conditioning = torch.randn(conditioning_shape).to(device) - scheduler = DDPMScheduler(num_train_timesteps=10) - inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0) - scheduler.set_timesteps(num_inference_steps=10) - - timesteps = torch.randint(0, scheduler.num_train_timesteps, (input_shape[0],), device=input.device).long() - - if dm_model_type == "SPADEDiffusionModelUNet": - input_shape_seg = list(input_shape) - if "label_nc" in stage_2_params.keys(): - input_shape_seg[1] = stage_2_params["label_nc"] + for scheduler in [DDPMScheduler(num_train_timesteps=10), RFlowScheduler(num_train_timesteps=1000)]: + inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0) + scheduler.set_timesteps(num_inference_steps=10) + + timesteps = torch.randint(0, scheduler.num_train_timesteps, (input_shape[0],), device=input.device).long() + + if dm_model_type == "SPADEDiffusionModelUNet": + input_shape_seg = list(input_shape) + if "label_nc" in stage_2_params.keys(): + input_shape_seg[1] = stage_2_params["label_nc"] + else: + input_shape_seg[1] = autoencoder_params["label_nc"] + 
input_seg = torch.randn(input_shape_seg).to(device) + prediction = inferer( + inputs=input, + autoencoder_model=stage_1, + diffusion_model=stage_2, + noise=noise, + timesteps=timesteps, + condition=conditioning, + mode="concat", + seg=input_seg, + ) else: - input_shape_seg[1] = autoencoder_params["label_nc"] - input_seg = torch.randn(input_shape_seg).to(device) - prediction = inferer( - inputs=input, - autoencoder_model=stage_1, - diffusion_model=stage_2, - noise=noise, - timesteps=timesteps, - condition=conditioning, - mode="concat", - seg=input_seg, - ) - else: - prediction = inferer( - inputs=input, - autoencoder_model=stage_1, - diffusion_model=stage_2, - noise=noise, - timesteps=timesteps, - condition=conditioning, - mode="concat", - ) - self.assertEqual(prediction.shape, latent_shape) + prediction = inferer( + inputs=input, + autoencoder_model=stage_1, + diffusion_model=stage_2, + noise=noise, + timesteps=timesteps, + condition=conditioning, + mode="concat", + ) + self.assertEqual(prediction.shape, latent_shape) @parameterized.expand(TEST_CASES) @skipUnless(has_einops, "Requires einops") @@ -681,36 +684,36 @@ def test_sample_shape_conditioned_concat( conditioning_shape[1] = n_concat_channel conditioning = torch.randn(conditioning_shape).to(device) - scheduler = DDPMScheduler(num_train_timesteps=10) - inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0) - scheduler.set_timesteps(num_inference_steps=10) - - if dm_model_type == "SPADEDiffusionModelUNet": - input_shape_seg = list(input_shape) - if "label_nc" in stage_2_params.keys(): - input_shape_seg[1] = stage_2_params["label_nc"] + for scheduler in [DDPMScheduler(num_train_timesteps=10), RFlowScheduler(num_train_timesteps=1000)]: + inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0) + scheduler.set_timesteps(num_inference_steps=10) + + if dm_model_type == "SPADEDiffusionModelUNet": + input_shape_seg = list(input_shape) + if "label_nc" in stage_2_params.keys(): + input_shape_seg[1] = stage_2_params["label_nc"] + else: + input_shape_seg[1] = autoencoder_params["label_nc"] + input_seg = torch.randn(input_shape_seg).to(device) + sample = inferer.sample( + input_noise=noise, + autoencoder_model=stage_1, + diffusion_model=stage_2, + scheduler=scheduler, + conditioning=conditioning, + mode="concat", + seg=input_seg, + ) else: - input_shape_seg[1] = autoencoder_params["label_nc"] - input_seg = torch.randn(input_shape_seg).to(device) - sample = inferer.sample( - input_noise=noise, - autoencoder_model=stage_1, - diffusion_model=stage_2, - scheduler=scheduler, - conditioning=conditioning, - mode="concat", - seg=input_seg, - ) - else: - sample = inferer.sample( - input_noise=noise, - autoencoder_model=stage_1, - diffusion_model=stage_2, - scheduler=scheduler, - conditioning=conditioning, - mode="concat", - ) - self.assertEqual(sample.shape, input_shape) + sample = inferer.sample( + input_noise=noise, + autoencoder_model=stage_1, + diffusion_model=stage_2, + scheduler=scheduler, + conditioning=conditioning, + mode="concat", + ) + self.assertEqual(sample.shape, input_shape) @parameterized.expand(TEST_CASES_DIFF_SHAPES) @skipUnless(has_einops, "Requires einops") @@ -738,39 +741,39 @@ def test_shape_different_latents( input = torch.randn(input_shape).to(device) noise = torch.randn(latent_shape).to(device) - scheduler = DDPMScheduler(num_train_timesteps=10) - # We infer the VAE shape - autoencoder_latent_shape = [i // (2 ** (len(autoencoder_params["channels"]) - 1)) for i in input_shape[2:]] - inferer = 
LatentDiffusionInferer( - scheduler=scheduler, - scale_factor=1.0, - ldm_latent_shape=list(latent_shape[2:]), - autoencoder_latent_shape=autoencoder_latent_shape, - ) - scheduler.set_timesteps(num_inference_steps=10) - - timesteps = torch.randint(0, scheduler.num_train_timesteps, (input_shape[0],), device=input.device).long() - - if dm_model_type == "SPADEDiffusionModelUNet": - input_shape_seg = list(input_shape) - if "label_nc" in stage_2_params.keys(): - input_shape_seg[1] = stage_2_params["label_nc"] - else: - input_shape_seg[1] = autoencoder_params["label_nc"] - input_seg = torch.randn(input_shape_seg).to(device) - prediction = inferer( - inputs=input, - autoencoder_model=stage_1, - diffusion_model=stage_2, - noise=noise, - timesteps=timesteps, - seg=input_seg, - ) - else: - prediction = inferer( - inputs=input, autoencoder_model=stage_1, diffusion_model=stage_2, noise=noise, timesteps=timesteps + for scheduler in [DDPMScheduler(num_train_timesteps=10), RFlowScheduler(num_train_timesteps=1000)]: + # We infer the VAE shape + autoencoder_latent_shape = [i // (2 ** (len(autoencoder_params["channels"]) - 1)) for i in input_shape[2:]] + inferer = LatentDiffusionInferer( + scheduler=scheduler, + scale_factor=1.0, + ldm_latent_shape=list(latent_shape[2:]), + autoencoder_latent_shape=autoencoder_latent_shape, ) - self.assertEqual(prediction.shape, latent_shape) + scheduler.set_timesteps(num_inference_steps=10) + + timesteps = torch.randint(0, scheduler.num_train_timesteps, (input_shape[0],), device=input.device).long() + + if dm_model_type == "SPADEDiffusionModelUNet": + input_shape_seg = list(input_shape) + if "label_nc" in stage_2_params.keys(): + input_shape_seg[1] = stage_2_params["label_nc"] + else: + input_shape_seg[1] = autoencoder_params["label_nc"] + input_seg = torch.randn(input_shape_seg).to(device) + prediction = inferer( + inputs=input, + autoencoder_model=stage_1, + diffusion_model=stage_2, + noise=noise, + timesteps=timesteps, + seg=input_seg, + ) + else: + prediction = inferer( + inputs=input, autoencoder_model=stage_1, diffusion_model=stage_2, noise=noise, timesteps=timesteps + ) + self.assertEqual(prediction.shape, latent_shape) @parameterized.expand(TEST_CASES_DIFF_SHAPES) @skipUnless(has_einops, "Requires einops") @@ -797,40 +800,40 @@ def test_sample_shape_different_latents( stage_2.eval() noise = torch.randn(latent_shape).to(device) - scheduler = DDPMScheduler(num_train_timesteps=10) - # We infer the VAE shape - if ae_model_type == "VQVAE": - autoencoder_latent_shape = [i // (2 ** (len(autoencoder_params["channels"]))) for i in input_shape[2:]] - else: - autoencoder_latent_shape = [i // (2 ** (len(autoencoder_params["channels"]) - 1)) for i in input_shape[2:]] - - inferer = LatentDiffusionInferer( - scheduler=scheduler, - scale_factor=1.0, - ldm_latent_shape=list(latent_shape[2:]), - autoencoder_latent_shape=autoencoder_latent_shape, - ) - scheduler.set_timesteps(num_inference_steps=10) - - if dm_model_type == "SPADEDiffusionModelUNet" or ae_model_type == "SPADEAutoencoderKL": - input_shape_seg = list(input_shape) - if "label_nc" in stage_2_params.keys(): - input_shape_seg[1] = stage_2_params["label_nc"] + for scheduler in [DDPMScheduler(num_train_timesteps=10), RFlowScheduler(num_train_timesteps=1000)]: + # We infer the VAE shape + if ae_model_type == "VQVAE": + autoencoder_latent_shape = [i // (2 ** (len(autoencoder_params["channels"]))) for i in input_shape[2:]] else: - input_shape_seg[1] = autoencoder_params["label_nc"] - input_seg = 
torch.randn(input_shape_seg).to(device) - prediction, _ = inferer.sample( - autoencoder_model=stage_1, - diffusion_model=stage_2, - input_noise=noise, - save_intermediates=True, - seg=input_seg, - ) - else: - prediction = inferer.sample( - autoencoder_model=stage_1, diffusion_model=stage_2, input_noise=noise, save_intermediates=False + autoencoder_latent_shape = [i // (2 ** (len(autoencoder_params["channels"]) - 1)) for i in input_shape[2:]] + + inferer = LatentDiffusionInferer( + scheduler=scheduler, + scale_factor=1.0, + ldm_latent_shape=list(latent_shape[2:]), + autoencoder_latent_shape=autoencoder_latent_shape, ) - self.assertEqual(prediction.shape, input_shape) + scheduler.set_timesteps(num_inference_steps=10) + + if dm_model_type == "SPADEDiffusionModelUNet" or ae_model_type == "SPADEAutoencoderKL": + input_shape_seg = list(input_shape) + if "label_nc" in stage_2_params.keys(): + input_shape_seg[1] = stage_2_params["label_nc"] + else: + input_shape_seg[1] = autoencoder_params["label_nc"] + input_seg = torch.randn(input_shape_seg).to(device) + prediction, _ = inferer.sample( + autoencoder_model=stage_1, + diffusion_model=stage_2, + input_noise=noise, + save_intermediates=True, + seg=input_seg, + ) + else: + prediction = inferer.sample( + autoencoder_model=stage_1, diffusion_model=stage_2, input_noise=noise, save_intermediates=False + ) + self.assertEqual(prediction.shape, input_shape) @skipUnless(has_einops, "Requires einops") def test_incompatible_spade_setup(self): @@ -866,18 +869,19 @@ def test_incompatible_spade_setup(self): stage_2.eval() noise = torch.randn((1, 3, 4, 4)).to(device) input_seg = torch.randn((1, 3, 8, 8)).to(device) - scheduler = DDPMScheduler(num_train_timesteps=10) - inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0) - scheduler.set_timesteps(num_inference_steps=10) - with self.assertRaises(ValueError): - _ = inferer.sample( - input_noise=noise, - autoencoder_model=stage_1, - diffusion_model=stage_2, - scheduler=scheduler, - seg=input_seg, - ) + for scheduler in [DDPMScheduler(num_train_timesteps=10), RFlowScheduler(num_train_timesteps=1000)]: + inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0) + scheduler.set_timesteps(num_inference_steps=10) + + with self.assertRaises(ValueError): + _ = inferer.sample( + input_noise=noise, + autoencoder_model=stage_1, + diffusion_model=stage_2, + scheduler=scheduler, + seg=input_seg, + ) if __name__ == "__main__": From 40be2a6d997410531122b184916bdb1250323612 Mon Sep 17 00:00:00 2001 From: Can-Zhao Date: Mon, 10 Mar 2025 20:16:10 +0000 Subject: [PATCH 53/55] reformat Signed-off-by: Can-Zhao --- .../inferers/test_latent_diffusion_inferer.py | 28 ++++++++++--------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/tests/inferers/test_latent_diffusion_inferer.py b/tests/inferers/test_latent_diffusion_inferer.py index e750229e67..c20cb5d6ff 100644 --- a/tests/inferers/test_latent_diffusion_inferer.py +++ b/tests/inferers/test_latent_diffusion_inferer.py @@ -339,12 +339,12 @@ def test_prediction_shape( input = torch.randn(input_shape).to(device) noise = torch.randn(latent_shape).to(device) - + for scheduler in [DDPMScheduler(num_train_timesteps=10), RFlowScheduler(num_train_timesteps=1000)]: inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0) scheduler.set_timesteps(num_inference_steps=10) timesteps = torch.randint(0, scheduler.num_train_timesteps, (input_shape[0],), device=input.device).long() - + if dm_model_type == "SPADEDiffusionModelUNet": 
input_shape_seg = list(input_shape) if "label_nc" in stage_2_params.keys(): @@ -393,7 +393,7 @@ def test_sample_shape( for scheduler in [DDPMScheduler(num_train_timesteps=10), RFlowScheduler(num_train_timesteps=1000)]: inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0) scheduler.set_timesteps(num_inference_steps=10) - + if ae_model_type == "SPADEAutoencoderKL" or dm_model_type == "SPADEDiffusionModelUNet": input_shape_seg = list(input_shape) if "label_nc" in stage_2_params.keys(): @@ -443,7 +443,7 @@ def test_sample_intermediates( for scheduler in [DDPMScheduler(num_train_timesteps=10), RFlowScheduler(num_train_timesteps=1000)]: inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0) scheduler.set_timesteps(num_inference_steps=10) - + if ae_model_type == "SPADEAutoencoderKL" or dm_model_type == "SPADEDiffusionModelUNet": input_shape_seg = list(input_shape) if "label_nc" in stage_2_params.keys(): @@ -620,9 +620,9 @@ def test_prediction_shape_conditioned_concat( for scheduler in [DDPMScheduler(num_train_timesteps=10), RFlowScheduler(num_train_timesteps=1000)]: inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0) scheduler.set_timesteps(num_inference_steps=10) - + timesteps = torch.randint(0, scheduler.num_train_timesteps, (input_shape[0],), device=input.device).long() - + if dm_model_type == "SPADEDiffusionModelUNet": input_shape_seg = list(input_shape) if "label_nc" in stage_2_params.keys(): @@ -687,7 +687,7 @@ def test_sample_shape_conditioned_concat( for scheduler in [DDPMScheduler(num_train_timesteps=10), RFlowScheduler(num_train_timesteps=1000)]: inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0) scheduler.set_timesteps(num_inference_steps=10) - + if dm_model_type == "SPADEDiffusionModelUNet": input_shape_seg = list(input_shape) if "label_nc" in stage_2_params.keys(): @@ -751,9 +751,9 @@ def test_shape_different_latents( autoencoder_latent_shape=autoencoder_latent_shape, ) scheduler.set_timesteps(num_inference_steps=10) - + timesteps = torch.randint(0, scheduler.num_train_timesteps, (input_shape[0],), device=input.device).long() - + if dm_model_type == "SPADEDiffusionModelUNet": input_shape_seg = list(input_shape) if "label_nc" in stage_2_params.keys(): @@ -805,8 +805,10 @@ def test_sample_shape_different_latents( if ae_model_type == "VQVAE": autoencoder_latent_shape = [i // (2 ** (len(autoencoder_params["channels"]))) for i in input_shape[2:]] else: - autoencoder_latent_shape = [i // (2 ** (len(autoencoder_params["channels"]) - 1)) for i in input_shape[2:]] - + autoencoder_latent_shape = [ + i // (2 ** (len(autoencoder_params["channels"]) - 1)) for i in input_shape[2:] + ] + inferer = LatentDiffusionInferer( scheduler=scheduler, scale_factor=1.0, @@ -814,7 +816,7 @@ def test_sample_shape_different_latents( autoencoder_latent_shape=autoencoder_latent_shape, ) scheduler.set_timesteps(num_inference_steps=10) - + if dm_model_type == "SPADEDiffusionModelUNet" or ae_model_type == "SPADEAutoencoderKL": input_shape_seg = list(input_shape) if "label_nc" in stage_2_params.keys(): @@ -873,7 +875,7 @@ def test_incompatible_spade_setup(self): for scheduler in [DDPMScheduler(num_train_timesteps=10), RFlowScheduler(num_train_timesteps=1000)]: inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0) scheduler.set_timesteps(num_inference_steps=10) - + with self.assertRaises(ValueError): _ = inferer.sample( input_noise=noise, From b9ceccfe20da19e328828b2e2c023ba6a1489652 Mon Sep 17 00:00:00 2001 From: Can-Zhao 
Date: Mon, 10 Mar 2025 20:58:41 +0000 Subject: [PATCH 54/55] reformat Signed-off-by: Can-Zhao --- monai/inferers/inferer.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/monai/inferers/inferer.py b/monai/inferers/inferer.py index 2928db2256..5084eeb1f7 100644 --- a/monai/inferers/inferer.py +++ b/monai/inferers/inferer.py @@ -889,9 +889,9 @@ def sample( # 2. compute previous image: x_t -> x_t-1 if not isinstance(scheduler, RFlowScheduler): - image, _ = scheduler.step(model_output, t, image) + image, _ = scheduler.step(model_output, t, image) # type: ignore else: - image, _ = scheduler.step(model_output, t, image, next_t) + image, _ = scheduler.step(model_output, t, image, next_t) # type: ignore if save_intermediates and t % intermediate_steps == 0: intermediates.append(image) @@ -1453,9 +1453,9 @@ def sample( # type: ignore[override] # 3. compute previous image: x_t -> x_t-1 if not isinstance(scheduler, RFlowScheduler): - image, _ = scheduler.step(model_output, t, image) + image, _ = scheduler.step(model_output, t, image) # type: ignore else: - image, _ = scheduler.step(model_output, t, image, next_t) + image, _ = scheduler.step(model_output, t, image, next_t) # type: ignore if save_intermediates and t % intermediate_steps == 0: intermediates.append(image) From 9685e9fe3a304ae8ec5bd5c6d7bb1b32d9333206 Mon Sep 17 00:00:00 2001 From: Can-Zhao Date: Mon, 10 Mar 2025 21:21:47 +0000 Subject: [PATCH 55/55] reformat Signed-off-by: Can-Zhao --- monai/inferers/inferer.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/monai/inferers/inferer.py b/monai/inferers/inferer.py index 5084eeb1f7..bfb2756ebe 100644 --- a/monai/inferers/inferer.py +++ b/monai/inferers/inferer.py @@ -889,9 +889,9 @@ def sample( # 2. compute previous image: x_t -> x_t-1 if not isinstance(scheduler, RFlowScheduler): - image, _ = scheduler.step(model_output, t, image) # type: ignore + image, _ = scheduler.step(model_output, t, image) # type: ignore else: - image, _ = scheduler.step(model_output, t, image, next_t) # type: ignore + image, _ = scheduler.step(model_output, t, image, next_t) # type: ignore if save_intermediates and t % intermediate_steps == 0: intermediates.append(image) @@ -1453,9 +1453,9 @@ def sample( # type: ignore[override] # 3. compute previous image: x_t -> x_t-1 if not isinstance(scheduler, RFlowScheduler): - image, _ = scheduler.step(model_output, t, image) # type: ignore + image, _ = scheduler.step(model_output, t, image) # type: ignore else: - image, _ = scheduler.step(model_output, t, image, next_t) # type: ignore + image, _ = scheduler.step(model_output, t, image, next_t) # type: ignore if save_intermediates and t % intermediate_steps == 0: intermediates.append(image)
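
---

For readers following the rectified-flow changes in patches 45-48 above: the 2D/3D-compatible noise interpolation that `RFlowScheduler.add_noise` converges on can be sketched as a small standalone PyTorch helper. This is a minimal illustration only; `rf_add_noise` is a hypothetical name, not part of the MONAI API, and the shapes mirror the cases exercised by the new scheduler tests.

```python
import torch


def rf_add_noise(original: torch.Tensor, noise: torch.Tensor, timesteps: torch.Tensor,
                 num_train_timesteps: int = 1000) -> torch.Tensor:
    """Linearly interpolate between a clean sample and noise (rectified flow).

    Supports both 4D (N, C, H, W) and 5D (N, C, D, H, W) inputs by broadcasting
    the per-sample timepoint over every non-batch dimension.
    """
    timepoints = 1.0 - timesteps.float() / num_train_timesteps  # shape (N,)
    if noise.ndim == 5:
        timepoints = timepoints[..., None, None, None, None].expand(-1, *noise.shape[1:])
    elif noise.ndim == 4:
        timepoints = timepoints[..., None, None, None].expand(-1, *noise.shape[1:])
    else:
        raise ValueError(f"noise tensor has to be 4D or 5D, got shape {noise.shape}")
    return timepoints * original + (1.0 - timepoints) * noise


# shape check for the 2D and 3D cases exercised by the tests above
t = torch.randint(0, 1000, (2,))
for shape in [(2, 6, 16, 16), (2, 6, 16, 16, 16)]:
    x, n = torch.zeros(shape), torch.randn(shape)
    assert rf_add_noise(x, n, t).shape == shape
```

This is the same broadcasting pattern as the `noise.ndim == 5` / `noise.ndim == 4` branches in the patched `add_noise`, which is why the scheduler now accepts both 2D and 3D image batches.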
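Patch 51 changes the sampling loops in `monai/inferers/inferer.py` to iterate over pairs of consecutive timesteps so that a rectified-flow step knows how far to integrate. The fragment below is a minimal, library-free sketch of that loop structure, under the assumption that the model output is read as a velocity field (data minus noise), consistent with the interpolation above; the real `RFlowScheduler.step` may differ in details such as sign conventions and returned intermediates.

```python
import torch

num_train_timesteps = 1000
timesteps = torch.linspace(999, 0, steps=10).long()  # stand-in for scheduler.timesteps
all_next_timesteps = torch.cat((timesteps[1:], torch.tensor([0], dtype=timesteps.dtype)))

image = torch.randn(1, 4, 16, 16)  # stand-in for the initial noise latent
for t, next_t in zip(timesteps, all_next_timesteps):
    velocity = torch.zeros_like(image)  # stand-in for diffusion_model(image, t, ...)
    # Euler update along the straight path from noise to data; the step size is the
    # normalized gap between the current and the next timestep, which is why the
    # paired (t, next_t) iteration is needed for RFlowScheduler but not for DDPM/DDIM.
    image = image + (t - next_t).float() / num_train_timesteps * velocity
```

In the actual inferers, DDPM/DDIM schedulers keep the old `scheduler.step(model_output, t, image)` signature and only `RFlowScheduler` receives `next_t`, matching the `isinstance(scheduler, RFlowScheduler)` branch introduced above.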