diff --git a/.github/workflows/cron-mmar.yml b/.github/workflows/cron-mmar.yml
index 735c23117c..f61ba59368 100644
--- a/.github/workflows/cron-mmar.yml
+++ b/.github/workflows/cron-mmar.yml
@@ -37,6 +37,6 @@ jobs:
     - name: Loading MMARs
       run: |
         # clean up temporary files
-        $(pwd)/runtests.sh --clean
+        $(pwd)/runtests.sh --build --clean
         # run tests
         python -m tests.ngc_mmar_loading
diff --git a/.github/workflows/cron.yml b/.github/workflows/cron.yml
index 9cc5f595ac..f50363b1e0 100644
--- a/.github/workflows/cron.yml
+++ b/.github/workflows/cron.yml
@@ -48,8 +48,8 @@ jobs:
         python -c $'import torch\na,b=torch.zeros(1,device="cuda:0"),torch.zeros(1,device="cuda:1");\nwhile True:print(a,b)' > /dev/null &
         python -c "import torch; print(torch.__version__); print('{} of GPUs available'.format(torch.cuda.device_count()))"
         python -c 'import torch; print(torch.rand(5, 3, device=torch.device("cuda:0")))'
-        BUILD_MONAI=1 ./runtests.sh --coverage --unittests --disttests # unit tests with coverage report
-        BUILD_MONAI=1 ./runtests.sh --coverage --net # integration tests with coverage report
+        BUILD_MONAI=1 ./runtests.sh --build --coverage --unittests --disttests # unit tests with coverage report
+        BUILD_MONAI=1 ./runtests.sh --build --coverage --net # integration tests with coverage report
         coverage xml
         if pgrep python; then pkill python; fi
     - name: Upload coverage
@@ -91,8 +91,8 @@ jobs:
         python -c $'import torch\na,b=torch.zeros(1,device="cuda:0"),torch.zeros(1,device="cuda:1");\nwhile True:print(a,b)' > /dev/null &
         python -c "import torch; print(torch.__version__); print('{} of GPUs available'.format(torch.cuda.device_count()))"
         python -c 'import torch; print(torch.rand(5, 3, device=torch.device("cuda:0")))'
-        BUILD_MONAI=1 ./runtests.sh --coverage --unittests --disttests # unit tests with coverage report
-        BUILD_MONAI=1 ./runtests.sh --coverage --net # integration tests with coverage report
+        BUILD_MONAI=1 ./runtests.sh --build --coverage --unittests --disttests # unit tests with coverage report
+        BUILD_MONAI=1 ./runtests.sh --build --coverage --net # integration tests with coverage report
         coverage xml
         if pgrep python; then pkill python; fi
     - name: Upload coverage
@@ -190,8 +190,8 @@ jobs:
         python -c "import torch; print(torch.__version__); print('{} of GPUs available'.format(torch.cuda.device_count()))"
         python -c 'import torch; print(torch.rand(5,3, device=torch.device("cuda:0")))'
         ngc --version
-        BUILD_MONAI=1 ./runtests.sh --coverage --pytype --unittests --disttests # unit tests with pytype checks, coverage report
-        BUILD_MONAI=1 ./runtests.sh --coverage --net # integration tests with coverage report
+        BUILD_MONAI=1 ./runtests.sh --build --coverage --pytype --unittests --disttests # unit tests with pytype checks, coverage report
+        BUILD_MONAI=1 ./runtests.sh --build --coverage --net # integration tests with coverage report
         coverage xml
         if pgrep python; then pkill python; fi
     - name: Upload coverage
diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml
index 6da018f8cf..4b93632723 100644
--- a/.github/workflows/integration.yml
+++ b/.github/workflows/integration.yml
@@ -46,8 +46,8 @@ jobs:
         python -c $'import torch\na,b=torch.zeros(1,device="cuda:0"),torch.zeros(1,device="cuda:1");\nwhile True:print(a,b)' > /dev/null &
         python -c "import torch; print(torch.__version__); print('{} of GPUs available'.format(torch.cuda.device_count()))"
         python -c 'import torch; print(torch.rand(5,3, device=torch.device("cuda:0")))'
-        BUILD_MONAI=1 ./runtests.sh --net
-        BUILD_MONAI=1 ./runtests.sh --unittests --disttests
+        BUILD_MONAI=1 ./runtests.sh --build --net
+        BUILD_MONAI=1 ./runtests.sh --build --unittests --disttests
         if pgrep python; then pkill python; fi
       shell: bash
     - name: Add reaction
diff --git a/.github/workflows/pythonapp-gpu.yml b/.github/workflows/pythonapp-gpu.yml
index 45b6dd2d9c..848eaedcd6 100644
--- a/.github/workflows/pythonapp-gpu.yml
+++ b/.github/workflows/pythonapp-gpu.yml
@@ -124,7 +124,7 @@ jobs:
         python -c 'import torch; print(torch.rand(5, 3, device=torch.device("cuda:0")))'
         python -c "import monai; monai.config.print_config()"
         # build for the current self-hosted CI Tesla V100
-        BUILD_MONAI=1 TORCH_CUDA_ARCH_LIST="7.0" ./runtests.sh --quick --unittests --disttests
+        BUILD_MONAI=1 TORCH_CUDA_ARCH_LIST="7.0" ./runtests.sh --build --quick --unittests --disttests
         if [ ${{ matrix.environment }} = "PT110+CUDA102" ]; then
           # test the clang-format tool downloading once
           coverage run -m tests.clang_format_utils
diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml
index e0f3763f85..7237c8d54d 100644
--- a/.github/workflows/pythonapp.yml
+++ b/.github/workflows/pythonapp.yml
@@ -44,9 +44,9 @@ jobs:
     - name: Lint and type check
       run: |
         # clean up temporary files
-        $(pwd)/runtests.sh --clean
+        $(pwd)/runtests.sh --build --clean
         # Git hub actions have 2 cores, so parallize pytype
-        $(pwd)/runtests.sh --codeformat -j 2
+        $(pwd)/runtests.sh --build --codeformat -j 2
 
   quick-py3: # full dependencies installed tests for different OS
     runs-on: ${{ matrix.os }}
diff --git a/.github/workflows/setupapp.yml b/.github/workflows/setupapp.yml
index 8ccf74a96d..ede2c87b92 100644
--- a/.github/workflows/setupapp.yml
+++ b/.github/workflows/setupapp.yml
@@ -59,8 +59,8 @@ jobs:
         python -c $'import torch\na,b=torch.zeros(1,device="cuda:0"),torch.zeros(1,device="cuda:1");\nwhile True:print(a,b)' > /dev/null &
         python -c "import torch; print(torch.__version__); print('{} of GPUs available'.format(torch.cuda.device_count()))"
         python -c 'import torch; print(torch.rand(5, 3, device=torch.device("cuda:0")))'
-        BUILD_MONAI=1 ./runtests.sh --coverage --unittests --disttests # unit tests with coverage report
-        BUILD_MONAI=1 ./runtests.sh --coverage --net # integration tests with coverage report
+        BUILD_MONAI=1 ./runtests.sh --build --coverage --unittests --disttests # unit tests with coverage report
+        BUILD_MONAI=1 ./runtests.sh --build --coverage --net # integration tests with coverage report
         coverage xml
         if pgrep python; then pkill python; fi
       shell: bash
@@ -104,7 +104,7 @@ jobs:
       run: |
         python -m pip list
         python -c 'import torch; print(torch.__version__); print(torch.rand(5,3))'
-        BUILD_MONAI=1 ./runtests.sh --quick --unittests --disttests
+        BUILD_MONAI=1 ./runtests.sh --build --quick --unittests --disttests
         coverage xml
     - name: Upload coverage
       uses: codecov/codecov-action@v1
diff --git a/monai/config/deviceconfig.py b/monai/config/deviceconfig.py
index 4d50db8438..91b944bde5 100644
--- a/monai/config/deviceconfig.py
+++ b/monai/config/deviceconfig.py
@@ -90,6 +90,7 @@ def print_config(file=sys.stdout):
         print(f"{k} version: {v}", file=file, flush=True)
     print(f"MONAI flags: HAS_EXT = {HAS_EXT}, USE_COMPILED = {USE_COMPILED}")
     print(f"MONAI rev id: {monai.__revision_id__}")
+    print(f"MONAI __file__: {monai.__file__}")
 
     print("\nOptional dependencies:", file=file, flush=True)
     for k, v in get_optional_config_values().items():
diff --git a/runtests.sh b/runtests.sh
index fd84c2f102..70ca8df5c2 100755
--- a/runtests.sh
+++ b/runtests.sh
@@ -38,6 +38,7 @@ doNetTests=false
 doDryRun=false
 doZooTests=false
 doUnitTests=false
+doBuild=false
 doBlackFormat=false
 doBlackFix=false
 doIsortFormat=false
@@ -57,7 +58,7 @@ PY_EXE=${MONAI_PY_EXE:-$(which python)}
 function print_usage {
     echo "runtests.sh [--codeformat] [--autofix] [--black] [--isort] [--flake8] [--clangformat] [--pytype] [--mypy]"
     echo " [--unittests] [--disttests] [--coverage] [--quick] [--min] [--net] [--dryrun] [-j number] [--list_tests]"
-    echo " [--copyright] [--clean] [--help] [--version]"
+    echo " [--copyright] [--build] [--clean] [--help] [--version]"
     echo ""
     echo "MONAI unit testing utilities."
     echo ""
@@ -88,6 +89,7 @@ function print_usage {
     echo " -q, --quick : skip long running unit tests and integration tests"
     echo " -m, --min : only run minimal unit tests which do not require optional packages"
     echo " --net : perform integration testing"
+    echo " -b, --build : compile and install the source code folder as an editable release."
     echo " --list_tests : list unit tests and exit"
     echo ""
     echo "Misc. options:"
@@ -106,7 +108,7 @@ function print_usage {
 }
 
 function check_import {
-    echo "python: ${PY_EXE}"
+    echo "Python: ${PY_EXE}"
     ${cmdPrefix}${PY_EXE} -c "import monai"
 }
 
@@ -278,6 +280,9 @@ do
         --copyright)
            doCopyRight=true
        ;;
+        -b|--build)
+            doBuild=true
+        ;;
         -c|--clean)
            doCleanup=true
        ;;
@@ -322,6 +327,14 @@ else
     check_import
 fi
 
+if [ $doBuild = true ]
+then
+    echo "${separator}${blue}compile and install${noColor}"
+    # try to compile MONAI cpp
+    compile_cpp
+
+    echo "${green}done! (to uninstall and clean up, please use \"./runtests.sh --clean\")${noColor}"
+fi
 
 if [ $doCleanup = true ]
 then
@@ -343,9 +356,6 @@ then
     exit
 fi
 
-# try to compile MONAI cpp
-compile_cpp
-
 # unconditionally report on the state of monai
 print_version
 
diff --git a/tests/min_tests.py b/tests/min_tests.py
index 005b72af9a..00f3e49850 100644
--- a/tests/min_tests.py
+++ b/tests/min_tests.py
@@ -101,6 +101,7 @@ def run_testsuit():
         "test_label_filter",
         "test_lltm",
         "test_lmdbdataset",
+        "test_lmdbdataset_dist",
         "test_load_image",
         "test_load_imaged",
         "test_load_spacing_orientation",
diff --git a/tests/test_distcall.py b/tests/test_call_dist.py
similarity index 100%
rename from tests/test_distcall.py
rename to tests/test_call_dist.py
diff --git a/tests/test_lmdbdataset_dist.py b/tests/test_lmdbdataset_dist.py
new file mode 100644
index 0000000000..cad2949dde
--- /dev/null
+++ b/tests/test_lmdbdataset_dist.py
@@ -0,0 +1,72 @@
+# Copyright (c) MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#     http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import shutil
+import tempfile
+import unittest
+
+import numpy as np
+
+from monai.data import LMDBDataset, json_hashing
+from monai.transforms import Transform
+from tests.utils import DistCall, DistTestCase, skip_if_windows
+
+
+class _InplaceXform(Transform):
+    def __call__(self, data):
+        if data:
+            data[0] = data[0] + np.pi
+        else:
+            data.append(1)
+        return data
+
+
+@skip_if_windows
+class TestMPLMDBDataset(DistTestCase):
+    def setUp(self):
+        self.tempdir = tempfile.mkdtemp()
+
+    def tearDown(self):
+        shutil.rmtree(self.tempdir)
+
+    @DistCall(nnodes=1, nproc_per_node=1)
+    def test_mp_cache(self):
+        items = [[list(range(i))] for i in range(5)]
+
+        ds = LMDBDataset(items, transform=_InplaceXform(), cache_dir=self.tempdir, lmdb_kwargs={"map_size": 10 * 1024})
+        self.assertEqual(items, [[[]], [[0]], [[0, 1]], [[0, 1, 2]], [[0, 1, 2, 3]]])
+        ds1 = LMDBDataset(items, transform=_InplaceXform(), cache_dir=self.tempdir, lmdb_kwargs={"map_size": 10 * 1024})
+        self.assertEqual(list(ds1), list(ds))
+        self.assertEqual(items, [[[]], [[0]], [[0, 1]], [[0, 1, 2]], [[0, 1, 2, 3]]])
+
+        ds = LMDBDataset(
+            items,
+            transform=_InplaceXform(),
+            cache_dir=self.tempdir,
+            lmdb_kwargs={"map_size": 10 * 1024},
+            hash_func=json_hashing,
+        )
+        self.assertEqual(items, [[[]], [[0]], [[0, 1]], [[0, 1, 2]], [[0, 1, 2, 3]]])
+        ds1 = LMDBDataset(
+            items,
+            transform=_InplaceXform(),
+            cache_dir=self.tempdir,
+            lmdb_kwargs={"map_size": 10 * 1024},
+            hash_func=json_hashing,
+        )
+        self.assertEqual(list(ds1), list(ds))
+        self.assertEqual(items, [[[]], [[0]], [[0, 1]], [[0, 1, 2]], [[0, 1, 2, 3]]])
+
+        self.assertTrue(isinstance(ds1.info(), dict))
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/tests/test_distributed_sampler.py b/tests/test_sampler_dist.py
similarity index 100%
rename from tests/test_distributed_sampler.py
rename to tests/test_sampler_dist.py
diff --git a/tests/test_timedcall.py b/tests/test_timedcall_dist.py
similarity index 100%
rename from tests/test_timedcall.py
rename to tests/test_timedcall_dist.py
diff --git a/tests/test_distributed_weighted_random_sampler.py b/tests/test_weighted_random_sampler_dist.py
similarity index 100%
rename from tests/test_distributed_weighted_random_sampler.py
rename to tests/test_weighted_random_sampler_dist.py
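
Usage sketch (not part of the patch, assuming a local checkout with build tools available): with this change `compile_cpp` only runs when `--build` is requested, so a local build-and-test cycle mirrors the CI commands updated above:

    BUILD_MONAI=1 ./runtests.sh --build --unittests --disttests  # compile and install, then run unit tests
    BUILD_MONAI=1 ./runtests.sh --build --net                    # compile and install, then run integration tests
    ./runtests.sh --clean                                        # uninstall the editable build and clean up

All three commands appear in the hunks above; the exact flag combinations for a given machine are an assumption.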