From 45ea21447863480a4fb0f16cefdf4e0b6df93e2b Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Wed, 25 Nov 2020 16:47:19 +0000 Subject: [PATCH 01/11] print system and gpu info Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> --- monai/config/deviceconfig.py | 107 +++++++++++++++++++++++++++++++++++ 1 file changed, 107 insertions(+) diff --git a/monai/config/deviceconfig.py b/monai/config/deviceconfig.py index 06f716071e..19cf845eb5 100644 --- a/monai/config/deviceconfig.py +++ b/monai/config/deviceconfig.py @@ -108,6 +108,8 @@ except (OptionalImportError, ImportError, AttributeError): HAS_EXT = USE_COMPILED = False +psutil, has_psutil = optional_import("psutil") + def get_config_values(): """ @@ -176,3 +178,108 @@ def get_torch_version_tuple(): tuple of ints represents the pytorch major/minor version. """ return tuple((int(x) for x in torch.__version__.split(".")[:2])) + +def _dict_append(in_dict, key, fn): + try: + in_dict[key] = fn() + except: + in_dict[key] = "UNKNOWN for given OS" + + +def get_system_info(file=sys.stdout): + """ + Get system info as an ordered dictionary. + """ + if not has_psutil: + print("`psutil` required for `get_system_info", file=file, flush=True) + return + + output = OrderedDict() + + p = psutil.Process() + with p.oneshot(): + _dict_append(output, "Process name", lambda: p.name()) + _dict_append(output, "Command", lambda: p.cmdline()) + _dict_append(output, "Open files", lambda: p.open_files()) + _dict_append(output, "Num physical CPUs", lambda: psutil.cpu_count(logical=False)) + _dict_append(output, "Num logical CPUs", lambda: psutil.cpu_count(logical=True)) + _dict_append(output, "Num usable CPUs", lambda: len(psutil.Process().cpu_affinity())) + _dict_append(output, "CPU usage (%)", lambda: psutil.cpu_percent(percpu=True)) + _dict_append(output, "CPU freq. (MHz)", lambda: round(psutil.cpu_freq(percpu=False)[0])) + _dict_append(output, "Load avg. in last 1, 5, 15 mins (%)", lambda: [round(x / psutil.cpu_count() * 100, 1) for x in psutil.getloadavg()]) + _dict_append(output, "Disk usage (%)", lambda: psutil.disk_usage(os.getcwd()).percent) + _dict_append(output, "Avg. sensor temp. (°C)", lambda: np.mean([item.current for sublist in psutil.sensors_temperatures().values() for item in sublist])) + + return output + + +def print_system_info(file=sys.stdout): + """ + Print system info to `file`. Requires the optional library, `psutil`. + + Args: + file: `print()` text stream file. Defaults to `sys.stdout`. 
+ """ + if not has_psutil: + print("`psutil` required for `print_system_info", file=file, flush=True) + return + + for k, v in get_system_info(file).items(): + print(f"{k}: {v}", file=file, flush=True) + +def get_gpu_info(): + + output = OrderedDict() + + num_gpus = torch.cuda.device_count() + _dict_append(output,"Num GPUs", lambda: num_gpus) + if num_gpus > 0: + _dict_append(output,"Current device", lambda: torch.cuda.current_device()) + _dict_append(output,"Library compiled for CUDA architectures", lambda: torch.cuda.get_arch_list()) + for gpu in range(num_gpus): + _dict_append(output,"Info for GPU", lambda: gpu) + gpu_info = torch.cuda.get_device_properties(gpu) + _dict_append(output,"\tName", lambda: gpu_info.name) + _dict_append(output,"\tIs integrated", lambda: bool(gpu_info.is_integrated)) + _dict_append(output,"\tIs multi GPU board", lambda: bool(gpu_info.is_multi_gpu_board)) + _dict_append(output,"\tMulti processor count", lambda: gpu_info.multi_processor_count) + _dict_append(output,"\tTotal memory (GB)", lambda: round(gpu_info.total_memory / 1024**3, 1)) + _dict_append(output,"\tCached memory (GB)", lambda: round(torch.cuda.memory_cached(gpu) / 1024**3, 1)) + _dict_append(output,"\tAllocated memory (GB)", lambda: round(torch.cuda.memory_allocated(gpu) / 1024**3, 1)) + _dict_append(output,"\tCUDA capability (maj.min)", lambda: f"{gpu_info.major}.{gpu_info.minor}") + + return output + +def print_gpu_info(file=sys.stdout): + """ + Print GPU info to `file`. + + Args: + file: `print()` text stream file. Defaults to `sys.stdout`. + """ + for k, v in get_gpu_info().items(): + print(f"{k}: {v}", file=file, flush=True) + +def print_debug_info(file=sys.stdout): + """ + Print config (installed dependencies, etc.) and system info for debugging. + + Args: + file: `print()` text stream file. Defaults to `sys.stdout`. 
+ """ + print("================================",file=file, flush=True) + print("Printing MONAI config...",file=file, flush=True) + print("================================",file=file, flush=True) + print_config(file) + print("\n================================",file=file, flush=True) + print("Printing system config...") + print("================================",file=file, flush=True) + print_system_info(file) + print("\n================================",file=file, flush=True) + print("Printing GPU config...") + print("================================",file=file, flush=True) + print_gpu_info(file) + + +if __name__ == "__main__": + print_debug_info() From e4c9f841508b856e342777d0ffb4420ea87398bc Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Wed, 25 Nov 2020 17:27:53 +0000 Subject: [PATCH 02/11] autofix changes Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> --- monai/config/deviceconfig.py | 83 ++++++++++++++++++++---------------- 1 file changed, 47 insertions(+), 36 deletions(-) diff --git a/monai/config/deviceconfig.py b/monai/config/deviceconfig.py index 19cf845eb5..97932146e1 100644 --- a/monai/config/deviceconfig.py +++ b/monai/config/deviceconfig.py @@ -94,7 +94,7 @@ try: - import lmdb + import lmdb # type: ignore lmdb_version = lmdb.__version__ del lmdb @@ -179,22 +179,23 @@ def get_torch_version_tuple(): """ return tuple((int(x) for x in torch.__version__.split(".")[:2])) + def _dict_append(in_dict, key, fn): try: in_dict[key] = fn() - except: + except BaseException: in_dict[key] = "UNKNOWN for given OS" -def get_system_info(file=sys.stdout): +def get_system_info(file=sys.stdout) -> OrderedDict: """ Get system info as an ordered dictionary. """ + output: OrderedDict = OrderedDict() + if not has_psutil: print("`psutil` required for `get_system_info", file=file, flush=True) - return - - output = OrderedDict() + return output p = psutil.Process() with p.oneshot(): @@ -206,14 +207,22 @@ def get_system_info(file=sys.stdout): _dict_append(output, "Num usable CPUs", lambda: len(psutil.Process().cpu_affinity())) _dict_append(output, "CPU usage (%)", lambda: psutil.cpu_percent(percpu=True)) _dict_append(output, "CPU freq. (MHz)", lambda: round(psutil.cpu_freq(percpu=False)[0])) - _dict_append(output, "Load avg. in last 1, 5, 15 mins (%)", lambda: [round(x / psutil.cpu_count() * 100, 1) for x in psutil.getloadavg()]) + _dict_append( + output, + "Load avg. in last 1, 5, 15 mins (%)", + lambda: [round(x / psutil.cpu_count() * 100, 1) for x in psutil.getloadavg()], + ) _dict_append(output, "Disk usage (%)", lambda: psutil.disk_usage(os.getcwd()).percent) - _dict_append(output, "Avg. sensor temp. (°C)", lambda: np.mean([item.current for sublist in psutil.sensors_temperatures().values() for item in sublist])) + _dict_append( + output, + "Avg. sensor temp. (°C)", + lambda: np.mean([item.current for sublist in psutil.sensors_temperatures().values() for item in sublist]), + ) return output -def print_system_info(file=sys.stdout): +def print_system_info(file=sys.stdout) -> None: """ Print system info to `file`. Requires the optional library, `psutil`. @@ -221,36 +230,37 @@ def print_system_info(file=sys.stdout): file: `print()` text stream file. Defaults to `sys.stdout`. 
""" if not has_psutil: - print("`psutil` required for `print_system_info", file=file, flush=True) - return + print("`psutil` required for `print_system_info`", file=file, flush=True) + else: + for k, v in get_system_info(file).items(): + print(f"{k}: {v}", file=file, flush=True) - for k, v in get_system_info(file).items(): - print(f"{k}: {v}", file=file, flush=True) -def get_gpu_info(): +def get_gpu_info() -> OrderedDict: - output = OrderedDict() + output: OrderedDict = OrderedDict() num_gpus = torch.cuda.device_count() - _dict_append(output,"Num GPUs", lambda: num_gpus) + _dict_append(output, "Num GPUs", lambda: num_gpus) if num_gpus > 0: - _dict_append(output,"Current device", lambda: torch.cuda.current_device()) - _dict_append(output,"Library compiled for CUDA architectures", lambda: torch.cuda.get_arch_list()) + _dict_append(output, "Current device", lambda: torch.cuda.current_device()) + _dict_append(output, "Library compiled for CUDA architectures", lambda: torch.cuda.get_arch_list()) for gpu in range(num_gpus): - _dict_append(output,"Info for GPU", lambda: gpu) + _dict_append(output, "Info for GPU", lambda: gpu) gpu_info = torch.cuda.get_device_properties(gpu) - _dict_append(output,"\tName", lambda: gpu_info.name) - _dict_append(output,"\tIs integrated", lambda: bool(gpu_info.is_integrated)) - _dict_append(output,"\tIs multi GPU board", lambda: bool(gpu_info.is_multi_gpu_board)) - _dict_append(output,"\tMulti processor count", lambda: gpu_info.multi_processor_count) - _dict_append(output,"\tTotal memory (GB)", lambda: round(gpu_info.total_memory / 1024**3, 1)) - _dict_append(output,"\tCached memory (GB)", lambda: round(torch.cuda.memory_cached(gpu) / 1024**3, 1)) - _dict_append(output,"\tAllocated memory (GB)", lambda: round(torch.cuda.memory_allocated(gpu) / 1024**3, 1)) - _dict_append(output,"\tCUDA capability (maj.min)", lambda: f"{gpu_info.major}.{gpu_info.minor}") + _dict_append(output, "\tName", lambda: gpu_info.name) + _dict_append(output, "\tIs integrated", lambda: bool(gpu_info.is_integrated)) + _dict_append(output, "\tIs multi GPU board", lambda: bool(gpu_info.is_multi_gpu_board)) + _dict_append(output, "\tMulti processor count", lambda: gpu_info.multi_processor_count) + _dict_append(output, "\tTotal memory (GB)", lambda: round(gpu_info.total_memory / 1024 ** 3, 1)) + _dict_append(output, "\tCached memory (GB)", lambda: round(torch.cuda.memory_cached(gpu) / 1024 ** 3, 1)) + _dict_append(output, "\tAllocated memory (GB)", lambda: round(torch.cuda.memory_allocated(gpu) / 1024 ** 3, 1)) + _dict_append(output, "\tCUDA capability (maj.min)", lambda: f"{gpu_info.major}.{gpu_info.minor}") return output -def print_gpu_info(file=sys.stdout): + +def print_gpu_info(file=sys.stdout) -> None: """ Print GPU info to `file`. @@ -260,24 +270,25 @@ def print_gpu_info(file=sys.stdout): for k, v in get_gpu_info().items(): print(f"{k}: {v}", file=file, flush=True) -def print_debug_info(file=sys.stdout): + +def print_debug_info(file=sys.stdout) -> None: """ Print config (installed dependencies, etc.) and system info for debugging. Args: file: `print()` text stream file. Defaults to `sys.stdout`. 
""" - print("================================",file=file, flush=True) - print("Printing MONAI config...",file=file, flush=True) - print("================================",file=file, flush=True) + print("================================", file=file, flush=True) + print("Printing MONAI config...", file=file, flush=True) + print("================================", file=file, flush=True) print_config(file) - print("\n================================",file=file, flush=True) + print("\n================================", file=file, flush=True) print("Printing system config...") - print("================================",file=file, flush=True) + print("================================", file=file, flush=True) print_system_info(file) - print("\n================================",file=file, flush=True) + print("\n================================", file=file, flush=True) print("Printing GPU config...") - print("================================",file=file, flush=True) + print("================================", file=file, flush=True) print_gpu_info(file) From 3b7540b615c5c87acd1fecb9c64fcbb1cf9a96d8 Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Wed, 25 Nov 2020 17:31:35 +0000 Subject: [PATCH 03/11] adds psutil as optional dependency Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> --- requirements-dev.txt | 1 + setup.cfg | 2 ++ 2 files changed, 3 insertions(+) diff --git a/requirements-dev.txt b/requirements-dev.txt index a6795cc3fa..fb0c24c859 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -25,3 +25,4 @@ pytype>=2020.6.1 mypy>=0.790 ninja torchvision +psutil diff --git a/setup.cfg b/setup.cfg index b3de5c9df8..2c11b789c7 100644 --- a/setup.cfg +++ b/setup.cfg @@ -52,6 +52,8 @@ tqdm = tqdm>=4.47.0 lmdb = lmdb +psutil = + psutil [flake8] select = B,C,E,F,N,P,T4,W,B9 From 182a529d01a0259585e84cb1f8105271937884ed Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Wed, 25 Nov 2020 17:35:44 +0000 Subject: [PATCH 04/11] cached -> reserved Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> --- monai/config/deviceconfig.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monai/config/deviceconfig.py b/monai/config/deviceconfig.py index 97932146e1..991bc50e46 100644 --- a/monai/config/deviceconfig.py +++ b/monai/config/deviceconfig.py @@ -253,7 +253,7 @@ def get_gpu_info() -> OrderedDict: _dict_append(output, "\tIs multi GPU board", lambda: bool(gpu_info.is_multi_gpu_board)) _dict_append(output, "\tMulti processor count", lambda: gpu_info.multi_processor_count) _dict_append(output, "\tTotal memory (GB)", lambda: round(gpu_info.total_memory / 1024 ** 3, 1)) - _dict_append(output, "\tCached memory (GB)", lambda: round(torch.cuda.memory_cached(gpu) / 1024 ** 3, 1)) + _dict_append(output, "\tCached memory (GB)", lambda: round(torch.cuda.memory_reserved(gpu) / 1024 ** 3, 1)) _dict_append(output, "\tAllocated memory (GB)", lambda: round(torch.cuda.memory_allocated(gpu) / 1024 ** 3, 1)) _dict_append(output, "\tCUDA capability (maj.min)", lambda: f"{gpu_info.major}.{gpu_info.minor}") From 2b959d4c518e16d0094dd99a636ca4cb8e246529 Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Thu, 26 Nov 2020 10:18:02 +0000 Subject: [PATCH 05/11] add test for print debug info Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> --- tests/test_print_info.py | 23 +++++++++++++++++++++++ 1 file changed, 
23 insertions(+) create mode 100644 tests/test_print_info.py diff --git a/tests/test_print_info.py b/tests/test_print_info.py new file mode 100644 index 0000000000..6a6c4a4a8b --- /dev/null +++ b/tests/test_print_info.py @@ -0,0 +1,23 @@ +# Copyright 2020 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +from monai.config import print_debug_info + + +class TestPrintInfo(unittest.TestCase): + def test_print_info(self): + print_debug_info() + + +if __name__ == "__main__": + unittest.main() From 9dac9a308332e743b596fa7a02ed24c8aa7169e7 Mon Sep 17 00:00:00 2001 From: Rich <33289025+rijobro@users.noreply.github.com> Date: Thu, 26 Nov 2020 11:28:11 +0000 Subject: [PATCH 06/11] update installation and bug report Signed-off-by: Rich <33289025+rijobro@users.noreply.github.com> --- .github/ISSUE_TEMPLATE/bug_report.md | 13 +++++++------ docs/source/installation.md | 4 ++-- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 8a56afe973..cebcdfc917 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -22,12 +22,13 @@ A clear and concise description of what you expected to happen. **Screenshots** If applicable, add screenshots to help explain your problem. -**Environment (please complete the following information; e.g. using `sh runtests.sh -v`):** - - OS - - Python version - - MONAI version [e.g. git commit hash] - - CUDA/cuDNN version - - GPU models and configuration +**Environment** + +Ensuring you use the relevant python executable, please paste the output of: + +``` +python -c 'import monai; monai.config.print_debug_info()' +``` **Additional context** Add any other context about the problem here. diff --git a/docs/source/installation.md b/docs/source/installation.md index 28dd65f066..68efb5d0a5 100644 --- a/docs/source/installation.md +++ b/docs/source/installation.md @@ -147,9 +147,9 @@ Since MONAI v0.2.0, the extras syntax such as `pip install 'monai[nibabel]'` is - The options are ``` -[nibabel, skimage, pillow, tensorboard, gdown, ignite, torchvision, itk, tqdm, lmdb] +[nibabel, skimage, pillow, tensorboard, gdown, ignite, torchvision, itk, tqdm, lmdb, psutil] ``` which correspond to `nibabel`, `scikit-image`, `pillow`, `tensorboard`, -`gdown`, `pytorch-ignite`, `torchvision`, `itk`, and `tqdm` respectively. +`gdown`, `pytorch-ignite`, `torchvision`, `itk`, `tqdm`, `lmdb` and `psutil`, respectively. - `pip install 'monai[all]'` installs all the optional dependencies. 
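For reference, the workflow that the updated bug-report template and installation docs describe reduces to a single call. A minimal usage sketch, assuming MONAI is installed with the `psutil` extra introduced above (e.g. `pip install 'monai[psutil]'`):

```python
# Python equivalent of the one-liner requested in the updated bug_report.md:
#   python -c 'import monai; monai.config.print_debug_info()'
import monai

# Prints MONAI/dependency versions, system info and GPU info to stdout.
monai.config.print_debug_info()
```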
From b44fc3d0c30d82698a5cb9631efbeb9dd0b13415 Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Thu, 26 Nov 2020 13:14:29 +0000 Subject: [PATCH 07/11] RAM, OS, CUDA ver, cuDNN ver, etc Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> --- monai/config/deviceconfig.py | 83 +++++++++++++++++++++++++----------- 1 file changed, 57 insertions(+), 26 deletions(-) diff --git a/monai/config/deviceconfig.py b/monai/config/deviceconfig.py index 991bc50e46..4eaff2692e 100644 --- a/monai/config/deviceconfig.py +++ b/monai/config/deviceconfig.py @@ -11,6 +11,7 @@ import os import platform +import re import sys from collections import OrderedDict @@ -109,6 +110,7 @@ HAS_EXT = USE_COMPILED = False psutil, has_psutil = optional_import("psutil") +psutil_version = psutil.__version__ if has_psutil else "NOT INSTALLED or UNKNOWN VERSION." def get_config_values(): @@ -118,8 +120,6 @@ def get_config_values(): output = OrderedDict() output["MONAI"] = monai.__version__ - output["Python"] = sys.version.replace("\n", " ") - output["OS"] = f"{platform.system()} ({platform.release()})" output["Numpy"] = np.version.full_version output["Pytorch"] = torch.__version__ @@ -142,6 +142,7 @@ def get_optional_config_values(): output["ITK"] = itk_version output["tqdm"] = tqdm_version output["lmdb"] = lmdb_version + output["psutil"] = psutil_version return output @@ -193,31 +194,52 @@ def get_system_info(file=sys.stdout) -> OrderedDict: """ output: OrderedDict = OrderedDict() + _dict_append(output, "System", lambda: platform.system()) + if output["System"] == "Windows": + _dict_append(output, "Win32 version", lambda: platform.win32_ver()) + _dict_append(output, "Win32 edition", lambda: platform.win32_edition()) + elif output["System"] == "Darwin": + _dict_append(output, "Mac version", lambda: platform.mac_ver()[0]) + else: + linux_ver = re.search(r'PRETTY_NAME="(.*)"', open("/etc/os-release", "r").read()) + if linux_ver: + _dict_append(output, "Linux version", lambda: linux_ver.group(1)) + + _dict_append(output, "Platform", lambda: platform.platform()) + _dict_append(output, "Processor", lambda: platform.processor()) + _dict_append(output, "Machine", lambda: platform.machine()) + _dict_append(output, "Python version", lambda: platform.python_version()) + if not has_psutil: - print("`psutil` required for `get_system_info", file=file, flush=True) - return output - - p = psutil.Process() - with p.oneshot(): - _dict_append(output, "Process name", lambda: p.name()) - _dict_append(output, "Command", lambda: p.cmdline()) - _dict_append(output, "Open files", lambda: p.open_files()) - _dict_append(output, "Num physical CPUs", lambda: psutil.cpu_count(logical=False)) - _dict_append(output, "Num logical CPUs", lambda: psutil.cpu_count(logical=True)) - _dict_append(output, "Num usable CPUs", lambda: len(psutil.Process().cpu_affinity())) - _dict_append(output, "CPU usage (%)", lambda: psutil.cpu_percent(percpu=True)) - _dict_append(output, "CPU freq. (MHz)", lambda: round(psutil.cpu_freq(percpu=False)[0])) - _dict_append( - output, - "Load avg. in last 1, 5, 15 mins (%)", - lambda: [round(x / psutil.cpu_count() * 100, 1) for x in psutil.getloadavg()], - ) - _dict_append(output, "Disk usage (%)", lambda: psutil.disk_usage(os.getcwd()).percent) - _dict_append( - output, - "Avg. sensor temp. 
(°C)", - lambda: np.mean([item.current for sublist in psutil.sensors_temperatures().values() for item in sublist]), - ) + _dict_append(output, "`psutil` missing", lambda: "run `pip install monai[psutil]`") + else: + p = psutil.Process() + with p.oneshot(): + _dict_append(output, "Process name", lambda: p.name()) + _dict_append(output, "Command", lambda: p.cmdline()) + _dict_append(output, "Open files", lambda: p.open_files()) + _dict_append(output, "Num physical CPUs", lambda: psutil.cpu_count(logical=False)) + _dict_append(output, "Num logical CPUs", lambda: psutil.cpu_count(logical=True)) + _dict_append(output, "Num usable CPUs", lambda: len(psutil.Process().cpu_affinity())) + _dict_append(output, "CPU usage (%)", lambda: psutil.cpu_percent(percpu=True)) + _dict_append(output, "CPU freq. (MHz)", lambda: round(psutil.cpu_freq(percpu=False)[0])) + _dict_append( + output, + "Load avg. in last 1, 5, 15 mins (%)", + lambda: [round(x / psutil.cpu_count() * 100, 1) for x in psutil.getloadavg()], + ) + _dict_append(output, "Disk usage (%)", lambda: psutil.disk_usage(os.getcwd()).percent) + _dict_append( + output, + "Avg. sensor temp. (°C)", + lambda: round( + np.mean([item.current for sublist in psutil.sensors_temperatures().values() for item in sublist], 1) + ), + ) + mem = psutil.virtual_memory() + _dict_append(output, "Total physical memory (GB)", lambda: round(mem.total / 1024 ** 3, 1)) + _dict_append(output, "Available memory (GB)", lambda: round(mem.available / 1024 ** 3, 1)) + _dict_append(output, "Used memory (GB)", lambda: round(mem.used / 1024 ** 3, 1)) return output @@ -242,6 +264,15 @@ def get_gpu_info() -> OrderedDict: num_gpus = torch.cuda.device_count() _dict_append(output, "Num GPUs", lambda: num_gpus) + + _dict_append(output, "Has CUDA", lambda: bool(torch.cuda.is_available())) + if output["Has CUDA"]: + _dict_append(output, "CUDA version", lambda: torch.version.cuda) + cudnn_ver = torch.backends.cudnn.version() + _dict_append(output, "cuDNN enabled", lambda: bool(cudnn_ver)) + if cudnn_ver: + _dict_append(output, "cuDNN version", lambda: cudnn_ver) + if num_gpus > 0: _dict_append(output, "Current device", lambda: torch.cuda.current_device()) _dict_append(output, "Library compiled for CUDA architectures", lambda: torch.cuda.get_arch_list()) From 9f490bc3951312c6cea24e4cddd5becf33af485a Mon Sep 17 00:00:00 2001 From: Rich <33289025+rijobro@users.noreply.github.com> Date: Thu, 26 Nov 2020 17:26:15 +0000 Subject: [PATCH 08/11] simplify getting versions of optional dependencies Signed-off-by: Rich <33289025+rijobro@users.noreply.github.com> --- monai/config/deviceconfig.py | 94 ++++-------------------------------- monai/utils/module.py | 15 ++++++ 2 files changed, 25 insertions(+), 84 deletions(-) diff --git a/monai/config/deviceconfig.py b/monai/config/deviceconfig.py index 4eaff2692e..cdaa1dd442 100644 --- a/monai/config/deviceconfig.py +++ b/monai/config/deviceconfig.py @@ -19,63 +19,7 @@ import torch import monai -from monai.utils import OptionalImportError, optional_import - -try: - import ignite - - ignite_version = ignite.__version__ - del ignite -except (ImportError, AttributeError): - ignite_version = "NOT INSTALLED or UNKNOWN VERSION." - -try: - import nibabel - - nibabel_version = nibabel.__version__ - del nibabel -except (ImportError, AttributeError): - nibabel_version = "NOT INSTALLED or UNKNOWN VERSION." 
- -try: - import skimage - - skimage_version = skimage.__version__ - del skimage -except (ImportError, AttributeError): - skimage_version = "NOT INSTALLED or UNKNOWN VERSION." - -try: - import PIL - - PIL_version = PIL.__version__ - del PIL -except (ImportError, AttributeError): - PIL_version = "NOT INSTALLED or UNKNOWN VERSION." - -try: - import tensorboard - - tensorboard_version = tensorboard.__version__ - del tensorboard -except (ImportError, AttributeError): - tensorboard_version = "NOT INSTALLED or UNKNOWN VERSION." - -try: - import gdown - - gdown_version = gdown.__version__ - del gdown -except (ImportError, AttributeError): - gdown_version = "NOT INSTALLED or UNKNOWN VERSION." - -try: - import torchvision - - torchvision_version = torchvision.__version__ - del torchvision -except (ImportError, AttributeError): - torchvision_version = "NOT INSTALLED or UNKNOWN VERSION." +from monai.utils import OptionalImportError, get_package_version, optional_import try: import itk # type: ignore @@ -85,24 +29,6 @@ except (ImportError, AttributeError): itk_version = "NOT INSTALLED or UNKNOWN VERSION." -try: - import tqdm - - tqdm_version = tqdm.__version__ - del tqdm -except (ImportError, AttributeError): - tqdm_version = "NOT INSTALLED or UNKNOWN VERSION." - - -try: - import lmdb # type: ignore - - lmdb_version = lmdb.__version__ - del lmdb -except (ImportError, AttributeError): - lmdb_version = "NOT INSTALLED or UNKNOWN VERSION." - - try: _, HAS_EXT = optional_import("monai._C") USE_COMPILED = HAS_EXT and os.getenv("BUILD_MONAI", "0") == "1" @@ -132,16 +58,16 @@ def get_optional_config_values(): """ output = OrderedDict() - output["Pytorch Ignite"] = ignite_version - output["Nibabel"] = nibabel_version - output["scikit-image"] = skimage_version - output["Pillow"] = PIL_version - output["Tensorboard"] = tensorboard_version - output["gdown"] = gdown_version - output["TorchVision"] = torchvision_version + output["Pytorch Ignite"] = get_package_version("ignite") + output["Nibabel"] = get_package_version("nibabel") + output["scikit-image"] = get_package_version("skimage") + output["Pillow"] = get_package_version("PIL") + output["Tensorboard"] = get_package_version("tensorboard") + output["gdown"] = get_package_version("gdown") + output["TorchVision"] = get_package_version("torchvision") output["ITK"] = itk_version - output["tqdm"] = tqdm_version - output["lmdb"] = lmdb_version + output["tqdm"] = get_package_version("tqdm") + output["lmdb"] = get_package_version("lmdb") output["psutil"] = psutil_version return output diff --git a/monai/utils/module.py b/monai/utils/module.py index 4b8c1e91e7..814062589e 100644 --- a/monai/utils/module.py +++ b/monai/utils/module.py @@ -29,6 +29,7 @@ "load_submodules", "get_full_type_name", "has_option", + "get_package_version", ] @@ -231,3 +232,17 @@ def has_option(obj, keywords: Union[str, Sequence[str]]) -> bool: if key not in sig.parameters: return False return True + + +def get_package_version(dep_name, default="NOT INSTALLED or UNKNOWN VERSION."): + """ + Try to load package and get version. Unload once finished. If not found, return `default`. 
+ """ + dep_ver = default + dep, has_dep = optional_import(dep_name) + if has_dep: + if hasattr(dep, "__version__"): + dep_ver = dep.__version__ + del dep + del sys.modules[dep_name] + return dep_ver From c4253e2fced8d92d82883c61469abaf39bb8bbfc Mon Sep 17 00:00:00 2001 From: Rich <33289025+rijobro@users.noreply.github.com> Date: Thu, 26 Nov 2020 19:26:52 +0000 Subject: [PATCH 09/11] only unload if not already loaded Signed-off-by: Rich <33289025+rijobro@users.noreply.github.com> --- monai/utils/module.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/monai/utils/module.py b/monai/utils/module.py index 814062589e..0edf9047ac 100644 --- a/monai/utils/module.py +++ b/monai/utils/module.py @@ -236,13 +236,19 @@ def has_option(obj, keywords: Union[str, Sequence[str]]) -> bool: def get_package_version(dep_name, default="NOT INSTALLED or UNKNOWN VERSION."): """ - Try to load package and get version. Unload once finished. If not found, return `default`. + Try to load package and get version. If not found, return `default`. + + If the package was already loaded, leave it. If wasn't previously loaded, unload it. """ dep_ver = default + dep_already_loaded = dep_name not in sys.modules + dep, has_dep = optional_import(dep_name) if has_dep: if hasattr(dep, "__version__"): dep_ver = dep.__version__ - del dep - del sys.modules[dep_name] + # if not previously loaded, unload it + if not dep_already_loaded: + del dep + del sys.modules[dep_name] return dep_ver From add39b9eb2a814ff874301df8239f6fb87a83dcf Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Thu, 3 Dec 2020 16:01:32 +0000 Subject: [PATCH 10/11] Update deviceconfig.py remove utf-8 char Signed-off-by: Wenqi Li --- monai/config/deviceconfig.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monai/config/deviceconfig.py b/monai/config/deviceconfig.py index cdaa1dd442..b252abdfcc 100644 --- a/monai/config/deviceconfig.py +++ b/monai/config/deviceconfig.py @@ -157,7 +157,7 @@ def get_system_info(file=sys.stdout) -> OrderedDict: _dict_append(output, "Disk usage (%)", lambda: psutil.disk_usage(os.getcwd()).percent) _dict_append( output, - "Avg. sensor temp. (°C)", + "Avg. sensor temp. 
(Celsius)", lambda: round( np.mean([item.current for sublist in psutil.sensors_temperatures().values() for item in sublist], 1) ), From d5beea5acc953fb40a4d999bc9609323c3fbd00e Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Thu, 3 Dec 2020 18:03:29 +0000 Subject: [PATCH 11/11] display revision id Signed-off-by: Wenqi Li --- .github/workflows/pythonapp.yml | 3 +++ monai/__init__.py | 6 ++++-- monai/config/deviceconfig.py | 5 +++-- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml index 35ee5433cd..4db50b2723 100644 --- a/.github/workflows/pythonapp.yml +++ b/.github/workflows/pythonapp.yml @@ -97,6 +97,7 @@ jobs: - name: Run quick tests (CPU ${{ runner.os }}) run: | python -c 'import torch; print(torch.__version__); print(torch.rand(5,3))' + python -c "import monai; monai.config.print_config()" python -m unittest -v env: QUICKTEST: True @@ -145,6 +146,7 @@ jobs: - name: Run quick tests (CPU ${{ runner.os }}) run: | python -c 'import torch; print(torch.__version__); print(torch.rand(5,3))' + python -c "import monai; monai.config.print_config()" python -m tests.min_tests env: QUICKTEST: True @@ -230,6 +232,7 @@ jobs: echo $CUDA_VISIBLE_DEVICES python -c "import torch; print(torch.__version__); print('{} of GPUs available'.format(torch.cuda.device_count()))" python -c 'import torch; print(torch.rand(5,3, device=torch.device("cuda:0")))' + python -c "import monai; monai.config.print_config()" BUILD_MONAI=1 ./runtests.sh --quick if [ ${{ matrix.environment }} == "PT16+CUDA110" ]; then # test the clang-format tool downloading once diff --git a/monai/__init__.py b/monai/__init__.py index 50dba61c22..a6f5c75309 100644 --- a/monai/__init__.py +++ b/monai/__init__.py @@ -17,8 +17,10 @@ PY_REQUIRED_MAJOR = 3 PY_REQUIRED_MINOR = 6 -__version__ = get_versions()["version"] -del get_versions +version_dict = get_versions() +__version__ = version_dict.get("version", "0+unknown") +__revision_id__ = version_dict.get("full-revisionid", None) +del get_versions, version_dict __copyright__ = "(c) 2020 MONAI Consortium" diff --git a/monai/config/deviceconfig.py b/monai/config/deviceconfig.py index b252abdfcc..85a531059e 100644 --- a/monai/config/deviceconfig.py +++ b/monai/config/deviceconfig.py @@ -83,6 +83,7 @@ def print_config(file=sys.stdout): for k, v in get_config_values().items(): print(f"{k} version: {v}", file=file, flush=True) print(f"MONAI flags: HAS_EXT = {HAS_EXT}, USE_COMPILED = {USE_COMPILED}") + print(f"MONAI rev id: {monai.__revision_id__}") print("\nOptional dependencies:", file=file, flush=True) for k, v in get_optional_config_values().items(): @@ -114,7 +115,7 @@ def _dict_append(in_dict, key, fn): in_dict[key] = "UNKNOWN for given OS" -def get_system_info(file=sys.stdout) -> OrderedDict: +def get_system_info() -> OrderedDict: """ Get system info as an ordered dictionary. """ @@ -180,7 +181,7 @@ def print_system_info(file=sys.stdout) -> None: if not has_psutil: print("`psutil` required for `print_system_info`", file=file, flush=True) else: - for k, v in get_system_info(file).items(): + for k, v in get_system_info().items(): print(f"{k}: {v}", file=file, flush=True)
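Taken together, the series leaves three user-facing helpers: `print_debug_info`, the `get_system_info`/`get_gpu_info` dictionaries, and `monai.utils.get_package_version`. A minimal sketch of using them with a custom output stream, assuming the patches above are applied and the import paths match the files touched in the diffs (the `io.StringIO` buffer and variable names are illustrative only):

```python
import io

from monai.config.deviceconfig import get_gpu_info, get_system_info, print_debug_info
from monai.utils import get_package_version

# The helpers return plain OrderedDicts, so they can be logged or serialised.
sys_info = get_system_info()   # platform info always; CPU/memory details only when psutil is available
gpu_info = get_gpu_info()      # always contains "Num GPUs" and "Has CUDA"

# Every print() in the helpers accepts a `file` stream, so the report can be captured.
buf = io.StringIO()
print_debug_info(file=buf)
report = buf.getvalue()

# Version of a single optional dependency, or the "NOT INSTALLED..." default.
print(get_package_version("nibabel"))
```

Note that a few of the banner prints in `print_debug_info` (and the flag/revision lines in `print_config`) omit the `file=` argument, so when redirecting to a buffer those lines still go to the real stdout.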