From a74e9957e8a18501a6f70a1f3efb2ca27f0109e9 Mon Sep 17 00:00:00 2001 From: Mark Graham Date: Mon, 7 Aug 2023 16:26:15 +0100 Subject: [PATCH 01/15] Adds FID metric Signed-off-by: Mark Graham --- monai/metrics/__init__.py | 1 + monai/metrics/fid.py | 106 +++++++++++++++++++++++++++++++ tests/test_compute_fid_metric.py | 35 ++++++++++ 3 files changed, 142 insertions(+) create mode 100644 monai/metrics/fid.py create mode 100644 tests/test_compute_fid_metric.py diff --git a/monai/metrics/__init__.py b/monai/metrics/__init__.py index 4af1b5760d..45994f7d08 100644 --- a/monai/metrics/__init__.py +++ b/monai/metrics/__init__.py @@ -15,6 +15,7 @@ from .confusion_matrix import ConfusionMatrixMetric, compute_confusion_matrix_metric, get_confusion_matrix from .cumulative_average import CumulativeAverage from .f_beta_score import FBetaScore +from .fid import FIDMetric, compute_frechet_distance from .froc import compute_fp_tp_probs, compute_fp_tp_probs_nd, compute_froc_curve_data, compute_froc_score from .generalized_dice import GeneralizedDiceScore, compute_generalized_dice from .hausdorff_distance import HausdorffDistanceMetric, compute_hausdorff_distance, compute_percent_hausdorff_distance diff --git a/monai/metrics/fid.py b/monai/metrics/fid.py new file mode 100644 index 0000000000..03ab7b4a9d --- /dev/null +++ b/monai/metrics/fid.py @@ -0,0 +1,106 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from __future__ import annotations + +import numpy as np +import torch +from scipy import linalg + +from monai.metrics.metric import Metric + + +class FIDMetric(Metric): + """ + Frechet Inception Distance (FID). The FID calculates the distance between two distributions of feature vectors. + Based on: Heusel M. et al. "Gans trained by a two time-scale update rule converge to a local nash equilibrium." + https://arxiv.org/abs/1706.08500#. The inputs for this metric should be two groups of feature vectors (with format + (number images, number of features)) extracted from the a pretrained network. + + Originally, it was proposed to use the activations of the pool_3 layer of an Inception v3 pretrained with Imagenet. + However, others networks pretrained on medical datasets can be used as well (for example, RadImageNwt for 2D and + MedicalNet for 3D images). If the chosen model output is not a scalar, usually it is used a global spatial + average pooling. 
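+
+    The value reported is the squared Frechet distance between the Gaussians fitted to the two sets of
+    features, ``|mu_x - mu_y|^2 + Tr(sigma_x + sigma_y - 2 * sqrtm(sigma_x @ sigma_y))``, as returned by
+    ``compute_frechet_distance``.
+
+    Example (an illustrative sketch only; the feature dimension of 2048 is an arbitrary choice, any
+    pretrained feature extractor producing ``(number images, number of features)`` outputs can be used):
+
+        .. code-block:: python
+
+            import torch
+            from monai.metrics import FIDMetric
+
+            fid = FIDMetric()
+            y_pred_features = torch.randn(64, 2048)  # e.g. features extracted from generated images
+            y_features = torch.randn(64, 2048)  # e.g. features extracted from real images
+            score = fid(y_pred_features, y_features)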
+ """ + + def __init__(self) -> None: + super().__init__() + + def __call__(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor: + return get_fid_score(y_pred, y) + + +def get_fid_score(y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor: + y = y.double() + y_pred = y_pred.double() + + if y.ndimension() > 2: + raise ValueError("Inputs should have (number images, number of features) shape.") + + mu_y_pred = torch.mean(y_pred, dim=0) + sigma_y_pred = _cov(y_pred, rowvar=False) + mu_y = torch.mean(y, dim=0) + sigma_y = _cov(y, rowvar=False) + + return compute_frechet_distance(mu_y_pred, sigma_y_pred, mu_y, sigma_y) + + +def _cov(input_data: torch.Tensor, rowvar: bool = True) -> torch.Tensor: + """ + Estimate a covariance matrix of the variables. + + Args: + input_data: A 1-D or 2-D array containing multiple variables and observations. Each row of `m` represents a variable, + and each column a single observation of all those variables. + rowvar: If rowvar is True (default), then each row represents a variable, with observations in the columns. + Otherwise, the relationship is transposed: each column represents a variable, while the rows contain + observations. + """ + if input_data.dim() < 2: + input_data = input_data.view(1, -1) + + if not rowvar and input_data.size(0) != 1: + input_data = input_data.t() + + factor = 1.0 / (input_data.size(1) - 1) + input_data = input_data - torch.mean(input_data, dim=1, keepdim=True) + return factor * input_data.matmul(input_data.t()).squeeze() + + +def _sqrtm(input_data: torch.Tensor) -> torch.Tensor: + """Compute the square root of a matrix.""" + scipy_res, _ = linalg.sqrtm(input_data.detach().cpu().numpy().astype(np.float_), disp=False) + return torch.from_numpy(scipy_res) + + +def compute_frechet_distance( + mu_x: torch.Tensor, sigma_x: torch.Tensor, mu_y: torch.Tensor, sigma_y: torch.Tensor, epsilon: float = 1e-6 +) -> torch.Tensor: + """The Frechet distance between multivariate normal distributions.""" + diff = mu_x - mu_y + + covmean = _sqrtm(sigma_x.mm(sigma_y)) + + # Product might be almost singular + if not torch.isfinite(covmean).all(): + print(f"FID calculation produces singular product; adding {epsilon} to diagonal of covariance estimates") + offset = torch.eye(sigma_x.size(0), device=mu_x.device, dtype=mu_x.dtype) * epsilon + covmean = _sqrtm((sigma_x + offset).mm(sigma_y + offset)) + + # Numerical error might give slight imaginary component + if torch.is_complex(covmean): + if not torch.allclose(torch.diagonal(covmean).imag, torch.tensor(0, dtype=torch.double), atol=1e-3): + raise ValueError(f"Imaginary component {torch.max(torch.abs(covmean.imag))} too high.") + covmean = covmean.real + + tr_covmean = torch.trace(covmean) + return diff.dot(diff) + torch.trace(sigma_x) + torch.trace(sigma_y) - 2 * tr_covmean diff --git a/tests/test_compute_fid_metric.py b/tests/test_compute_fid_metric.py new file mode 100644 index 0000000000..63ad416895 --- /dev/null +++ b/tests/test_compute_fid_metric.py @@ -0,0 +1,35 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import unittest + +import numpy as np +import torch + +from monai.metrics import FIDMetric + + +class TestFIDMetric(unittest.TestCase): + def test_results(self): + x = torch.Tensor([[1, 2], [1, 2], [1, 2]]) + y = torch.Tensor([[2, 2], [1, 2], [1, 2]]) + results = FIDMetric()(x, y) + np.testing.assert_allclose(results.cpu().numpy(), 0.4444, atol=1e-4) + + def test_input_dimensions(self): + with self.assertRaises(ValueError): + FIDMetric()(torch.ones([3, 3, 144, 144]), torch.ones([3, 3, 145, 145])) + + +if __name__ == "__main__": + unittest.main() From c98c4e797d5c4f8e9aa00cc2b716b1b64c0276b9 Mon Sep 17 00:00:00 2001 From: Mark Graham Date: Mon, 7 Aug 2023 16:49:52 +0100 Subject: [PATCH 02/15] Adds MMD metric Signed-off-by: Mark Graham --- monai/metrics/__init__.py | 1 + monai/metrics/mmd.py | 87 ++++++++++++++++++++++++++++++++ tests/test_compute_mmd_metric.py | 49 ++++++++++++++++++ 3 files changed, 137 insertions(+) create mode 100644 monai/metrics/mmd.py create mode 100644 tests/test_compute_mmd_metric.py diff --git a/monai/metrics/__init__.py b/monai/metrics/__init__.py index 45994f7d08..82a763ad47 100644 --- a/monai/metrics/__init__.py +++ b/monai/metrics/__init__.py @@ -23,6 +23,7 @@ from .meandice import DiceHelper, DiceMetric, compute_dice from .meaniou import MeanIoU, compute_iou, compute_meaniou from .metric import Cumulative, CumulativeIterationMetric, IterationMetric, Metric +from .mmd import MMDMetric, compute_mmd from .panoptic_quality import PanopticQualityMetric, compute_panoptic_quality from .regression import MAEMetric, MSEMetric, PSNRMetric, RMSEMetric, SSIMMetric from .rocauc import ROCAUCMetric, compute_roc_auc diff --git a/monai/metrics/mmd.py b/monai/metrics/mmd.py new file mode 100644 index 0000000000..42324409d8 --- /dev/null +++ b/monai/metrics/mmd.py @@ -0,0 +1,87 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from collections.abc import Callable + +import torch + +from monai.metrics.metric import Metric + + +class MMDMetric(Metric): + """ + Unbiased Maximum Mean Discrepancy (MMD) is a kernel-based method for measuring the similarity between two + distributions. It is a non-negative metric where a smaller value indicates a closer match between the two + distributions. + + Gretton, A., et al,, 2012. A kernel two-sample test. The Journal of Machine Learning Research, 13(1), pp.723-773. + + Args: + y_transform: Callable to transform the y tensor before computing the metric. It is usually a Gaussian or Laplace + filter, but it can be any function that takes a tensor as input and returns a tensor as output such as a + feature extractor or an Identity function. + y_pred_transform: Callable to transform the y_pred tensor before computing the metric. 
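+
+    Example (an illustrative sketch only; the image sizes are arbitrary and, with no transforms given,
+    the raw flattened images are compared directly):
+
+        .. code-block:: python
+
+            import torch
+            from monai.metrics import MMDMetric
+
+            metric = MMDMetric()
+            y = torch.rand(8, 1, 64, 64)  # e.g. reference images
+            y_pred = torch.rand(8, 1, 64, 64)  # e.g. reconstructed images
+            score = metric(y, y_pred)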
+ """ + + def __init__(self, y_transform: Callable | None = None, y_pred_transform: Callable | None = None) -> None: + super().__init__() + + self.y_transform = y_transform + self.y_pred_transform = y_pred_transform + + def __call__(self, y: torch.Tensor, y_pred: torch.Tensor) -> torch.Tensor: + return compute_mmd(y, y_pred, self.y_transform, self.y_pred_transform) + + +def compute_mmd( + y: torch.Tensor, y_pred: torch.Tensor, y_transform: Callable | None, y_pred_transform: Callable | None +) -> torch.Tensor: + """ + Args: + y: first sample (e.g., the reference image). Its shape is (B,C,W,H) for 2D data and (B,C,W,H,D) for 3D. + y_pred: second sample (e.g., the reconstructed image). It has similar shape as y. + """ + + # Beta and Gamma are not calculated since torch.mean is used at return + beta = 1.0 + gamma = 2.0 + + if y_transform is not None: + y = y_transform(y) + + if y_pred_transform is not None: + y_pred = y_pred_transform(y_pred) + + if y_pred.shape != y.shape: + raise ValueError( + "y_pred and y shapes dont match after being processed " + f"by their transforms, received y_pred: {y_pred.shape} and y: {y.shape}" + ) + + for d in range(len(y.shape) - 1, 1, -1): + y = y.squeeze(dim=d) + y_pred = y_pred.squeeze(dim=d) + + y = y.view(y.shape[0], -1) + y_pred = y_pred.view(y_pred.shape[0], -1) + + y_y = torch.mm(y, y.t()) + y_pred_y_pred = torch.mm(y_pred, y_pred.t()) + y_pred_y = torch.mm(y_pred, y.t()) + + y_y = y_y / y.shape[1] + y_pred_y_pred = y_pred_y_pred / y.shape[1] + y_pred_y = y_pred_y / y.shape[1] + + # Ref. 1 Eq. 3 (found under Lemma 6) + return beta * (torch.mean(y_y) + torch.mean(y_pred_y_pred)) - gamma * torch.mean(y_pred_y) diff --git a/tests/test_compute_mmd_metric.py b/tests/test_compute_mmd_metric.py new file mode 100644 index 0000000000..fcdc74cce6 --- /dev/null +++ b/tests/test_compute_mmd_metric.py @@ -0,0 +1,49 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations + +import unittest + +import numpy as np +import torch +from parameterized import parameterized + +from monai.metrics import MMDMetric + +TEST_CASES = [ + [ + {"y_transform": None, "y_pred_transform": None}, + {"y": torch.ones([3, 3, 144, 144]), "y_pred": torch.ones([3, 3, 144, 144])}, + 0.0, + ], + [ + {"y_transform": None, "y_pred_transform": None}, + {"y": torch.ones([3, 3, 144, 144, 144]), "y_pred": torch.ones([3, 3, 144, 144, 144])}, + 0.0, + ], +] + + +class TestMMDMetric(unittest.TestCase): + @parameterized.expand(TEST_CASES) + def test_results(self, input_param, input_data, expected_val): + metric = MMDMetric(**input_param) + results = metric(**input_data) + np.testing.assert_allclose(results.detach().cpu().numpy(), expected_val, rtol=1e-4) + + def test_if_inputs_different_shapes(self): + with self.assertRaises(ValueError): + MMDMetric()(torch.ones([3, 3, 144, 144]), torch.ones([3, 3, 145, 145])) + + +if __name__ == "__main__": + unittest.main() From 4bbfa2a7af8a68b4baac12076749afeb52320f26 Mon Sep 17 00:00:00 2001 From: Mark Graham Date: Tue, 8 Aug 2023 08:47:21 +0100 Subject: [PATCH 03/15] Optional imports Signed-off-by: Mark Graham --- monai/metrics/fid.py | 6 ++++-- tests/test_compute_fid_metric.py | 4 ++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/monai/metrics/fid.py b/monai/metrics/fid.py index 03ab7b4a9d..b7b5e3b23e 100644 --- a/monai/metrics/fid.py +++ b/monai/metrics/fid.py @@ -14,9 +14,11 @@ import numpy as np import torch -from scipy import linalg from monai.metrics.metric import Metric +from monai.utils import optional_import + +scipy, _ = optional_import("scipy") class FIDMetric(Metric): @@ -78,7 +80,7 @@ def _cov(input_data: torch.Tensor, rowvar: bool = True) -> torch.Tensor: def _sqrtm(input_data: torch.Tensor) -> torch.Tensor: """Compute the square root of a matrix.""" - scipy_res, _ = linalg.sqrtm(input_data.detach().cpu().numpy().astype(np.float_), disp=False) + scipy_res, _ = scipy.linalg.sqrtm(input_data.detach().cpu().numpy().astype(np.float_), disp=False) return torch.from_numpy(scipy_res) diff --git a/tests/test_compute_fid_metric.py b/tests/test_compute_fid_metric.py index 63ad416895..1c7c3273fe 100644 --- a/tests/test_compute_fid_metric.py +++ b/tests/test_compute_fid_metric.py @@ -17,8 +17,12 @@ import torch from monai.metrics import FIDMetric +from monai.utils import optional_import +_, has_scipy = optional_import("scipy") + +@unittest.skipUnless(has_scipy, "Requires scipy") class TestFIDMetric(unittest.TestCase): def test_results(self): x = torch.Tensor([[1, 2], [1, 2], [1, 2]]) From 917e50468fbcd2753e676e9cca24b40c7069b198 Mon Sep 17 00:00:00 2001 From: Mark Graham Date: Tue, 8 Aug 2023 13:11:33 +0100 Subject: [PATCH 04/15] Adds ms-ssim metric Signed-off-by: Mark Graham --- monai/metrics/__init__.py | 11 +- monai/metrics/regression.py | 160 ++++++++++++++++++++ tests/test_compute_multiscalessim_metric.py | 82 ++++++++++ 3 files changed, 252 insertions(+), 1 deletion(-) create mode 100644 tests/test_compute_multiscalessim_metric.py diff --git a/monai/metrics/__init__.py b/monai/metrics/__init__.py index 82a763ad47..3809f59d2d 100644 --- a/monai/metrics/__init__.py +++ b/monai/metrics/__init__.py @@ -25,7 +25,16 @@ from .metric import Cumulative, CumulativeIterationMetric, IterationMetric, Metric from .mmd import MMDMetric, compute_mmd from .panoptic_quality import PanopticQualityMetric, compute_panoptic_quality -from .regression import MAEMetric, MSEMetric, PSNRMetric, RMSEMetric, SSIMMetric 
+from .regression import ( + MAEMetric, + MSEMetric, + MultiScaleSSIMMetric, + PSNRMetric, + RMSEMetric, + SSIMMetric, + compute_ms_ssim, + compute_ssim_and_cs, +) from .rocauc import ROCAUCMetric, compute_roc_auc from .surface_dice import SurfaceDiceMetric, compute_surface_dice from .surface_distance import SurfaceDistanceMetric, compute_average_surface_distance diff --git a/monai/metrics/regression.py b/monai/metrics/regression.py index c315a2eac0..341a8621d3 100644 --- a/monai/metrics/regression.py +++ b/monai/metrics/regression.py @@ -441,3 +441,163 @@ def compute_ssim_and_cs( ssim_value_full_image = ((2 * mu_x * mu_y + c1) / (mu_x**2 + mu_y**2 + c1)) * contrast_sensitivity return ssim_value_full_image, contrast_sensitivity + + +class MultiScaleSSIMMetric(RegressionMetric): + """ + Computes the Multi-Scale Structural Similarity Index Measure (MS-SSIM). + + [1] Wang, Z., Simoncelli, E.P. and Bovik, A.C., 2003, November. + Multiscale structural similarity for image quality assessment. + In The Thirty-Seventh Asilomar Conference on Signals, Systems + & Computers, 2003 (Vol. 2, pp. 1398-1402). Ieee. + + Args: + spatial_dims: number of spatial dimensions of the input images. + data_range: value range of input images. (usually 1.0 or 255) + kernel_type: type of kernel, can be "gaussian" or "uniform". + kernel_size: size of kernel + kernel_sigma: standard deviation for Gaussian kernel. + k1: stability constant used in the luminance denominator + k2: stability constant used in the contrast denominator + weights: parameters for image similarity and contrast sensitivity at different resolution scores. + reduction: define the mode to reduce metrics, will only execute reduction on `not-nan` values, + available reduction modes: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``, + ``"mean_channel"``, ``"sum_channel"``}, default to ``"mean"``. 
if "none", will not do reduction + get_not_nans: whether to return the `not_nans` count, if True, aggregate() returns (metric, not_nans) + """ + + def __init__( + self, + spatial_dims: int, + data_range: float = 1.0, + kernel_type: KernelType | str = KernelType.GAUSSIAN, + kernel_size: int | Sequence[int] = 11, + kernel_sigma: float | Sequence[float] = 1.5, + k1: float = 0.01, + k2: float = 0.03, + weights: Sequence[float] = (0.0448, 0.2856, 0.3001, 0.2363, 0.1333), + reduction: MetricReduction | str = MetricReduction.MEAN, + get_not_nans: bool = False, + ) -> None: + super().__init__(reduction=reduction, get_not_nans=get_not_nans) + + self.spatial_dims = spatial_dims + self.data_range = data_range + self.kernel_type = kernel_type + + if not isinstance(kernel_size, Sequence): + kernel_size = ensure_tuple_rep(kernel_size, spatial_dims) + self.kernel_size = kernel_size + + if not isinstance(kernel_sigma, Sequence): + kernel_sigma = ensure_tuple_rep(kernel_sigma, spatial_dims) + self.kernel_sigma = kernel_sigma + + self.k1 = k1 + self.k2 = k2 + self.weights = weights + + def _compute_metric(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor: + return compute_ms_ssim( + y_pred=y_pred, + y=y, + spatial_dims=self.spatial_dims, + data_range=self.data_range, + kernel_type=self.kernel_type, + kernel_size=self.kernel_size, + kernel_sigma=self.kernel_sigma, + k1=self.k1, + k2=self.k2, + weights=self.weights, + ) + + +def compute_ms_ssim( + y_pred: torch.Tensor, + y: torch.Tensor, + spatial_dims: int, + data_range: float = 1.0, + kernel_type: KernelType | str = KernelType.GAUSSIAN, + kernel_size: int | Sequence[int] = 11, + kernel_sigma: float | Sequence[float] = 1.5, + k1: float = 0.01, + k2: float = 0.03, + weights: Sequence[float] = (0.0448, 0.2856, 0.3001, 0.2363, 0.1333), +) -> torch.Tensor: + """ + Args: + y_pred: Predicted image. + It must be a 2D or 3D batch-first tensor [B,C,H,W] or [B,C,H,W,D]. + y: Reference image. + It must be a 2D or 3D batch-first tensor [B,C,H,W] or [B,C,H,W,D]. + spatial_dims: number of spatial dimensions of the input images. + data_range: value range of input images. (usually 1.0 or 255) + kernel_type: type of kernel, can be "gaussian" or "uniform". + kernel_size: size of kernel + kernel_sigma: standard deviation for Gaussian kernel. + k1: stability constant used in the luminance denominator + k2: stability constant used in the contrast denominator + weights: parameters for image similarity and contrast sensitivity at different resolution scores. + Raises: + ValueError: when `y_pred` is not a 2D or 3D image. + """ + dims = y_pred.ndimension() + if spatial_dims == 2 and dims != 4: + raise ValueError( + f"y_pred should have 4 dimensions (batch, channel, height, width) when using {spatial_dims} " + f"spatial dimensions, got {dims}." + ) + + if spatial_dims == 3 and dims != 5: + raise ValueError( + f"y_pred should have 4 dimensions (batch, channel, height, width, depth) when using {spatial_dims}" + f" spatial dimensions, got {dims}." + ) + + # check if image have enough size for the number of downsamplings and the size of the kernel + weights_div = max(1, (len(weights) - 1)) ** 2 + y_pred_spatial_dims = y_pred.shape[2:] + for i in range(len(y_pred_spatial_dims)): + if y_pred_spatial_dims[i] // weights_div <= kernel_size[i] - 1: + raise ValueError( + f"For a given number of `weights` parameters {len(weights)} and kernel size " + f"{kernel_size[i]}, the image height must be larger than " + f"{(kernel_size[i] - 1) * weights_div}." 
+ ) + + weights = torch.tensor(weights, device=y_pred.device, dtype=torch.float) + + avg_pool = getattr(F, f"avg_pool{spatial_dims}d") + + multiscale_list: list[torch.Tensor] = [] + for _ in range(len(weights)): + ssim, cs = compute_ssim_and_cs( + y_pred=y_pred, + y=y, + spatial_dims=spatial_dims, + data_range=data_range, + kernel_type=kernel_type, + kernel_size=kernel_size, + kernel_sigma=kernel_sigma, + k1=k1, + k2=k2, + ) + + cs_per_batch = cs.view(cs.shape[0], -1).mean(1) + + multiscale_list.append(torch.relu(cs_per_batch)) + y_pred = avg_pool(y_pred, kernel_size=2) + y = avg_pool(y, kernel_size=2) + + ssim = ssim.view(ssim.shape[0], -1).mean(1) + multiscale_list[-1] = torch.relu(ssim) + multiscale_list = torch.stack(multiscale_list) + + ms_ssim_value_full_image = torch.prod(multiscale_list ** weights.view(-1, 1), dim=0) + + ms_ssim_per_batch: torch.Tensor = ms_ssim_value_full_image.view(ms_ssim_value_full_image.shape[0], -1).mean( + 1, keepdim=True + ) + + return ms_ssim_per_batch diff --git a/tests/test_compute_multiscalessim_metric.py b/tests/test_compute_multiscalessim_metric.py new file mode 100644 index 0000000000..4ebc5b7935 --- /dev/null +++ b/tests/test_compute_multiscalessim_metric.py @@ -0,0 +1,82 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations + +import unittest + +import torch + +from monai.metrics import MultiScaleSSIMMetric +from monai.utils import set_determinism + + +class TestMultiScaleSSIMMetric(unittest.TestCase): + def test2d_gaussian(self): + set_determinism(0) + preds = torch.abs(torch.randn(1, 1, 64, 64)) + target = torch.abs(torch.randn(1, 1, 64, 64)) + preds = preds / preds.max() + target = target / target.max() + + metric = MultiScaleSSIMMetric(spatial_dims=2, data_range=1.0, kernel_type="gaussian", weights=[0.5, 0.5]) + metric(preds, target) + result = metric.aggregate() + expected_value = 0.023176 + self.assertTrue(expected_value - result.item() < 0.000001) + + def test2d_uniform(self): + set_determinism(0) + preds = torch.abs(torch.randn(1, 1, 64, 64)) + target = torch.abs(torch.randn(1, 1, 64, 64)) + preds = preds / preds.max() + target = target / target.max() + + metric = MultiScaleSSIMMetric(spatial_dims=2, data_range=1.0, kernel_type="uniform", weights=[0.5, 0.5]) + metric(preds, target) + result = metric.aggregate() + expected_value = 0.022655 + self.assertTrue(expected_value - result.item() < 0.000001) + + def test3d_gaussian(self): + set_determinism(0) + preds = torch.abs(torch.randn(1, 1, 64, 64, 64)) + target = torch.abs(torch.randn(1, 1, 64, 64, 64)) + preds = preds / preds.max() + target = target / target.max() + + metric = MultiScaleSSIMMetric(spatial_dims=3, data_range=1.0, kernel_type="gaussian", weights=[0.5, 0.5]) + metric(preds, target) + result = metric.aggregate() + expected_value = 0.061796 + self.assertTrue(expected_value - result.item() < 0.000001) + + def input_ill_input_shape2d(self): + metric = MultiScaleSSIMMetric(spatial_dims=3, weights=[0.5, 0.5]) + + with self.assertRaises(ValueError): + metric(torch.randn(1, 1, 64, 64), torch.randn(1, 1, 64, 64)) + + def input_ill_input_shape3d(self): + metric = MultiScaleSSIMMetric(spatial_dims=2, weights=[0.5, 0.5]) + + with self.assertRaises(ValueError): + metric(torch.randn(1, 1, 64, 64, 64), torch.randn(1, 1, 64, 64, 64)) + + def small_inputs(self): + metric = MultiScaleSSIMMetric(spatial_dims=2) + + with self.assertRaises(ValueError): + metric(torch.randn(1, 1, 16, 16, 16), torch.randn(1, 1, 16, 16, 16)) + + +if __name__ == "__main__": + unittest.main() From 3f9a49294e5ce9158478e3f10db4ab4e4484f7f3 Mon Sep 17 00:00:00 2001 From: Mark Graham Date: Tue, 8 Aug 2023 16:28:33 +0100 Subject: [PATCH 05/15] Mypy fixes --- monai/metrics/regression.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/monai/metrics/regression.py b/monai/metrics/regression.py index 341a8621d3..1ab7def39d 100644 --- a/monai/metrics/regression.py +++ b/monai/metrics/regression.py @@ -555,6 +555,11 @@ def compute_ms_ssim( f" spatial dimensions, got {dims}." ) + if not isinstance(kernel_size, Sequence): + kernel_size = ensure_tuple_rep(kernel_size, spatial_dims) + + if not isinstance(kernel_sigma, Sequence): + kernel_sigma = ensure_tuple_rep(kernel_sigma, spatial_dims) # check if image have enough size for the number of downsamplings and the size of the kernel weights_div = max(1, (len(weights) - 1)) ** 2 y_pred_spatial_dims = y_pred.shape[2:] @@ -566,12 +571,12 @@ def compute_ms_ssim( f"{(kernel_size[i] - 1) * weights_div}." 
) - weights = torch.tensor(weights, device=y_pred.device, dtype=torch.float) + weights_tensor = torch.tensor(weights, device=y_pred.device, dtype=torch.float) avg_pool = getattr(F, f"avg_pool{spatial_dims}d") multiscale_list: list[torch.Tensor] = [] - for _ in range(len(weights)): + for _ in range(len(weights_tensor)): ssim, cs = compute_ssim_and_cs( y_pred=y_pred, y=y, @@ -592,9 +597,9 @@ def compute_ms_ssim( ssim = ssim.view(ssim.shape[0], -1).mean(1) multiscale_list[-1] = torch.relu(ssim) - multiscale_list = torch.stack(multiscale_list) + multiscale_list_tensor = torch.stack(multiscale_list) - ms_ssim_value_full_image = torch.prod(multiscale_list ** weights.view(-1, 1), dim=0) + ms_ssim_value_full_image = torch.prod(multiscale_list_tensor ** weights_tensor.view(-1, 1), dim=0) ms_ssim_per_batch: torch.Tensor = ms_ssim_value_full_image.view(ms_ssim_value_full_image.shape[0], -1).mean( 1, keepdim=True From b7734c68e58c8ef0866bd560bfe45e8ae2667978 Mon Sep 17 00:00:00 2001 From: Mark Graham Date: Thu, 10 Aug 2023 13:29:14 +0100 Subject: [PATCH 06/15] DCO Remediation Commit for Mark Graham I, Mark Graham , hereby add my Signed-off-by to this commit: 3f9a49294e5ce9158478e3f10db4ab4e4484f7f3 Signed-off-by: Mark Graham --- monai/metrics/regression.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monai/metrics/regression.py b/monai/metrics/regression.py index 1ab7def39d..727e7bb8ab 100644 --- a/monai/metrics/regression.py +++ b/monai/metrics/regression.py @@ -589,7 +589,7 @@ def compute_ms_ssim( k2=k2, ) - cs_per_batch = cs.view(cs.shape[0], -1).mean(1) + cs_per_batch = cs.view(cs.shape[0], -1).mean(1) multiscale_list.append(torch.relu(cs_per_batch)) y_pred = avg_pool(y_pred, kernel_size=2) From edf274606f36c4d78d247af35102a827217d7e2e Mon Sep 17 00:00:00 2001 From: Mark Graham Date: Thu, 10 Aug 2023 13:29:34 +0100 Subject: [PATCH 07/15] Undo minor change for DCO commit Signed-off-by: Mark Graham --- monai/metrics/regression.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monai/metrics/regression.py b/monai/metrics/regression.py index 727e7bb8ab..1ab7def39d 100644 --- a/monai/metrics/regression.py +++ b/monai/metrics/regression.py @@ -589,7 +589,7 @@ def compute_ms_ssim( k2=k2, ) - cs_per_batch = cs.view(cs.shape[0], -1).mean(1) + cs_per_batch = cs.view(cs.shape[0], -1).mean(1) multiscale_list.append(torch.relu(cs_per_batch)) y_pred = avg_pool(y_pred, kernel_size=2) From 3c8112870d9d11ba0ffbaa8eed7a185a219f9e29 Mon Sep 17 00:00:00 2001 From: Mark Graham Date: Fri, 11 Aug 2023 04:42:45 -0600 Subject: [PATCH 08/15] Update monai/metrics/fid.py Co-authored-by: Wenqi Li <831580+wyli@users.noreply.github.com> Signed-off-by: Mark Graham --- monai/metrics/fid.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/monai/metrics/fid.py b/monai/metrics/fid.py index b7b5e3b23e..c99d2d14f9 100644 --- a/monai/metrics/fid.py +++ b/monai/metrics/fid.py @@ -34,9 +34,6 @@ class FIDMetric(Metric): average pooling. 
""" - def __init__(self) -> None: - super().__init__() - def __call__(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor: return get_fid_score(y_pred, y) From 78cc8cc937fdf6690aa6e8e5c76932c69922477e Mon Sep 17 00:00:00 2001 From: Mark Graham Date: Fri, 11 Aug 2023 13:27:44 +0100 Subject: [PATCH 09/15] Update docs Signed-off-by: Mark Graham --- docs/source/metrics.rst | 20 ++++++++++++++++++++ monai/metrics/fid.py | 2 +- monai/metrics/regression.py | 8 ++++---- 3 files changed, 25 insertions(+), 5 deletions(-) diff --git a/docs/source/metrics.rst b/docs/source/metrics.rst index 51b474cfa8..5aab94c791 100644 --- a/docs/source/metrics.rst +++ b/docs/source/metrics.rst @@ -141,6 +141,24 @@ Metrics ------------------------------------- .. autoclass:: monai.metrics.regression.SSIMMetric +`Multi-scale structural similarity index measure` +------------------------------------------------- +.. autoclass:: MultiScaleSSIMMetric + +`Fréchet Inception Distance` +------------------------------ +.. autofunction:: compute_frechet_distance + +.. autoclass:: FIDMetric + :members: + +`Maximum Mean Discrepancy` +------------------------------ +.. autofunction:: compute_mmd + +.. autoclass:: MMDMetric + :members: + `Cumulative average` -------------------- .. autoclass:: CumulativeAverage @@ -156,6 +174,8 @@ Metrics .. autoclass:: MetricsReloadedCategorical :members: + + Utilities --------- .. automodule:: monai.metrics.utils diff --git a/monai/metrics/fid.py b/monai/metrics/fid.py index c99d2d14f9..2e8f783ae6 100644 --- a/monai/metrics/fid.py +++ b/monai/metrics/fid.py @@ -25,7 +25,7 @@ class FIDMetric(Metric): """ Frechet Inception Distance (FID). The FID calculates the distance between two distributions of feature vectors. Based on: Heusel M. et al. "Gans trained by a two time-scale update rule converge to a local nash equilibrium." - https://arxiv.org/abs/1706.08500#. The inputs for this metric should be two groups of feature vectors (with format + https://arxiv.org/abs/1706.08500. The inputs for this metric should be two groups of feature vectors (with format (number images, number of features)) extracted from the a pretrained network. Originally, it was proposed to use the activations of the pool_3 layer of an Inception v3 pretrained with Imagenet. diff --git a/monai/metrics/regression.py b/monai/metrics/regression.py index 1ab7def39d..6265bfa378 100644 --- a/monai/metrics/regression.py +++ b/monai/metrics/regression.py @@ -447,10 +447,10 @@ class MultiScaleSSIMMetric(RegressionMetric): """ Computes the Multi-Scale Structural Similarity Index Measure (MS-SSIM). - [1] Wang, Z., Simoncelli, E.P. and Bovik, A.C., 2003, November. - Multiscale structural similarity for image quality assessment. - In The Thirty-Seventh Asilomar Conference on Signals, Systems - & Computers, 2003 (Vol. 2, pp. 1398-1402). Ieee. + MS-SSIM reference paper: + Wang, Z., Simoncelli, E.P. and Bovik, A.C., 2003, November. "Multiscale structural + similarity for image quality assessment." In The Thirty-Seventh Asilomar Conference + on Signals, Systems & Computers, 2003 (Vol. 2, pp. 1398-1402). IEEE Args: spatial_dims: number of spatial dimensions of the input images. 
From fb8ee0832ab8166de1174970b29e2fd8c236fd73 Mon Sep 17 00:00:00 2001 From: Mark Graham Date: Mon, 14 Aug 2023 12:17:40 +0100 Subject: [PATCH 10/15] Updates docstring Signed-off-by: Mark Graham --- monai/metrics/fid.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/monai/metrics/fid.py b/monai/metrics/fid.py index 2e8f783ae6..194d596f67 100644 --- a/monai/metrics/fid.py +++ b/monai/metrics/fid.py @@ -26,12 +26,12 @@ class FIDMetric(Metric): Frechet Inception Distance (FID). The FID calculates the distance between two distributions of feature vectors. Based on: Heusel M. et al. "Gans trained by a two time-scale update rule converge to a local nash equilibrium." https://arxiv.org/abs/1706.08500. The inputs for this metric should be two groups of feature vectors (with format - (number images, number of features)) extracted from the a pretrained network. + (number images, number of features)) extracted from a pretrained network. Originally, it was proposed to use the activations of the pool_3 layer of an Inception v3 pretrained with Imagenet. However, others networks pretrained on medical datasets can be used as well (for example, RadImageNwt for 2D and - MedicalNet for 3D images). If the chosen model output is not a scalar, usually it is used a global spatial - average pooling. + MedicalNet for 3D images). If the chosen model output is not a scalar, a global spatia average pooling should be + used. """ def __call__(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor: @@ -39,6 +39,12 @@ def __call__(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor: def get_fid_score(y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor: + """Computes the FID score metric on a batch of feature vectors. + + Args: + y_pred: feature vectors extracted from a pretrained network run on generated images. + y: feature vectors extracted from a pretrained network run on images from the real data distribution. + """ y = y.double() y_pred = y_pred.double() From 5efe5b05b94b1be115e1b4ee284779719c062cd3 Mon Sep 17 00:00:00 2001 From: Mark Graham Date: Tue, 15 Aug 2023 12:01:25 +0100 Subject: [PATCH 11/15] Updates MMD calculation to match original paper, and provide just a single mapping for y Signed-off-by: Mark Graham --- monai/metrics/mmd.py | 50 +++++++++++++++++--------------- monai/metrics/regression.py | 2 +- tests/test_compute_mmd_metric.py | 6 ++-- 3 files changed, 31 insertions(+), 27 deletions(-) diff --git a/monai/metrics/mmd.py b/monai/metrics/mmd.py index 42324409d8..1f6215debc 100644 --- a/monai/metrics/mmd.py +++ b/monai/metrics/mmd.py @@ -27,40 +27,30 @@ class MMDMetric(Metric): Gretton, A., et al,, 2012. A kernel two-sample test. The Journal of Machine Learning Research, 13(1), pp.723-773. Args: - y_transform: Callable to transform the y tensor before computing the metric. It is usually a Gaussian or Laplace + y_mapping: Callable to transform the y tensors before computing the metric. It is usually a Gaussian or Laplace filter, but it can be any function that takes a tensor as input and returns a tensor as output such as a - feature extractor or an Identity function. - y_pred_transform: Callable to transform the y_pred tensor before computing the metric. + feature extractor or an Identity function., e.g. `y_mapping = lambda x: x.square()`. 
""" - def __init__(self, y_transform: Callable | None = None, y_pred_transform: Callable | None = None) -> None: + def __init__(self, y_mapping: Callable | None = None) -> None: super().__init__() - - self.y_transform = y_transform - self.y_pred_transform = y_pred_transform + self.y_mapping = y_mapping def __call__(self, y: torch.Tensor, y_pred: torch.Tensor) -> torch.Tensor: - return compute_mmd(y, y_pred, self.y_transform, self.y_pred_transform) + return compute_mmd(y, y_pred, self.y_mapping) -def compute_mmd( - y: torch.Tensor, y_pred: torch.Tensor, y_transform: Callable | None, y_pred_transform: Callable | None -) -> torch.Tensor: +def compute_mmd(y: torch.Tensor, y_pred: torch.Tensor, y_mapping: Callable | None) -> torch.Tensor: """ Args: y: first sample (e.g., the reference image). Its shape is (B,C,W,H) for 2D data and (B,C,W,H,D) for 3D. y_pred: second sample (e.g., the reconstructed image). It has similar shape as y. + y_mapping: Callable to transform the y tensors before computing the metric. """ - # Beta and Gamma are not calculated since torch.mean is used at return - beta = 1.0 - gamma = 2.0 - - if y_transform is not None: - y = y_transform(y) - - if y_pred_transform is not None: - y_pred = y_pred_transform(y_pred) + if y_mapping is not None: + y = y_mapping(y) + y_pred = y_mapping(y_pred) if y_pred.shape != y.shape: raise ValueError( @@ -79,9 +69,21 @@ def compute_mmd( y_pred_y_pred = torch.mm(y_pred, y_pred.t()) y_pred_y = torch.mm(y_pred, y.t()) - y_y = y_y / y.shape[1] - y_pred_y_pred = y_pred_y_pred / y.shape[1] - y_pred_y = y_pred_y / y.shape[1] + m = y.shape[0] + n = y_pred.shape[0] # Ref. 1 Eq. 3 (found under Lemma 6) - return beta * (torch.mean(y_y) + torch.mean(y_pred_y_pred)) - gamma * torch.mean(y_pred_y) + # term 1 + c1 = 1 / (m * (m - 1)) + A = torch.sum(y_y - torch.diag(torch.diagonal(y_y))) + + # term 2 + c2 = 1 / (n * (n - 1)) + B = torch.sum(y_pred_y_pred - torch.diag(torch.diagonal(y_pred_y_pred))) + + # term 3 + c3 = 2 / (m * n) + C = torch.sum(y_pred_y) + + mmd = c1 * A + c2 * B - c3 * C + return mmd diff --git a/monai/metrics/regression.py b/monai/metrics/regression.py index 6265bfa378..f37230f09e 100644 --- a/monai/metrics/regression.py +++ b/monai/metrics/regression.py @@ -531,7 +531,7 @@ def compute_ms_ssim( It must be a 2D or 3D batch-first tensor [B,C,H,W] or [B,C,H,W,D]. y: Reference image. It must be a 2D or 3D batch-first tensor [B,C,H,W] or [B,C,H,W,D]. - spatial_dims: number of spatial dimensions of the input images. + spatial_dims: number of spatial dimensions of the input images. data_range: value range of input images. (usually 1.0 or 255) kernel_type: type of kernel, can be "gaussian" or "uniform". 
kernel_size: size of kernel diff --git a/tests/test_compute_mmd_metric.py b/tests/test_compute_mmd_metric.py index fcdc74cce6..b1c2fb5915 100644 --- a/tests/test_compute_mmd_metric.py +++ b/tests/test_compute_mmd_metric.py @@ -20,13 +20,15 @@ from monai.metrics import MMDMetric TEST_CASES = [ + [{"y_mapping": None}, {"y": torch.ones([3, 3, 144, 144]), "y_pred": torch.ones([3, 3, 144, 144])}, 0.0], + [{"y_mapping": None}, {"y": torch.ones([3, 3, 144, 144, 144]), "y_pred": torch.ones([3, 3, 144, 144, 144])}, 0.0], [ - {"y_transform": None, "y_pred_transform": None}, + {"y_mapping": lambda x: x.square()}, {"y": torch.ones([3, 3, 144, 144]), "y_pred": torch.ones([3, 3, 144, 144])}, 0.0, ], [ - {"y_transform": None, "y_pred_transform": None}, + {"y_mapping": lambda x: x.square()}, {"y": torch.ones([3, 3, 144, 144, 144]), "y_pred": torch.ones([3, 3, 144, 144, 144])}, 0.0, ], From a5cdba920826985288b579ed8dd2a8efca420528 Mon Sep 17 00:00:00 2001 From: Mark Graham Date: Tue, 15 Aug 2023 15:26:20 +0100 Subject: [PATCH 12/15] Make variables lowercase Signed-off-by: Mark Graham --- monai/metrics/mmd.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/monai/metrics/mmd.py b/monai/metrics/mmd.py index 1f6215debc..a221612e41 100644 --- a/monai/metrics/mmd.py +++ b/monai/metrics/mmd.py @@ -75,15 +75,15 @@ def compute_mmd(y: torch.Tensor, y_pred: torch.Tensor, y_mapping: Callable | Non # Ref. 1 Eq. 3 (found under Lemma 6) # term 1 c1 = 1 / (m * (m - 1)) - A = torch.sum(y_y - torch.diag(torch.diagonal(y_y))) + a = torch.sum(y_y - torch.diag(torch.diagonal(y_y))) # term 2 c2 = 1 / (n * (n - 1)) - B = torch.sum(y_pred_y_pred - torch.diag(torch.diagonal(y_pred_y_pred))) + b = torch.sum(y_pred_y_pred - torch.diag(torch.diagonal(y_pred_y_pred))) # term 3 c3 = 2 / (m * n) - C = torch.sum(y_pred_y) + b = torch.sum(y_pred_y) - mmd = c1 * A + c2 * B - c3 * C + mmd = c1 * a + c2 * b - c3 * c return mmd From 20de3563a1272ffcaa154a02c9d8932b6907a0bd Mon Sep 17 00:00:00 2001 From: Mark Graham Date: Tue, 15 Aug 2023 15:35:23 +0100 Subject: [PATCH 13/15] Fix variable name Signed-off-by: Mark Graham --- monai/metrics/mmd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monai/metrics/mmd.py b/monai/metrics/mmd.py index a221612e41..ea36b71dc7 100644 --- a/monai/metrics/mmd.py +++ b/monai/metrics/mmd.py @@ -83,7 +83,7 @@ def compute_mmd(y: torch.Tensor, y_pred: torch.Tensor, y_mapping: Callable | Non # term 3 c3 = 2 / (m * n) - b = torch.sum(y_pred_y) + c = torch.sum(y_pred_y) mmd = c1 * a + c2 * b - c3 * c return mmd From 39206440c769878174d51ece4e2f3087223ebb4c Mon Sep 17 00:00:00 2001 From: Mark Graham Date: Wed, 16 Aug 2023 16:20:03 +0100 Subject: [PATCH 14/15] Add check for batch_size=1 Signed-off-by: Mark Graham --- monai/metrics/mmd.py | 2 ++ tests/test_compute_mmd_metric.py | 3 +++ 2 files changed, 5 insertions(+) diff --git a/monai/metrics/mmd.py b/monai/metrics/mmd.py index ea36b71dc7..5ba4cdf1b4 100644 --- a/monai/metrics/mmd.py +++ b/monai/metrics/mmd.py @@ -47,6 +47,8 @@ def compute_mmd(y: torch.Tensor, y_pred: torch.Tensor, y_mapping: Callable | Non y_pred: second sample (e.g., the reconstructed image). It has similar shape as y. y_mapping: Callable to transform the y tensors before computing the metric. 
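+
+    Note: both inputs must contain at least two samples, since the unbiased estimator terms
+        ``1 / (m * (m - 1))`` and ``1 / (n * (n - 1))`` are undefined for a single sample;
+        a ``ValueError`` is raised in that case.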
""" + if y_pred.shape[0] == 1 or y.shape[0] == 1: + raise ValueError("MMD metric requires at least two samples in y and y_pred.") if y_mapping is not None: y = y_mapping(y) diff --git a/tests/test_compute_mmd_metric.py b/tests/test_compute_mmd_metric.py index b1c2fb5915..da6e16bd86 100644 --- a/tests/test_compute_mmd_metric.py +++ b/tests/test_compute_mmd_metric.py @@ -46,6 +46,9 @@ def test_if_inputs_different_shapes(self): with self.assertRaises(ValueError): MMDMetric()(torch.ones([3, 3, 144, 144]), torch.ones([3, 3, 145, 145])) + def test_if_inputs_have_one_sample(self): + with self.assertRaises(ValueError): + MMDMetric()(torch.ones([1, 3, 144, 144]), torch.ones([1, 3, 144, 144])) if __name__ == "__main__": unittest.main() From 4b0ec8f187ea823564f43d1f2c3c555e36706913 Mon Sep 17 00:00:00 2001 From: Mark Graham Date: Wed, 16 Aug 2023 16:26:16 +0100 Subject: [PATCH 15/15] Fixes formatting error Signed-off-by: Mark Graham --- tests/test_compute_mmd_metric.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_compute_mmd_metric.py b/tests/test_compute_mmd_metric.py index da6e16bd86..d1b69b3dfe 100644 --- a/tests/test_compute_mmd_metric.py +++ b/tests/test_compute_mmd_metric.py @@ -50,5 +50,6 @@ def test_if_inputs_have_one_sample(self): with self.assertRaises(ValueError): MMDMetric()(torch.ones([1, 3, 144, 144]), torch.ones([1, 3, 144, 144])) + if __name__ == "__main__": unittest.main()