diff --git a/tests/test_affine_transform.py b/tests/test_affine_transform.py index 6ea036bce8..8a714104b8 100644 --- a/tests/test_affine_transform.py +++ b/tests/test_affine_transform.py @@ -133,29 +133,18 @@ def test_to_norm_affine_ill(self, affine, src_size, dst_size, align_corners): class TestAffineTransform(unittest.TestCase): - def test_affine_shift(self): - affine = torch.as_tensor([[1.0, 0.0, 0.0], [0.0, 1.0, -1.0]]) + @parameterized.expand([ + ("shift", torch.as_tensor([[1.0, 0.0, 0.0], [0.0, 1.0, -1.0]]), [[[[0, 4, 1, 3], [0, 7, 6, 8], [0, 3, 5, 3]]]]), + ("shift_1", torch.as_tensor([[1.0, 0.0, -1.0], [0.0, 1.0, -1.0]]), + [[[[0, 0, 0, 0], [0, 4, 1, 3], [0, 7, 6, 8]]]]), + ("shift_2", torch.as_tensor([[1.0, 0.0, -1.0], [0.0, 1.0, 0.0]]), + [[[[0, 0, 0, 0], [4, 1, 3, 2], [7, 6, 8, 5]]]]), + ]) + def test_affine_transforms(self, name, affine, expected): image = torch.as_tensor([[[[4.0, 1.0, 3.0, 2.0], [7.0, 6.0, 8.0, 5.0], [3.0, 5.0, 3.0, 6.0]]]]) out = AffineTransform(align_corners=False)(image, affine) out = out.detach().cpu().numpy() - expected = [[[[0, 4, 1, 3], [0, 7, 6, 8], [0, 3, 5, 3]]]] - np.testing.assert_allclose(out, expected, atol=1e-5, rtol=_rtol) - - def test_affine_shift_1(self): - affine = torch.as_tensor([[1.0, 0.0, -1.0], [0.0, 1.0, -1.0]]) - image = torch.as_tensor([[[[4.0, 1.0, 3.0, 2.0], [7.0, 6.0, 8.0, 5.0], [3.0, 5.0, 3.0, 6.0]]]]) - out = AffineTransform(align_corners=False)(image, affine) - out = out.detach().cpu().numpy() - expected = [[[[0, 0, 0, 0], [0, 4, 1, 3], [0, 7, 6, 8]]]] - np.testing.assert_allclose(out, expected, atol=1e-5, rtol=_rtol) - - def test_affine_shift_2(self): - affine = torch.as_tensor([[1.0, 0.0, -1.0], [0.0, 1.0, 0.0]]) - image = torch.as_tensor([[[[4.0, 1.0, 3.0, 2.0], [7.0, 6.0, 8.0, 5.0], [3.0, 5.0, 3.0, 6.0]]]]) - out = AffineTransform(align_corners=False)(image, affine) - out = out.detach().cpu().numpy() - expected = [[[[0, 0, 0, 0], [4, 1, 3, 2], [7, 6, 8, 5]]]] - np.testing.assert_allclose(out, expected, atol=1e-5, rtol=_rtol) + np.testing.assert_allclose(out, expected, atol=1e-5, rtol=_rtol) def test_zoom(self): affine = torch.as_tensor([[1.0, 0.0, 0.0], [0.0, 2.0, 0.0]]) diff --git a/tests/test_auto3dseg.py b/tests/test_auto3dseg.py index e2097679e2..6be33bf6ca 100644 --- a/tests/test_auto3dseg.py +++ b/tests/test_auto3dseg.py @@ -367,7 +367,6 @@ def test_filename_case_analyzer(self): for batch_data in self.dataset: d = transform(batch_data[0]) assert DataStatsKeys.BY_CASE_IMAGE_PATH in d - assert DataStatsKeys.BY_CASE_IMAGE_PATH in d def test_filename_case_analyzer_image_only(self): analyzer_image = FilenameStats("image", DataStatsKeys.BY_CASE_IMAGE_PATH) diff --git a/tests/test_bundle_ckpt_export.py b/tests/test_bundle_ckpt_export.py index 8f376a06d5..cfcadcfc4c 100644 --- a/tests/test_bundle_ckpt_export.py +++ b/tests/test_bundle_ckpt_export.py @@ -72,9 +72,9 @@ def test_export(self, key_in_ckpt, use_trace): _, metadata, extra_files = load_net_with_metadata( ts_file, more_extra_files=["inference.json", "def_args.json"] ) - self.assertTrue("schema" in metadata) - self.assertTrue("meta_file" in json.loads(extra_files["def_args.json"])) - self.assertTrue("network_def" in json.loads(extra_files["inference.json"])) + self.assertIn("schema", metadata) + self.assertIn("meta_file", json.loads(extra_files["def_args.json"])) + self.assertIn("network_def", json.loads(extra_files["inference.json"])) @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3]) def test_default_value(self, key_in_ckpt, use_trace): diff --git 
a/tests/test_bundle_get_data.py b/tests/test_bundle_get_data.py index 605b3945bb..a5960aebe5 100644 --- a/tests/test_bundle_get_data.py +++ b/tests/test_bundle_get_data.py @@ -51,16 +51,16 @@ class TestGetBundleData(unittest.TestCase): def test_get_all_bundles_list(self, params): with skip_if_downloading_fails(): output = get_all_bundles_list(**params) - self.assertTrue(isinstance(output, list)) - self.assertTrue(isinstance(output[0], tuple)) - self.assertTrue(len(output[0]) == 2) + self.assertIsInstance(output, list) + self.assertIsInstance(output[0], tuple) + self.assertEqual(len(output[0]), 2) @parameterized.expand([TEST_CASE_1, TEST_CASE_5]) @skip_if_quick def test_get_bundle_versions(self, params): with skip_if_downloading_fails(): output = get_bundle_versions(**params) - self.assertTrue(isinstance(output, dict)) + self.assertIsInstance(output, dict) self.assertTrue("latest_version" in output and "all_versions" in output) self.assertTrue("0.1.0" in output["all_versions"]) @@ -69,7 +69,7 @@ def test_get_bundle_versions(self, params): def test_get_bundle_info(self, params): with skip_if_downloading_fails(): output = get_bundle_info(**params) - self.assertTrue(isinstance(output, dict)) + self.assertIsInstance(output, dict) for key in ["id", "name", "size", "download_count", "browser_download_url"]: self.assertTrue(key in output) @@ -78,7 +78,7 @@ def test_get_bundle_info(self, params): def test_get_bundle_info_monaihosting(self, params): with skip_if_downloading_fails(): output = get_bundle_info(**params) - self.assertTrue(isinstance(output, dict)) + self.assertIsInstance(output, dict) for key in ["name", "browser_download_url"]: self.assertTrue(key in output) diff --git a/tests/test_bundle_trt_export.py b/tests/test_bundle_trt_export.py index 47034852ef..833a0ca1dc 100644 --- a/tests/test_bundle_trt_export.py +++ b/tests/test_bundle_trt_export.py @@ -91,9 +91,9 @@ def test_trt_export(self, convert_precision, input_shape, dynamic_batch): _, metadata, extra_files = load_net_with_metadata( ts_file, more_extra_files=["inference.json", "def_args.json"] ) - self.assertTrue("schema" in metadata) - self.assertTrue("meta_file" in json.loads(extra_files["def_args.json"])) - self.assertTrue("network_def" in json.loads(extra_files["inference.json"])) + self.assertIn("schema", metadata) + self.assertIn("meta_file", json.loads(extra_files["def_args.json"])) + self.assertIn("network_def", json.loads(extra_files["inference.json"])) @parameterized.expand([TEST_CASE_3, TEST_CASE_4]) @unittest.skipUnless( @@ -129,9 +129,9 @@ def test_onnx_trt_export(self, convert_precision, input_shape, dynamic_batch): _, metadata, extra_files = load_net_with_metadata( ts_file, more_extra_files=["inference.json", "def_args.json"] ) - self.assertTrue("schema" in metadata) - self.assertTrue("meta_file" in json.loads(extra_files["def_args.json"])) - self.assertTrue("network_def" in json.loads(extra_files["inference.json"])) + self.assertIn("schema", metadata) + self.assertIn("meta_file", json.loads(extra_files["def_args.json"])) + self.assertIn("network_def", json.loads(extra_files["inference.json"])) if __name__ == "__main__": diff --git a/tests/test_bundle_workflow.py b/tests/test_bundle_workflow.py index 9a276b577f..1727fcdf53 100644 --- a/tests/test_bundle_workflow.py +++ b/tests/test_bundle_workflow.py @@ -138,11 +138,11 @@ def test_train_config(self, config_file): self.assertListEqual(trainer.check_properties(), []) # test read / write the properties dataset = trainer.train_dataset - self.assertTrue(isinstance(dataset, 
Dataset)) + self.assertIsInstance(dataset, Dataset) inferer = trainer.train_inferer - self.assertTrue(isinstance(inferer, SimpleInferer)) + self.assertIsInstance(inferer, SimpleInferer) # test optional properties get - self.assertTrue(trainer.train_key_metric is None) + self.assertIsNone(trainer.train_key_metric) trainer.train_dataset = deepcopy(dataset) trainer.train_inferer = deepcopy(inferer) # test optional properties set diff --git a/tests/test_component_store.py b/tests/test_component_store.py index 424eceb3d1..7e7c6dd19d 100644 --- a/tests/test_component_store.py +++ b/tests/test_component_store.py @@ -48,17 +48,17 @@ def test_add2(self): self.cs.add("test_obj2", "Test object", test_obj2) self.assertEqual(len(self.cs), 2) - self.assertTrue("test_obj1" in self.cs) - self.assertTrue("test_obj2" in self.cs) + self.assertIn("test_obj1", self.cs) + self.assertIn("test_obj2", self.cs) def test_add_def(self): - self.assertFalse("test_func" in self.cs) + self.assertNotIn("test_func", self.cs) @self.cs.add_def("test_func", "Test function") def test_func(): return 123 - self.assertTrue("test_func" in self.cs) + self.assertIn("test_func", self.cs) self.assertEqual(len(self.cs), 1) self.assertEqual(list(self.cs), [("test_func", test_func)]) diff --git a/tests/test_compute_f_beta.py b/tests/test_compute_f_beta.py index 85997577cf..8225dee8f5 100644 --- a/tests/test_compute_f_beta.py +++ b/tests/test_compute_f_beta.py @@ -12,6 +12,7 @@ from __future__ import annotations import unittest import numpy as np import torch +from parameterized import parameterized @@ -33,26 +34,15 @@ def test_expecting_success_and_device(self): assert_allclose(result, torch.Tensor([0.714286]), atol=1e-6, rtol=1e-6) np.testing.assert_equal(result.device, y_pred.device) - def test_expecting_success2(self): - metric = FBetaScore(beta=0.5) - metric( - y_pred=torch.Tensor([[1, 1, 1], [1, 1, 1], [1, 1, 1]]), y=torch.Tensor([[1, 0, 1], [0, 1, 0], [1, 0, 1]]) - ) - assert_allclose(metric.aggregate()[0], torch.Tensor([0.609756]), atol=1e-6, rtol=1e-6) + @parameterized.expand([ + ("success_beta_0_5", FBetaScore(beta=0.5), torch.Tensor([[1, 0, 1], [0, 1, 0], [1, 0, 1]]), torch.Tensor([0.609756])), + ("success_beta_2", FBetaScore(beta=2), torch.Tensor([[1, 0, 1], [0, 1, 0], [1, 0, 1]]), torch.Tensor([0.862069])), + ("denominator_zero", FBetaScore(beta=2), torch.Tensor([[0, 0, 0], [0, 0, 0], [0, 0, 0]]), torch.Tensor([0.0])), + ]) + def test_success_and_zero(self, name, metric, y, expected_score): + metric(y_pred=torch.Tensor([[1, 1, 1], [1, 1, 1], [1, 1, 1]]), y=y) + assert_allclose(metric.aggregate()[0], expected_score, atol=1e-6, rtol=1e-6) - def test_expecting_success3(self): - metric = FBetaScore(beta=2) - metric( - y_pred=torch.Tensor([[1, 1, 1], [1, 1, 1], [1, 1, 1]]), y=torch.Tensor([[1, 0, 1], [0, 1, 0], [1, 0, 1]]) - ) - assert_allclose(metric.aggregate()[0], torch.Tensor([0.862069]), atol=1e-6, rtol=1e-6) - - def test_denominator_is_zero(self): - metric = FBetaScore(beta=2) - metric( - y_pred=torch.Tensor([[1, 1, 1], [1, 1, 1], [1, 1, 1]]), y=torch.Tensor([[0, 0, 0], [0, 0, 0], [0, 0, 0]]) - ) - assert_allclose(metric.aggregate()[0], torch.Tensor([0.0]), atol=1e-6, rtol=1e-6) + def test_number_of_dimensions_less_than_2_should_raise_error(self): metric = FBetaScore() diff --git a/tests/test_compute_ho_ver_maps.py b/tests/test_compute_ho_ver_maps.py index bbd5230f04..6e46cf2b1e 100644 --- a/tests/test_compute_ho_ver_maps.py +++ b/tests/test_compute_ho_ver_maps.py @@ -67,8 +67,8 @@ class 
ComputeHoVerMapsTests(unittest.TestCase): def test_horizontal_certical_maps(self, in_type, arguments, mask, hv_mask): input_image = in_type(mask) result = ComputeHoVerMaps(**arguments)(input_image) - self.assertTrue(isinstance(result, torch.Tensor)) - self.assertTrue(str(result.dtype).split(".")[1] == arguments.get("dtype", "float32")) + self.assertIsInstance(result, torch.Tensor) + self.assertEqual(str(result.dtype).split(".")[1], arguments.get("dtype", "float32")) assert_allclose(result, hv_mask, type_test="tensor") diff --git a/tests/test_compute_ho_ver_maps_d.py b/tests/test_compute_ho_ver_maps_d.py index 7b5ac0d9d7..0734e2e731 100644 --- a/tests/test_compute_ho_ver_maps_d.py +++ b/tests/test_compute_ho_ver_maps_d.py @@ -71,8 +71,8 @@ def test_horizontal_certical_maps(self, in_type, arguments, mask, hv_mask): for k in mask.keys(): input_image[k] = in_type(mask[k]) result = ComputeHoVerMapsd(keys="mask", **arguments)(input_image)[hv_key] - self.assertTrue(isinstance(result, torch.Tensor)) - self.assertTrue(str(result.dtype).split(".")[1] == arguments.get("dtype", "float32")) + self.assertIsInstance(result, torch.Tensor) + self.assertEqual(str(result.dtype).split(".")[1], arguments.get("dtype", "float32")) assert_allclose(result, hv_mask[hv_key], type_test="tensor") diff --git a/tests/test_compute_regression_metrics.py b/tests/test_compute_regression_metrics.py index a8b7f03e47..c407ab6ba6 100644 --- a/tests/test_compute_regression_metrics.py +++ b/tests/test_compute_regression_metrics.py @@ -70,22 +70,24 @@ def test_shape_reduction(self): mt = mt_fn(reduction="mean") mt(in_tensor, in_tensor) out_tensor = mt.aggregate() - self.assertTrue(len(out_tensor.shape) == 1) + self.assertEqual(len(out_tensor.shape), 1) mt = mt_fn(reduction="sum") mt(in_tensor, in_tensor) out_tensor = mt.aggregate() - self.assertTrue(len(out_tensor.shape) == 0) + self.assertEqual(len(out_tensor.shape), 0) mt = mt_fn(reduction="sum") # test reduction arg overriding mt(in_tensor, in_tensor) out_tensor = mt.aggregate(reduction="mean_channel") - self.assertTrue(len(out_tensor.shape) == 1 and out_tensor.shape[0] == batch) + self.assertEqual(len(out_tensor.shape), 1) + self.assertEqual(out_tensor.shape[0], batch) mt = mt_fn(reduction="sum_channel") mt(in_tensor, in_tensor) out_tensor = mt.aggregate() - self.assertTrue(len(out_tensor.shape) == 1 and out_tensor.shape[0] == batch) + self.assertEqual(len(out_tensor.shape), 1) + self.assertEqual(out_tensor.shape[0], batch) def test_compare_numpy(self): set_determinism(seed=123) diff --git a/tests/test_concat_itemsd.py b/tests/test_concat_itemsd.py index 64c5d6e255..564ddf5c1f 100644 --- a/tests/test_concat_itemsd.py +++ b/tests/test_concat_itemsd.py @@ -30,7 +30,7 @@ def test_tensor_values(self): "img2": torch.tensor([[0, 1], [1, 2]], device=device), } result = ConcatItemsd(keys=["img1", "img2"], name="cat_img")(input_data) - self.assertTrue("cat_img" in result) + self.assertIn("cat_img", result) result["cat_img"] += 1 assert_allclose(result["img1"], torch.tensor([[0, 1], [1, 2]], device=device)) assert_allclose(result["cat_img"], torch.tensor([[1, 2], [2, 3], [1, 2], [2, 3]], device=device)) @@ -42,8 +42,8 @@ def test_metatensor_values(self): "img2": MetaTensor([[0, 1], [1, 2]], device=device), } result = ConcatItemsd(keys=["img1", "img2"], name="cat_img")(input_data) - self.assertTrue("cat_img" in result) - self.assertTrue(isinstance(result["cat_img"], MetaTensor)) + self.assertIn("cat_img", result) + self.assertIsInstance(result["cat_img"], MetaTensor) 
self.assertEqual(result["img1"].meta, result["cat_img"].meta) result["cat_img"] += 1 assert_allclose(result["img1"], torch.tensor([[0, 1], [1, 2]], device=device)) @@ -52,7 +52,7 @@ def test_metatensor_values(self): def test_numpy_values(self): input_data = {"img1": np.array([[0, 1], [1, 2]]), "img2": np.array([[0, 1], [1, 2]])} result = ConcatItemsd(keys=["img1", "img2"], name="cat_img")(input_data) - self.assertTrue("cat_img" in result) + self.assertIn("cat_img", result) result["cat_img"] += 1 np.testing.assert_allclose(result["img1"], np.array([[0, 1], [1, 2]])) np.testing.assert_allclose(result["cat_img"], np.array([[1, 2], [2, 3], [1, 2], [2, 3]])) diff --git a/tests/test_config_parser.py b/tests/test_config_parser.py index cc890a0522..cf1edc8f08 100644 --- a/tests/test_config_parser.py +++ b/tests/test_config_parser.py @@ -185,7 +185,7 @@ def test_function(self, config): if id in ("compute", "cls_compute"): parser[f"{id}#_mode_"] = "callable" func = parser.get_parsed_content(id=id) - self.assertTrue(id in parser.ref_resolver.resolved_content) + self.assertIn(id, parser.ref_resolver.resolved_content) if id == "error_func": with self.assertRaises(TypeError): func(1, 2) diff --git a/tests/test_cucim_dict_transform.py b/tests/test_cucim_dict_transform.py index d2dcc6aa5f..3c5703a34c 100644 --- a/tests/test_cucim_dict_transform.py +++ b/tests/test_cucim_dict_transform.py @@ -80,8 +80,8 @@ class TestCuCIMDict(unittest.TestCase): def test_tramsforms_numpy_single(self, params, input, expected): input = {"image": input} output = CuCIMd(keys="image", **params)(input)["image"] - self.assertTrue(output.dtype == expected.dtype) - self.assertTrue(isinstance(output, np.ndarray)) + self.assertEqual(output.dtype, expected.dtype) + self.assertIsInstance(output, np.ndarray) cp.testing.assert_allclose(output, expected) @parameterized.expand( @@ -98,8 +98,8 @@ def test_tramsforms_numpy_batch(self, params, input, expected): input = {"image": input[cp.newaxis, ...]} expected = expected[cp.newaxis, ...] output = CuCIMd(keys="image", **params)(input)["image"] - self.assertTrue(output.dtype == expected.dtype) - self.assertTrue(isinstance(output, np.ndarray)) + self.assertEqual(output.dtype, expected.dtype) + self.assertIsInstance(output, np.ndarray) cp.testing.assert_allclose(output, expected) @parameterized.expand( @@ -116,8 +116,8 @@ def test_tramsforms_cupy_single(self, params, input, expected): input = {"image": cp.asarray(input)} expected = cp.asarray(expected) output = CuCIMd(keys="image", **params)(input)["image"] - self.assertTrue(output.dtype == expected.dtype) - self.assertTrue(isinstance(output, cp.ndarray)) + self.assertEqual(output.dtype, expected.dtype) + self.assertIsInstance(output, cp.ndarray) cp.testing.assert_allclose(output, expected) @parameterized.expand( @@ -134,8 +134,8 @@ def test_tramsforms_cupy_batch(self, params, input, expected): input = {"image": cp.asarray(input)[cp.newaxis, ...]} expected = cp.asarray(expected)[cp.newaxis, ...] 
output = CuCIMd(keys="image", **params)(input)["image"] - self.assertTrue(output.dtype == expected.dtype) - self.assertTrue(isinstance(output, cp.ndarray)) + self.assertEqual(output.dtype, expected.dtype) + self.assertIsInstance(output, cp.ndarray) cp.testing.assert_allclose(output, expected) diff --git a/tests/test_cucim_transform.py b/tests/test_cucim_transform.py index 5f16c11589..162e16b52a 100644 --- a/tests/test_cucim_transform.py +++ b/tests/test_cucim_transform.py @@ -79,8 +79,8 @@ class TestCuCIM(unittest.TestCase): ) def test_tramsforms_numpy_single(self, params, input, expected): output = CuCIM(**params)(input) - self.assertTrue(output.dtype == expected.dtype) - self.assertTrue(isinstance(output, np.ndarray)) + self.assertEqual(output.dtype, expected.dtype) + self.assertIsInstance(output, np.ndarray) cp.testing.assert_allclose(output, expected) @parameterized.expand( @@ -97,8 +97,8 @@ def test_tramsforms_numpy_batch(self, params, input, expected): input = input[cp.newaxis, ...] expected = expected[cp.newaxis, ...] output = CuCIM(**params)(input) - self.assertTrue(output.dtype == expected.dtype) - self.assertTrue(isinstance(output, np.ndarray)) + self.assertEqual(output.dtype, expected.dtype) + self.assertIsInstance(output, np.ndarray) cp.testing.assert_allclose(output, expected) @parameterized.expand( @@ -115,8 +115,8 @@ def test_tramsforms_cupy_single(self, params, input, expected): input = cp.asarray(input) expected = cp.asarray(expected) output = CuCIM(**params)(input) - self.assertTrue(output.dtype == expected.dtype) - self.assertTrue(isinstance(output, cp.ndarray)) + self.assertEqual(output.dtype, expected.dtype) + self.assertIsInstance(output, cp.ndarray) cp.testing.assert_allclose(output, expected) @parameterized.expand( @@ -133,8 +133,8 @@ def test_tramsforms_cupy_batch(self, params, input, expected): input = cp.asarray(input)[cp.newaxis, ...] expected = cp.asarray(expected)[cp.newaxis, ...] output = CuCIM(**params)(input) - self.assertTrue(output.dtype == expected.dtype) - self.assertTrue(isinstance(output, cp.ndarray)) + self.assertEqual(output.dtype, expected.dtype) + self.assertIsInstance(output, cp.ndarray) cp.testing.assert_allclose(output, expected) diff --git a/tests/test_decathlondataset.py b/tests/test_decathlondataset.py index d220cd9097..70a2a6c06c 100644 --- a/tests/test_decathlondataset.py +++ b/tests/test_decathlondataset.py @@ -80,7 +80,7 @@ def _test_dataset(dataset): self.assertDictEqual(properties["labels"], {"0": "background", "1": "Anterior", "2": "Posterior"}) shutil.rmtree(os.path.join(testing_dir, "Task04_Hippocampus")) - try: + with self.assertRaisesRegex(RuntimeError, "^Cannot find dataset directory"): DecathlonDataset( root_dir=testing_dir, task="Task04_Hippocampus", @@ -88,9 +88,6 @@ def _test_dataset(dataset): section="validation", download=False, ) - except RuntimeError as e: - print(str(e)) - self.assertTrue(str(e).startswith("Cannot find dataset directory")) if __name__ == "__main__": diff --git a/tests/test_detect_envelope.py b/tests/test_detect_envelope.py index e2efefeb77..f9c2b5ac53 100644 --- a/tests/test_detect_envelope.py +++ b/tests/test_detect_envelope.py @@ -147,7 +147,7 @@ def test_value_error(self, arguments, image, method): elif method == "__call__": self.assertRaises(ValueError, DetectEnvelope(**arguments), image) else: - raise ValueError("Expected raising method invalid. Should be __init__ or __call__.") + self.fail("Expected raising method invalid. 
Should be __init__ or __call__.") @SkipIfModule("torch.fft") diff --git a/tests/test_ensure_typed.py b/tests/test_ensure_typed.py index 09aa1f04b5..c97e4badd3 100644 --- a/tests/test_ensure_typed.py +++ b/tests/test_ensure_typed.py @@ -33,8 +33,8 @@ def test_array_input(self): keys="data", data_type=dtype, dtype=np.float32 if dtype == "NUMPY" else None, device="cpu" )({"data": test_data})["data"] if dtype == "NUMPY": - self.assertTrue(result.dtype == np.float32) - self.assertTrue(isinstance(result, torch.Tensor if dtype == "tensor" else np.ndarray)) + self.assertEqual(result.dtype, np.float32) + self.assertIsInstance(result, torch.Tensor if dtype == "tensor" else np.ndarray) assert_allclose(result, test_data, type_test=False) self.assertTupleEqual(result.shape, (2, 2)) @@ -45,7 +45,7 @@ def test_single_input(self): for test_data in test_datas: for dtype in ("tensor", "numpy"): result = EnsureTyped(keys="data", data_type=dtype)({"data": test_data})["data"] - self.assertTrue(isinstance(result, torch.Tensor if dtype == "tensor" else np.ndarray)) + self.assertIsInstance(result, torch.Tensor if dtype == "tensor" else np.ndarray) if isinstance(test_data, bool): self.assertFalse(result) else: @@ -56,11 +56,11 @@ def test_string(self): for dtype in ("tensor", "numpy"): # string input result = EnsureTyped(keys="data", data_type=dtype)({"data": "test_string"})["data"] - self.assertTrue(isinstance(result, str)) + self.assertIsInstance(result, str) self.assertEqual(result, "test_string") # numpy array of string result = EnsureTyped(keys="data", data_type=dtype)({"data": np.array(["test_string"])})["data"] - self.assertTrue(isinstance(result, np.ndarray)) + self.assertIsInstance(result, np.ndarray) self.assertEqual(result[0], "test_string") def test_list_tuple(self): @@ -68,15 +68,15 @@ def test_list_tuple(self): result = EnsureTyped(keys="data", data_type=dtype, wrap_sequence=False, track_meta=True)( {"data": [[1, 2], [3, 4]]} )["data"] - self.assertTrue(isinstance(result, list)) - self.assertTrue(isinstance(result[0][1], MetaTensor if dtype == "tensor" else np.ndarray)) + self.assertIsInstance(result, list) + self.assertIsInstance(result[0][1], MetaTensor if dtype == "tensor" else np.ndarray) assert_allclose(result[1][0], torch.as_tensor(3), type_test=False) # tuple of numpy arrays result = EnsureTyped(keys="data", data_type=dtype, wrap_sequence=False)( {"data": (np.array([1, 2]), np.array([3, 4]))} )["data"] - self.assertTrue(isinstance(result, tuple)) - self.assertTrue(isinstance(result[0], torch.Tensor if dtype == "tensor" else np.ndarray)) + self.assertIsInstance(result, tuple) + self.assertIsInstance(result[0], torch.Tensor if dtype == "tensor" else np.ndarray) assert_allclose(result[1], torch.as_tensor([3, 4]), type_test=False) def test_dict(self): @@ -92,19 +92,19 @@ def test_dict(self): ) for key in ("data", "label"): result = trans[key] - self.assertTrue(isinstance(result, dict)) - self.assertTrue(isinstance(result["img"], torch.Tensor if dtype == "tensor" else np.ndarray)) - self.assertTrue(isinstance(result["meta"]["size"], torch.Tensor if dtype == "tensor" else np.ndarray)) + self.assertIsInstance(result, dict) + self.assertIsInstance(result["img"], torch.Tensor if dtype == "tensor" else np.ndarray) + self.assertIsInstance(result["meta"]["size"], torch.Tensor if dtype == "tensor" else np.ndarray) self.assertEqual(result["meta"]["path"], "temp/test") self.assertEqual(result["extra"], None) assert_allclose(result["img"], torch.as_tensor([1.0, 2.0]), type_test=False) 
assert_allclose(result["meta"]["size"], torch.as_tensor([1, 2, 3]), type_test=False) if dtype == "numpy": - self.assertTrue(trans["data"]["img"].dtype == np.float32) - self.assertTrue(trans["label"]["img"].dtype == np.int8) + self.assertEqual(trans["data"]["img"].dtype, np.float32) + self.assertEqual(trans["label"]["img"].dtype, np.int8) else: - self.assertTrue(trans["data"]["img"].dtype == torch.float32) - self.assertTrue(trans["label"]["img"].dtype == torch.int8) + self.assertEqual(trans["data"]["img"].dtype, torch.float32) + self.asseassertEqualrtTrue(trans["label"]["img"].dtype, torch.int8) if __name__ == "__main__": diff --git a/tests/test_flipd.py b/tests/test_flipd.py index 277f387051..1df6d34056 100644 --- a/tests/test_flipd.py +++ b/tests/test_flipd.py @@ -78,7 +78,7 @@ def test_torch(self, spatial_axis, img: torch.Tensor, track_meta: bool, device): def test_meta_dict(self): xform = Flipd("image", [0, 1]) res = xform({"image": torch.zeros(1, 3, 4)}) - self.assertTrue(res["image"].applied_operations == res["image_transforms"]) + self.assertEqual(res["image"].applied_operations, res["image_transforms"]) if __name__ == "__main__": diff --git a/tests/test_freeze_layers.py b/tests/test_freeze_layers.py index 1bea4ed1b5..7be8e576bf 100644 --- a/tests/test_freeze_layers.py +++ b/tests/test_freeze_layers.py @@ -40,9 +40,9 @@ def test_freeze_vars(self, device): for name, param in model.named_parameters(): if "class_layer" in name: - self.assertEqual(param.requires_grad, False) + self.assertFalse(param.requires_grad) else: - self.assertEqual(param.requires_grad, True) + self.assertTrue(param.requires_grad) @parameterized.expand(TEST_CASES) def test_exclude_vars(self, device): @@ -53,9 +53,9 @@ def test_exclude_vars(self, device): for name, param in model.named_parameters(): if "class_layer" in name: - self.assertEqual(param.requires_grad, True) + self.assertTrue(param.requires_grad) else: - self.assertEqual(param.requires_grad, False) + self.assertFalse(param.requires_grad) if __name__ == "__main__": diff --git a/tests/test_generalized_dice_loss.py b/tests/test_generalized_dice_loss.py index 7499507129..5738f4a089 100644 --- a/tests/test_generalized_dice_loss.py +++ b/tests/test_generalized_dice_loss.py @@ -184,7 +184,7 @@ def test_differentiability(self): generalized_dice_loss = GeneralizedDiceLoss() loss = generalized_dice_loss(prediction, target) - self.assertNotEqual(loss.grad_fn, None) + self.assertIsNotNone(loss.grad_fn) def test_batch(self): prediction = torch.zeros(2, 3, 3, 3) @@ -194,7 +194,7 @@ def test_batch(self): generalized_dice_loss = GeneralizedDiceLoss(batch=True) loss = generalized_dice_loss(prediction, target) - self.assertNotEqual(loss.grad_fn, None) + self.assertIsNotNone(loss.grad_fn) def test_script(self): loss = GeneralizedDiceLoss() diff --git a/tests/test_get_package_version.py b/tests/test_get_package_version.py index ab9e69cd31..cdfe09c6dc 100644 --- a/tests/test_get_package_version.py +++ b/tests/test_get_package_version.py @@ -20,14 +20,14 @@ class TestGetVersion(unittest.TestCase): def test_default(self): output = get_package_version("42foobarnoexist") - self.assertTrue("UNKNOWN" in output) + self.assertIn("UNKNOWN", output) output = get_package_version("numpy") - self.assertFalse("UNKNOWN" in output) + self.assertIn("UNKNOWN", output) def test_msg(self): output = get_package_version("42foobarnoexist", "test") - self.assertTrue("test" in output) + self.assertIn("test", output) if __name__ == "__main__": diff --git a/tests/test_global_mutual_information_loss.py 
b/tests/test_global_mutual_information_loss.py index 36a1978c93..51faf71483 100644 --- a/tests/test_global_mutual_information_loss.py +++ b/tests/test_global_mutual_information_loss.py @@ -12,6 +12,7 @@ import os import unittest import numpy as np import torch +from parameterized import parameterized @@ -116,24 +117,26 @@ def transformation(translate_params=(0.0, 0.0, 0.0), rotate_params=(0.0, 0.0, 0. class TestGlobalMutualInformationLossIll(unittest.TestCase): - def test_ill_shape(self): + @parameterized.expand([ + ("mismatched_simple_dims", torch.ones((1, 2), dtype=torch.float), torch.ones((1, 3), dtype=torch.float)), + ("mismatched_advanced_dims", torch.ones((1, 3, 3), dtype=torch.float), torch.ones((1, 3), dtype=torch.float)), + ]) + def test_ill_shape(self, name, input1, input2): loss = GlobalMutualInformationLoss() - with self.assertRaisesRegex(ValueError, ""): - loss.forward(torch.ones((1, 2), dtype=torch.float), torch.ones((1, 3), dtype=torch.float, device=device)) - with self.assertRaisesRegex(ValueError, ""): - loss.forward(torch.ones((1, 3, 3), dtype=torch.float), torch.ones((1, 3), dtype=torch.float, device=device)) - - def test_ill_opts(self): + with self.assertRaises(ValueError): + loss.forward(input1, input2) + + @parameterized.expand([ + ("num_bins_zero", 0, "mean", ValueError, ""), + ("num_bins_negative", -1, "mean", ValueError, ""), + ("reduction_unknown", 64, "unknown", ValueError, ""), + ("reduction_none", 64, None, ValueError, ""), + ]) + def test_ill_opts(self, name, num_bins, reduction, expected_exception, expected_message): pred = torch.ones((1, 3, 3, 3, 3), dtype=torch.float, device=device) target = torch.ones((1, 3, 3, 3, 3), dtype=torch.float, device=device) - with self.assertRaisesRegex(ValueError, ""): - GlobalMutualInformationLoss(num_bins=0)(pred, target) - with self.assertRaisesRegex(ValueError, ""): - GlobalMutualInformationLoss(num_bins=-1)(pred, target) - with self.assertRaisesRegex(ValueError, ""): - GlobalMutualInformationLoss(reduction="unknown")(pred, target) - with self.assertRaisesRegex(ValueError, ""): - GlobalMutualInformationLoss(reduction=None)(pred, target) + with self.assertRaisesRegex(expected_exception, expected_message): + GlobalMutualInformationLoss(num_bins=num_bins, reduction=reduction)(pred, target) if __name__ == "__main__": diff --git a/tests/test_grid_patch.py b/tests/test_grid_patch.py index 4b324eda1a..56af123548 100644 --- a/tests/test_grid_patch.py +++ b/tests/test_grid_patch.py @@ -124,11 +124,11 @@ def test_grid_patch_meta(self, input_parameters, image, expected, expected_meta) self.assertTrue(output.meta["path"] == expected_meta[0]["path"]) for output_patch, expected_patch, expected_patch_meta in zip(output, expected, expected_meta): assert_allclose(output_patch, expected_patch, type_test=False) - self.assertTrue(isinstance(output_patch, MetaTensor)) - self.assertTrue(output_patch.meta["location"] == expected_patch_meta["location"]) + self.assertIsInstance(output_patch, MetaTensor) + self.assertEqual(output_patch.meta["location"], expected_patch_meta["location"]) self.assertTrue(output_patch.meta["spatial_shape"], list(output_patch.shape[1:])) if "path" in expected_meta[0]: - self.assertTrue(output_patch.meta["path"] == expected_patch_meta["path"]) + self.assertEqual(output_patch.meta["path"], expected_patch_meta["path"]) if __name__ == "__main__": diff --git a/tests/test_handler_stats.py b/tests/test_handler_stats.py index f876cff2a3..25fbd37ce8 --- 
a/tests/test_handler_stats.py +++ b/tests/test_handler_stats.py @@ -76,9 +76,9 @@ def _update_metric(engine): if has_key_word.match(line): content_count += 1 if epoch_log is True: - self.assertTrue(content_count == max_epochs) + self.assertEqual(content_count, max_epochs) else: - self.assertTrue(content_count == 2) # 2 = len([1, 2]) from event_filter + self.assertEqual(content_count, 2) # 2 = len([1, 2]) from event_filter @parameterized.expand([[True], [get_event_filter([1, 3])]]) def test_loss_print(self, iteration_log): @@ -116,9 +116,9 @@ def _train_func(engine, batch): if has_key_word.match(line): content_count += 1 if iteration_log is True: - self.assertTrue(content_count == num_iters * max_epochs) + self.assertEqual(content_count, num_iters * max_epochs) else: - self.assertTrue(content_count == 2) # 2 = len([1, 3]) from event_filter + self.assertEqual(content_count, 2) # 2 = len([1, 3]) from event_filter def test_loss_dict(self): log_stream = StringIO() @@ -150,7 +150,7 @@ def _train_func(engine, batch): for line in output_str.split("\n"): if has_key_word.match(line): content_count += 1 - self.assertTrue(content_count > 0) + self.assertGreater(content_count, 0) def test_loss_file(self): key_to_handler = "test_logging" @@ -184,7 +184,7 @@ def _train_func(engine, batch): for line in output_str.split("\n"): if has_key_word.match(line): content_count += 1 - self.assertTrue(content_count > 0) + self.assertGreater(content_count, 0) def test_exception(self): # set up engine @@ -274,7 +274,7 @@ def _train_func(engine, batch): for line in output_str.split("\n"): if has_key_word.match(line): content_count += 1 - self.assertTrue(content_count > 0) + self.assertGreater(content_count, 0) if __name__ == "__main__": diff --git a/tests/test_hausdorff_loss.py b/tests/test_hausdorff_loss.py index f279d45b14..15f00a7440 100644 --- a/tests/test_hausdorff_loss.py +++ b/tests/test_hausdorff_loss.py @@ -219,17 +219,16 @@ def test_ill_opts(self): with self.assertRaisesRegex(ValueError, ""): HausdorffDTLoss(reduction=None)(chn_input, chn_target) - def test_input_warnings(self): + @parameterized.expand([ + (False, False, False), + (False, True, False), + (False, False, True), + ]) + def test_input_warnings(self, include_background, softmax, to_onehot_y): chn_input = torch.ones((1, 1, 1, 3)) chn_target = torch.ones((1, 1, 1, 3)) with self.assertWarns(Warning): - loss = HausdorffDTLoss(include_background=False) - loss.forward(chn_input, chn_target) - with self.assertWarns(Warning): - loss = HausdorffDTLoss(softmax=True) - loss.forward(chn_input, chn_target) - with self.assertWarns(Warning): - loss = HausdorffDTLoss(to_onehot_y=True) + loss = HausdorffDTLoss(include_background=include_background, softmax=softmax, to_onehot_y=to_onehot_y) loss.forward(chn_input, chn_target) @@ -256,17 +255,16 @@ def test_ill_opts(self): with self.assertRaisesRegex(ValueError, ""): LogHausdorffDTLoss(reduction=None)(chn_input, chn_target) - def test_input_warnings(self): + @parameterized.expand([ + (False, False, False), + (False, True, False), + (False, False, True), + ]) + def test_input_warnings(self, include_background, softmax, to_onehot_y): chn_input = torch.ones((1, 1, 1, 3)) chn_target = torch.ones((1, 1, 1, 3)) with self.assertWarns(Warning): - loss = LogHausdorffDTLoss(include_background=False) - loss.forward(chn_input, chn_target) - with self.assertWarns(Warning): - loss = LogHausdorffDTLoss(softmax=True) - loss.forward(chn_input, chn_target) - with self.assertWarns(Warning): - loss = 
LogHausdorffDTLoss(to_onehot_y=True) + loss = LogHausdorffDTLoss(include_background=include_background, softmax=softmax, to_onehot_y=to_onehot_y) loss.forward(chn_input, chn_target) diff --git a/tests/test_integration_bundle_run.py b/tests/test_integration_bundle_run.py index c2e0fb55b7..35162fd008 100644 --- a/tests/test_integration_bundle_run.py +++ b/tests/test_integration_bundle_run.py @@ -135,9 +135,8 @@ def test_scripts_fold(self): command_run = cmd + ["run", "training", "--config_file", config_file, "--meta_file", meta_file] completed_process = subprocess.run(command_run, check=True, capture_output=True, text=True) output = repr(completed_process.stdout).replace("\\n", "\n").replace("\\t", "\t") # Get the captured output - print(output) - self.assertTrue(expected_condition in output) + self.assertIn(expected_condition, output) command_run_workflow = cmd + [ "run_workflow", "--run_id", "training", "--config_file", config_file, "--meta_file", meta_file, ] completed_process = subprocess.run(command_run_workflow, check=True, capture_output=True, text=True) output = repr(completed_process.stdout).replace("\\n", "\n").replace("\\t", "\t") # Get the captured output - print(output) - self.assertTrue(expected_condition in output) + self.assertIn(expected_condition, output) # test missing meta file self.assertIn("ERROR", command_line_tests(cmd + ["run", "training", "--config_file", config_file])) diff --git a/tests/test_inverse_collation.py b/tests/test_inverse_collation.py index f33b5c67eb..bf3972e6bd 100644 --- a/tests/test_inverse_collation.py +++ b/tests/test_inverse_collation.py @@ -133,7 +133,7 @@ def test_collation(self, _, transform, collate_fn, ndim): d = decollate_batch(item) self.assertTrue(len(d) <= self.batch_size) for b in d: - self.assertTrue(isinstance(b["image"], MetaTensor)) + self.assertIsInstance(b["image"], MetaTensor) np.testing.assert_array_equal( b["image"].applied_operations[-1]["orig_size"], b["label"].applied_operations[-1]["orig_size"] ) diff --git a/tests/test_invertd.py b/tests/test_invertd.py index c32a3af643..f6e8fc40e7 100644 --- a/tests/test_invertd.py +++ b/tests/test_invertd.py @@ -134,7 +134,7 @@ def test_invert(self): # 25300: 2 workers (cpu, non-macos) # 1812: 0 workers (gpu or macos) # 1821: windows torch 1.10.0 - self.assertTrue((reverted.size - n_good) < 40000, f"diff. {reverted.size - n_good}") + self.assertLess((reverted.size - n_good), 40000, f"diff. 
{reverted.size - n_good}") set_determinism(seed=None) diff --git a/tests/test_load_imaged.py b/tests/test_load_imaged.py index 699ed70059..914240c705 100644 --- a/tests/test_load_imaged.py +++ b/tests/test_load_imaged.py @@ -190,7 +190,7 @@ def test_correct(self, input_p, expected_shape, track_meta): self.assertTrue(hasattr(r, "affine")) self.assertIsInstance(r.affine, torch.Tensor) self.assertEqual(r.meta["space"], "RAS") - self.assertTrue("qform_code" not in r.meta) + self.assertNotIn("qform_code", r.meta) else: self.assertIsInstance(r, torch.Tensor) self.assertNotIsInstance(r, MetaTensor) diff --git a/tests/test_load_spacing_orientation.py b/tests/test_load_spacing_orientation.py index 63422761ca..bc64f18eaf 100644 --- a/tests/test_load_spacing_orientation.py +++ b/tests/test_load_spacing_orientation.py @@ -48,7 +48,7 @@ def test_load_spacingd(self, filename): ref = resample_to_output(anat, (1, 0.2, 1), order=1) t2 = time.time() print(f"time scipy: {t2 - t1}") - self.assertTrue(t2 >= t1) + self.assertGreaterEqual(t2, t1) np.testing.assert_allclose(res_dict["image"].affine, ref.affine) np.testing.assert_allclose(res_dict["image"].shape[1:], ref.shape) np.testing.assert_allclose(ref.get_fdata(), res_dict["image"][0], atol=0.05) diff --git a/tests/test_look_up_option.py b/tests/test_look_up_option.py index d40b7eaa8c..75560b4ac4 100644 --- a/tests/test_look_up_option.py +++ b/tests/test_look_up_option.py @@ -56,7 +56,7 @@ def test_default(self): def test_str_enum(self): output = look_up_option("C", {"A", "B"}, default=None) - self.assertEqual(output, None) + self.assertIsNone(output) self.assertEqual(list(_CaseStrEnum), ["A", "B"]) self.assertEqual(_CaseStrEnum.MODE_A, "A") self.assertEqual(str(_CaseStrEnum.MODE_A), "A") diff --git a/tests/test_matshow3d.py b/tests/test_matshow3d.py index e513025e69..e54bb523e4 100644 --- a/tests/test_matshow3d.py +++ b/tests/test_matshow3d.py @@ -78,7 +78,7 @@ def test_samples(self): fig, mat = matshow3d( [im[keys] for im in ims], title=f"testing {keys}", figsize=(2, 2), frames_per_row=5, every_n=2, show=False ) - self.assertTrue(mat.dtype == np.float32) + self.assertEqual(mat.dtype, np.float32) with tempfile.TemporaryDirectory() as tempdir: tempimg = f"{tempdir}/matshow3d_patch_test.png" diff --git a/tests/test_median_filter.py b/tests/test_median_filter.py index 1f5e623260..f8f2bc3f87 100644 --- a/tests/test_median_filter.py +++ b/tests/test_median_filter.py @@ -12,6 +12,7 @@ from __future__ import annotations import unittest +from parameterized import parameterized import numpy as np import torch @@ -20,22 +21,17 @@ class MedianFilterTestCase(unittest.TestCase): + @parameterized.expand([ + ("3d_big", torch.ones(1, 1, 2, 3, 5), MedianFilter([1, 2, 4])), + ("3d", torch.ones(1, 1, 4, 3, 4), MedianFilter(1)), + ]) + def test_3d(self, name, input_tensor, filter): + filter = filter.to(torch.device("cpu:0")) - def test_3d_big(self): - a = torch.ones(1, 1, 2, 3, 5) - g = MedianFilter([1, 2, 4]).to(torch.device("cpu:0")) + expected = input_tensor.numpy() + output = filter(input_tensor).cpu().numpy() - expected = a.numpy() - out = g(a).cpu().numpy() - np.testing.assert_allclose(out, expected, rtol=1e-5) - - def test_3d(self): - a = torch.ones(1, 1, 4, 3, 4) - g = MedianFilter(1).to(torch.device("cpu:0")) - - expected = a.numpy() - out = g(a).cpu().numpy() - np.testing.assert_allclose(out, expected, rtol=1e-5) + np.testing.assert_allclose(output, expected, rtol=1e-5) def test_3d_radii(self): a = torch.ones(1, 1, 4, 3, 2) diff --git a/tests/test_mednistdataset.py 
b/tests/test_mednistdataset.py index baf3bf4f2d..c1b21e9373 100644 --- a/tests/test_mednistdataset.py +++ b/tests/test_mednistdataset.py @@ -41,7 +41,7 @@ def _test_dataset(dataset): self.assertEqual(len(dataset), int(MEDNIST_FULL_DATASET_LENGTH * dataset.test_frac)) self.assertTrue("image" in dataset[0]) self.assertTrue("label" in dataset[0]) - self.assertTrue(isinstance(dataset[0]["image"], MetaTensor)) + self.assertIsInstance(dataset[0]["image"], MetaTensor) self.assertTupleEqual(dataset[0]["image"].shape, (1, 64, 64)) with skip_if_downloading_fails(): @@ -65,11 +65,8 @@ def _test_dataset(dataset): self.assertEqual(data[0]["class_name"], "AbdomenCT") self.assertEqual(data[0]["label"], 0) shutil.rmtree(os.path.join(testing_dir, "MedNIST")) - try: + with self.assertRaisesRegex(RuntimeError, "^Cannot find dataset directory"): MedNISTDataset(root_dir=testing_dir, transform=transform, section="test", download=False) - except RuntimeError as e: - print(str(e)) - self.assertTrue(str(e).startswith("Cannot find dataset directory")) if __name__ == "__main__": diff --git a/tests/test_meta_affine.py b/tests/test_meta_affine.py index 95764a0c89..890734391f 100644 --- a/tests/test_meta_affine.py +++ b/tests/test_meta_affine.py @@ -160,7 +160,7 @@ def test_linear_consistent(self, xform_cls, input_dict, atol): diff = np.abs(itk.GetArrayFromImage(ref_2) - itk.GetArrayFromImage(expected)) avg_diff = np.mean(diff) - self.assertTrue(avg_diff < atol, f"{xform_cls} avg_diff: {avg_diff}, tol: {atol}") + self.assertLess(avg_diff, atol, f"{xform_cls} avg_diff: {avg_diff}, tol: {atol}") @parameterized.expand(TEST_CASES_DICT) def test_linear_consistent_dict(self, xform_cls, input_dict, atol): @@ -175,7 +175,7 @@ def test_linear_consistent_dict(self, xform_cls, input_dict, atol): diff = {k: np.abs(itk.GetArrayFromImage(ref_2[k]) - itk.GetArrayFromImage(expected[k])) for k in keys} avg_diff = {k: np.mean(diff[k]) for k in keys} for k in keys: - self.assertTrue(avg_diff[k] < atol, f"{xform_cls} avg_diff: {avg_diff}, tol: {atol}") + self.assertLess(avg_diff[k], atol, f"{xform_cls} avg_diff: {avg_diff}, tol: {atol}") if __name__ == "__main__": diff --git a/tests/test_meta_tensor.py b/tests/test_meta_tensor.py index 1e0f188b63..f31a07eba4 100644 --- a/tests/test_meta_tensor.py +++ b/tests/test_meta_tensor.py @@ -222,9 +222,9 @@ def test_stack(self, device, dtype): def test_get_set_meta_fns(self): set_track_meta(False) - self.assertEqual(get_track_meta(), False) + self.assertFalse(get_track_meta()) set_track_meta(True) - self.assertEqual(get_track_meta(), True) + self.assertTrue(get_track_meta()) @parameterized.expand(TEST_DEVICES) def test_torchscript(self, device): diff --git a/tests/test_mmar_download.py b/tests/test_mmar_download.py index 6af3d09fb2..2ac73a8149 100644 --- a/tests/test_mmar_download.py +++ b/tests/test_mmar_download.py @@ -142,7 +142,7 @@ def test_load_ckpt(self, input_args, expected_name, expected_val): def test_unique(self): # model ids are unique keys = sorted(m["id"] for m in MODEL_DESC) - self.assertTrue(keys == sorted(set(keys))) + self.assertEqual(keys, sorted(set(keys))) def test_search(self): self.assertEqual(_get_val({"a": 1, "b": 2}, key="b"), 2) diff --git a/tests/test_monai_utils_misc.py b/tests/test_monai_utils_misc.py index a2a4ed62f7..55ed009671 100644 --- a/tests/test_monai_utils_misc.py +++ b/tests/test_monai_utils_misc.py @@ -92,12 +92,11 @@ def test_run_cmd(self): cmd2 = "-c" cmd3 = 'import sys; print("\\tThis is on stderr\\n", file=sys.stderr); sys.exit(1)'
os.environ["MONAI_DEBUG"] = str(True) - try: + with self.assertRaises(RuntimeError) as cm: run_cmd([cmd1, cmd2, cmd3], check=True) - except RuntimeError as err: - self.assertIn("This is on stderr", str(err)) - self.assertNotIn("\\n", str(err)) - self.assertNotIn("\\t", str(err)) + self.assertIn("This is on stderr", str(cm.exception)) + self.assertNotIn("\\n", str(cm.exception)) + self.assertNotIn("\\t", str(cm.exception)) + if __name__ == "__main__": diff --git a/tests/test_multi_scale.py b/tests/test_multi_scale.py index 6681f266a8..930bf9d5e1 100644 --- a/tests/test_multi_scale.py +++ b/tests/test_multi_scale.py @@ -58,17 +58,19 @@ def test_shape(self, input_param, input_data, expected_val): result = MultiScaleLoss(**input_param).forward(**input_data) np.testing.assert_allclose(result.detach().cpu().numpy(), expected_val, rtol=1e-5) - def test_ill_opts(self): - with self.assertRaisesRegex(ValueError, ""): - MultiScaleLoss(loss=dice_loss, kernel="none") - with self.assertRaisesRegex(ValueError, ""): - MultiScaleLoss(loss=dice_loss, scales=[-1])( - torch.ones((1, 1, 3), device=device), torch.ones((1, 1, 3), device=device) - ) - with self.assertRaisesRegex(ValueError, ""): - MultiScaleLoss(loss=dice_loss, scales=[-1], reduction="none")( - torch.ones((1, 1, 3), device=device), torch.ones((1, 1, 3), device=device) - ) + @parameterized.expand([ + ("kernel_none", {"loss": dice_loss, "kernel": "none"}, None, None), + ("scales_negative", {"loss": dice_loss, "scales": [-1]}, torch.ones((1, 1, 3)), torch.ones((1, 1, 3))), + ("scales_negative_reduction_none", {"loss": dice_loss, "scales": [-1], "reduction": "none"}, + torch.ones((1, 1, 3)), torch.ones((1, 1, 3))), + ]) + def test_ill_opts(self, name, kwargs, input, target): + if input is None and target is None: + with self.assertRaisesRegex(ValueError, ""): + MultiScaleLoss(**kwargs) + else: + with self.assertRaisesRegex(ValueError, ""): + MultiScaleLoss(**kwargs)(input, target) def test_script(self): input_param, input_data, expected_val = TEST_CASES[0] diff --git a/tests/test_optional_import.py b/tests/test_optional_import.py index e7e1c03fd0..1692950e52 100644 --- a/tests/test_optional_import.py +++ b/tests/test_optional_import.py @@ -12,23 +12,20 @@ from __future__ import annotations import unittest +from parameterized import parameterized from monai.utils import OptionalImportError, exact_version, optional_import class TestOptionalImport(unittest.TestCase): - def test_default(self): - my_module, flag = optional_import("not_a_module") + @parameterized.expand(["not_a_module", "torch.randint"]) + def test_default(self, import_module): + my_module, flag = optional_import(import_module) self.assertFalse(flag) with self.assertRaises(OptionalImportError): my_module.test - my_module, flag = optional_import("torch.randint") - with self.assertRaises(OptionalImportError): - self.assertFalse(flag) - print(my_module.test) - def test_import_valid(self): my_module, flag = optional_import("torch") self.assertTrue(flag) @@ -47,21 +44,13 @@ def test_import_wrong_number(self): self.assertTrue(flag) print(my_module.randint(1, 2, (1, 2))) - def test_import_good_number(self): - my_module, flag = optional_import("torch", "0") + @parameterized.expand(["0", "0.0.0.1", "1.1.0"]) + def test_import_good_number(self, version_number): + my_module, flag = optional_import("torch", version_number) my_module.nn self.assertTrue(flag) print(my_module.randint(1, 2, (1, 2))) - my_module, flag = optional_import("torch", "0.0.0.1") - my_module.nn - self.assertTrue(flag) - 
print(my_module.randint(1, 2, (1, 2))) - - my_module, flag = optional_import("torch", "1.1.0") - my_module.nn - self.assertTrue(flag) - print(my_module.randint(1, 2, (1, 2))) def test_import_exact(self): my_module, flag = optional_import("torch", "0", exact_version) diff --git a/tests/test_pad_collation.py b/tests/test_pad_collation.py index ee6e001438..17f49611df 100644 --- a/tests/test_pad_collation.py +++ b/tests/test_pad_collation.py @@ -117,7 +117,7 @@ def test_pad_collation(self, t_type, collate_method, transform): batch_inverse = BatchInverseTransform(dataset.transform, loader) for data in loader: output = batch_inverse(data) - self.assertTrue(output[0]["image"].shape, (1, 10, 9)) + self.assertEqual(output[0]["image"].shape, (1, 10, 9)) if __name__ == "__main__": diff --git a/tests/test_perceptual_loss.py b/tests/test_perceptual_loss.py index 02232e6f8d..8889f5ea06 100644 --- a/tests/test_perceptual_loss.py +++ b/tests/test_perceptual_loss.py @@ -85,12 +85,10 @@ def test_1d(self): with self.assertRaises(NotImplementedError): PerceptualLoss(spatial_dims=1) - def test_medicalnet_on_2d_data(self): + @parameterized.expand(["medicalnet_resnet10_23datasets", "medicalnet_resnet50_23datasets"]) + def test_medicalnet_on_2d_data(self, network_type): with self.assertRaises(ValueError): - PerceptualLoss(spatial_dims=2, network_type="medicalnet_resnet10_23datasets") + PerceptualLoss(spatial_dims=2, network_type=network_type) - with self.assertRaises(ValueError): - PerceptualLoss(spatial_dims=2, network_type="medicalnet_resnet50_23datasets") if __name__ == "__main__": diff --git a/tests/test_persistentdataset.py b/tests/test_persistentdataset.py index b7bf2fbb11..7c4969e283 100644 --- a/tests/test_persistentdataset.py +++ b/tests/test_persistentdataset.py @@ -165,7 +165,7 @@ def test_different_transforms(self): im1 = PersistentDataset([im], Identity(), cache_dir=path, hash_transform=json_hashing)[0] im2 = PersistentDataset([im], Flip(1), cache_dir=path, hash_transform=json_hashing)[0] l2 = ((im1 - im2) ** 2).sum() ** 0.5 - self.assertTrue(l2 > 1) + self.assertGreater(l2, 1) if __name__ == "__main__": diff --git a/tests/test_prepare_batch_default.py b/tests/test_prepare_batch_default.py index d5a5fbf57e..1e2a0a1c02 100644 --- a/tests/test_prepare_batch_default.py +++ b/tests/test_prepare_batch_default.py @@ -12,6 +12,7 @@ from __future__ import annotations import unittest import torch +from parameterized import parameterized @@ -27,9 +28,8 @@ def forward(self, x: torch.Tensor): class TestPrepareBatchDefault(unittest.TestCase): - def test_dict_content(self): - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - dataloader = [ + @parameterized.expand([ + ("dict_content", [ { "image": torch.tensor([1, 2]), "label": torch.tensor([3, 4]), "extra1": torch.tensor([5, 6]), "extra2": 16, "extra3": "test", } - ] - # set up engine - evaluator = SupervisedEvaluator( - device=device, - val_data_loader=dataloader, - epoch_length=1, - network=TestNet(), - non_blocking=False, - prepare_batch=PrepareBatchDefault(), - decollate=False, - mode="eval", - ) - evaluator.run() - output = evaluator.state.output - assert_allclose(output["image"], torch.tensor([1, 2], device=device)) - assert_allclose(output["label"], torch.tensor([3, 4], device=device)) - - def test_tensor_content(self): + ], TestNet(), True), + ("tensor_content", [torch.tensor([1, 2])], torch.nn.Identity(), True), + ("pair_content", [(torch.tensor([1, 2]), torch.tensor([3, 4]))], torch.nn.Identity(), True), + 
("empty_data", [], TestNet(), False), + ]) + def test_prepare_batch(self, name, dataloader, network, should_run): device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - dataloader = [torch.tensor([1, 2])] - - # set up engine evaluator = SupervisedEvaluator( device=device, val_data_loader=dataloader, - epoch_length=1, - network=torch.nn.Identity(), + epoch_length=len(dataloader) if should_run else 0, + network=network, non_blocking=False, prepare_batch=PrepareBatchDefault(), decollate=False, - mode="eval", + mode="eval" if should_run else "train", ) evaluator.run() - output = evaluator.state.output - assert_allclose(output["image"], torch.tensor([1, 2], device=device)) - self.assertTrue(output["label"] is None) - def test_pair_content(self): - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - dataloader = [(torch.tensor([1, 2]), torch.tensor([3, 4]))] - - # set up engine - evaluator = SupervisedEvaluator( - device=device, - val_data_loader=dataloader, - epoch_length=1, - network=torch.nn.Identity(), - non_blocking=False, - prepare_batch=PrepareBatchDefault(), - decollate=False, - mode="eval", - ) - evaluator.run() - output = evaluator.state.output - assert_allclose(output["image"], torch.tensor([1, 2], device=device)) - assert_allclose(output["label"], torch.tensor([3, 4], device=device)) - - def test_empty_data(self): - dataloader = [] - evaluator = SupervisedEvaluator( - val_data_loader=dataloader, - device=torch.device("cpu"), - epoch_length=0, - network=TestNet(), - non_blocking=False, - prepare_batch=PrepareBatchDefault(), - decollate=False, - ) - evaluator.run() + if should_run: + output = evaluator.state.output + if name == "dict_content" or name == "pair_content": + assert_allclose(output["image"], torch.tensor([1, 2], device=device)) + assert_allclose(output["label"], torch.tensor([3, 4], device=device)) + elif name == "tensor_content": + assert_allclose(output["image"], torch.tensor([1, 2], device=device)) + self.assertTrue(output["label"] is None) if __name__ == "__main__": diff --git a/tests/test_rand_affine.py b/tests/test_rand_affine.py index 23e3fd148c..1133a04695 100644 --- a/tests/test_rand_affine.py +++ b/tests/test_rand_affine.py @@ -152,11 +152,13 @@ def test_rand_affine(self, input_param, input_data, expected_val): self.assertTrue(g._cached_grid is not None) assert_allclose(result, expected_val, rtol=_rtol, atol=1e-4, type_test="tensor") - def test_ill_cache(self): + @parameterized.expand([ + (None,), + ((1, 1, -1),) + ]) + def test_ill_cache(self, spatial_size): with self.assertWarns(UserWarning): - RandAffine(cache_grid=True) - with self.assertWarns(UserWarning): - RandAffine(cache_grid=True, spatial_size=(1, 1, -1)) + RandAffine(cache_grid=True, spatial_size=spatial_size) @parameterized.expand(TEST_CASES_SKIPPED_CONSISTENCY) def test_skipped_transform_consistency(self, im, in_dtype): diff --git a/tests/test_rand_affined.py b/tests/test_rand_affined.py index 32fde8dc0f..9dd32c8282 100644 --- a/tests/test_rand_affined.py +++ b/tests/test_rand_affined.py @@ -240,7 +240,7 @@ def test_rand_affined(self, input_param, input_data, expected_val, track_meta): resampler.lazy = False if input_param.get("cache_grid", False): - self.assertTrue(g.rand_affine._cached_grid is not None) + self.assertIsNotNone(g.rand_affine._cached_grid) for key in res: if isinstance(key, str) and key.endswith("_transforms"): continue @@ -272,13 +272,13 @@ def test_rand_affined(self, input_param, input_data, expected_val, track_meta): 
self.assertEqual(len(v.applied_operations), 0) self.assertTupleEqual(v.shape, input_data[k].shape) - def test_ill_cache(self): + @parameterized.expand([ + (None,), + ((2, -1),) + ]) + def test_ill_cache(self, spatial_size): with self.assertWarns(UserWarning): - # spatial size is None - RandAffined(device=device, spatial_size=None, prob=1.0, cache_grid=True, keys=("img", "seg")) - with self.assertWarns(UserWarning): - # spatial size is dynamic - RandAffined(device=device, spatial_size=(2, -1), prob=1.0, cache_grid=True, keys=("img", "seg")) + RandAffined(device=device, spatial_size=spatial_size, prob=1.0, cache_grid=True, keys=("img", "seg")) if __name__ == "__main__": diff --git a/tests/test_rand_bias_field.py b/tests/test_rand_bias_field.py index 333a9ecba5..328f46b7ee 100644 --- a/tests/test_rand_bias_field.py +++ b/tests/test_rand_bias_field.py @@ -39,7 +39,7 @@ def test_output_shape(self, class_args, img_shape): img = p(np.random.rand(*img_shape)) output = bias_field(img) np.testing.assert_equal(output.shape, img_shape) - self.assertTrue(output.dtype in (np.float32, torch.float32)) + self.assertIn(output.dtype, (np.float32, torch.float32)) img_zero = np.zeros([*img_shape]) output_zero = bias_field(img_zero) diff --git a/tests/test_rand_weighted_cropd.py b/tests/test_rand_weighted_cropd.py index 1524442f61..a1414df0ac 100644 --- a/tests/test_rand_weighted_cropd.py +++ b/tests/test_rand_weighted_cropd.py @@ -154,7 +154,7 @@ def test_rand_weighted_cropd(self, _, init_params, input_data, expected_shape, e crop = RandWeightedCropd(**init_params) crop.set_random_state(10) result = crop(input_data) - self.assertTrue(len(result) == init_params["num_samples"]) + self.assertEqual(len(result), init_params["num_samples"]) _len = len(tuple(input_data.keys())) self.assertTupleEqual(tuple(result[0].keys())[:_len], tuple(input_data.keys())) diff --git a/tests/test_recon_net_utils.py b/tests/test_recon_net_utils.py index 1815000777..48d3b59a17 100644 --- a/tests/test_recon_net_utils.py +++ b/tests/test_recon_net_utils.py @@ -64,7 +64,7 @@ def test_reshape_channel_complex(self, test_data): def test_complex_normalize(self, test_data): result, mean, std = complex_normalize(test_data) result = result * std + mean - self.assertTrue((((result - test_data) ** 2).mean() ** 0.5).item() < 1e-5) + self.assertLess((((result - test_data) ** 2).mean() ** 0.5).item(), 1e-5) @parameterized.expand(TEST_PAD) def test_pad(self, test_data): diff --git a/tests/test_reg_loss_integration.py b/tests/test_reg_loss_integration.py index e8f82eb0c2..1fb81689e6 100644 --- a/tests/test_reg_loss_integration.py +++ b/tests/test_reg_loss_integration.py @@ -99,7 +99,7 @@ def forward(self, x): # backward pass loss_val.backward() optimizer.step() - self.assertTrue(init_loss > loss_val, "loss did not decrease") + self.assertGreater(init_loss, loss_val, "loss did not decrease") if __name__ == "__main__": diff --git a/tests/test_resnet.py b/tests/test_resnet.py index ad1aad8fc6..43b1aa0d18 100644 --- a/tests/test_resnet.py +++ b/tests/test_resnet.py @@ -211,7 +211,7 @@ def test_resnet_shape(self, model, input_param, input_shape, expected_shape): if input_param.get("feed_forward", True): self.assertEqual(result.shape, expected_shape) else: - self.assertTrue(result.shape in expected_shape) + self.assertIn(result.shape, expected_shape) @parameterized.expand(PRETRAINED_TEST_CASES) @skip_if_quick diff --git a/tests/test_sobel_gradient.py b/tests/test_sobel_gradient.py index 3d995a60c9..a0d7cf5a8b 100644 --- a/tests/test_sobel_gradient.py +++ 
@@ -164,8 +164,8 @@ def test_sobel_gradients(self, image, arguments, expected_grad):
     )
     def test_sobel_kernels(self, arguments, expected_kernels):
         sobel = SobelGradients(**arguments)
-        self.assertTrue(sobel.kernel_diff.dtype == expected_kernels[0].dtype)
-        self.assertTrue(sobel.kernel_smooth.dtype == expected_kernels[0].dtype)
+        self.assertEqual(sobel.kernel_diff.dtype, expected_kernels[0].dtype)
+        self.assertEqual(sobel.kernel_smooth.dtype, expected_kernels[0].dtype)
         assert_allclose(sobel.kernel_diff, expected_kernels[0])
         assert_allclose(sobel.kernel_smooth, expected_kernels[1])
diff --git a/tests/test_sobel_gradientd.py b/tests/test_sobel_gradientd.py
index 7499a0410b..03524823a5 100644
--- a/tests/test_sobel_gradientd.py
+++ b/tests/test_sobel_gradientd.py
@@ -187,8 +187,8 @@ def test_sobel_gradients(self, image_dict, arguments, expected_grad):
     )
     def test_sobel_kernels(self, arguments, expected_kernels):
         sobel = SobelGradientsd(**arguments)
-        self.assertTrue(sobel.kernel_diff.dtype == expected_kernels[0].dtype)
-        self.assertTrue(sobel.kernel_smooth.dtype == expected_kernels[0].dtype)
+        self.assertEqual(sobel.kernel_diff.dtype, expected_kernels[0].dtype)
+        self.assertEqual(sobel.kernel_smooth.dtype, expected_kernels[0].dtype)
         assert_allclose(sobel.kernel_diff, expected_kernels[0])
         assert_allclose(sobel.kernel_smooth, expected_kernels[1])
diff --git a/tests/test_tciadataset.py b/tests/test_tciadataset.py
index d996922e20..95fe921330 100644
--- a/tests/test_tciadataset.py
+++ b/tests/test_tciadataset.py
@@ -108,7 +108,7 @@ def _test_dataset(dataset):
         )[0]
         shutil.rmtree(os.path.join(testing_dir, collection))
 
-        try:
+        with self.assertRaisesRegex(RuntimeError, "^Cannot find dataset directory"):
             TciaDataset(
                 root_dir=testing_dir,
                 collection=collection,
@@ -117,8 +117,7 @@ def _test_dataset(dataset):
                 download=False,
                 val_frac=val_frac,
             )
-        except RuntimeError as e:
-            self.assertTrue(str(e).startswith("Cannot find dataset directory"))
+
 
 
 if __name__ == "__main__":
diff --git a/tests/test_threadcontainer.py b/tests/test_threadcontainer.py
index 9551dec703..568461748b 100644
--- a/tests/test_threadcontainer.py
+++ b/tests/test_threadcontainer.py
@@ -62,7 +62,7 @@ def test_container(self):
 
         self.assertTrue(con.is_alive)
         self.assertIsNotNone(con.status())
-        self.assertTrue(len(con.status_dict) > 0)
+        self.assertGreater(len(con.status_dict), 0)
 
         con.join()
diff --git a/tests/test_to_cupy.py b/tests/test_to_cupy.py
index 5a1754e7c5..38400f0d3f 100644
--- a/tests/test_to_cupy.py
+++ b/tests/test_to_cupy.py
@@ -62,8 +62,8 @@ def test_numpy_input_dtype(self):
         test_data = np.rot90(test_data)
         self.assertFalse(test_data.flags["C_CONTIGUOUS"])
         result = ToCupy(np.uint8)(test_data)
-        self.assertTrue(result.dtype == cp.uint8)
-        self.assertTrue(isinstance(result, cp.ndarray))
+        self.assertEqual(result.dtype, cp.uint8)
+        self.assertIsInstance(result, cp.ndarray)
         self.assertTrue(result.flags["C_CONTIGUOUS"])
         cp.testing.assert_allclose(result, test_data)
 
@@ -72,8 +72,8 @@ def test_tensor_input(self):
         test_data = test_data.rot90()
         self.assertFalse(test_data.is_contiguous())
         result = ToCupy()(test_data)
-        self.assertTrue(result.dtype == cp.float32)
-        self.assertTrue(isinstance(result, cp.ndarray))
+        self.assertEqual(result.dtype, cp.float32)
+        self.assertIsInstance(result, cp.ndarray)
         self.assertTrue(result.flags["C_CONTIGUOUS"])
         cp.testing.assert_allclose(result, test_data)
 
@@ -83,8 +83,8 @@ def test_tensor_cuda_input(self):
         test_data = test_data.rot90()
         self.assertFalse(test_data.is_contiguous())
         result = ToCupy()(test_data)
-        self.assertTrue(result.dtype == cp.float32)
-        self.assertTrue(isinstance(result, cp.ndarray))
+        self.assertEqual(result.dtype, cp.float32)
+        self.assertIsInstance(result, cp.ndarray)
         self.assertTrue(result.flags["C_CONTIGUOUS"])
         cp.testing.assert_allclose(result, test_data)
 
@@ -95,8 +95,8 @@ def test_tensor_cuda_input_dtype(self):
 
         self.assertFalse(test_data.is_contiguous())
         result = ToCupy(dtype="float32")(test_data)
-        self.assertTrue(result.dtype == cp.float32)
-        self.assertTrue(isinstance(result, cp.ndarray))
+        self.assertEqual(result.dtype, cp.float32)
+        self.assertIsInstance(result, cp.ndarray)
         self.assertTrue(result.flags["C_CONTIGUOUS"])
         cp.testing.assert_allclose(result, test_data)
diff --git a/tests/test_to_numpy.py b/tests/test_to_numpy.py
index 8f7cf34865..f4e5f80a29 100644
--- a/tests/test_to_numpy.py
+++ b/tests/test_to_numpy.py
@@ -32,7 +32,7 @@ def test_cupy_input(self):
         test_data = cp.rot90(test_data)
         self.assertFalse(test_data.flags["C_CONTIGUOUS"])
         result = ToNumpy()(test_data)
-        self.assertTrue(isinstance(result, np.ndarray))
+        self.assertIsInstance(result, np.ndarray)
         self.assertTrue(result.flags["C_CONTIGUOUS"])
         assert_allclose(result, test_data.get(), type_test=False)
 
@@ -41,8 +41,8 @@ def test_numpy_input(self):
         test_data = np.rot90(test_data)
         self.assertFalse(test_data.flags["C_CONTIGUOUS"])
         result = ToNumpy(dtype="float32")(test_data)
-        self.assertTrue(isinstance(result, np.ndarray))
-        self.assertTrue(result.dtype == np.float32)
+        self.assertIsInstance(result, np.ndarray)
+        self.assertEqual(result.dtype, np.float32)
         self.assertTrue(result.flags["C_CONTIGUOUS"])
         assert_allclose(result, test_data, type_test=False)
 
@@ -51,7 +51,7 @@ def test_tensor_input(self):
         test_data = test_data.rot90()
         self.assertFalse(test_data.is_contiguous())
         result = ToNumpy(dtype=torch.uint8)(test_data)
-        self.assertTrue(isinstance(result, np.ndarray))
+        self.assertIsInstance(result, np.ndarray)
         self.assertTrue(result.flags["C_CONTIGUOUS"])
         assert_allclose(result, test_data, type_test=False)
 
@@ -61,7 +61,7 @@ def test_tensor_cuda_input(self):
         test_data = test_data.rot90()
         self.assertFalse(test_data.is_contiguous())
         result = ToNumpy()(test_data)
-        self.assertTrue(isinstance(result, np.ndarray))
+        self.assertIsInstance(result, np.ndarray)
         self.assertTrue(result.flags["C_CONTIGUOUS"])
         assert_allclose(result, test_data, type_test=False)
 
@@ -71,13 +71,13 @@ def test_list_tuple(self):
         assert_allclose(result, np.asarray(test_data), type_test=False)
         test_data = ((1, 2), (3, 4))
         result = ToNumpy(wrap_sequence=False)(test_data)
-        self.assertTrue(type(result), tuple)
+        self.assertIsInstance(result, tuple)
         assert_allclose(result, ((np.asarray(1), np.asarray(2)), (np.asarray(3), np.asarray(4))))
 
     def test_single_value(self):
         for test_data in [5, np.array(5), torch.tensor(5)]:
             result = ToNumpy(dtype=np.uint8)(test_data)
-            self.assertTrue(isinstance(result, np.ndarray))
+            self.assertIsInstance(result, np.ndarray)
             assert_allclose(result, np.asarray(test_data), type_test=False)
             self.assertEqual(result.ndim, 0)
diff --git a/tests/test_torchvision_fc_model.py b/tests/test_torchvision_fc_model.py
index 322cce1161..9cc19db62c 100644
--- a/tests/test_torchvision_fc_model.py
+++ b/tests/test_torchvision_fc_model.py
@@ -195,8 +195,8 @@ def test_get_module(self):
         mod = look_up_named_module("model.1.submodule.1.submodule.1.submodule.0.conv", net)
         self.assertTrue(str(mod).startswith("Conv2d"))
         self.assertIsInstance(set_named_module(net, "model", torch.nn.Identity()).model, torch.nn.Identity)
-        self.assertEqual(look_up_named_module("model.1.submodule.1.submodule.1.submodule.conv", net), None)
-        self.assertEqual(look_up_named_module("test attribute", net), None)
+        self.assertIsNone(look_up_named_module("model.1.submodule.1.submodule.1.submodule.conv", net))
+        self.assertIsNone(look_up_named_module("test attribute", net))
 
 
 if __name__ == "__main__":
diff --git a/tests/test_traceable_transform.py b/tests/test_traceable_transform.py
index dd139053e3..6a499b2dd9 100644
--- a/tests/test_traceable_transform.py
+++ b/tests/test_traceable_transform.py
@@ -33,12 +33,12 @@ def test_default(self):
         expected_key = "_transforms"
         a = _TraceTest()
         for x in a.transform_info_keys():
-            self.assertTrue(x in a.get_transform_info())
+            self.assertIn(x, a.get_transform_info())
         self.assertEqual(a.trace_key(), expected_key)
 
         data = {"image": "test"}
         data = a(data)  # adds to the stack
-        self.assertTrue(isinstance(data[expected_key], list))
+        self.assertIsInstance(data[expected_key], list)
         self.assertEqual(data[expected_key][0]["class"], "_TraceTest")
 
         data = a(data)  # adds to the stack
diff --git a/tests/test_tversky_loss.py b/tests/test_tversky_loss.py
index efe1f2cdf3..1c1310c316 100644
--- a/tests/test_tversky_loss.py
+++ b/tests/test_tversky_loss.py
@@ -165,17 +165,16 @@ def test_ill_shape(self):
         with self.assertRaisesRegex(ValueError, ""):
             TverskyLoss(reduction=None)(chn_input, chn_target)
 
-    def test_input_warnings(self):
+    @parameterized.expand([
+        (False, False, False),
+        (True, True, False),
+        (True, False, True),
+    ])
+    def test_input_warnings(self, include_background, softmax, to_onehot_y):
         chn_input = torch.ones((1, 1, 3))
         chn_target = torch.ones((1, 1, 3))
         with self.assertWarns(Warning):
-            loss = TverskyLoss(include_background=False)
-            loss.forward(chn_input, chn_target)
-        with self.assertWarns(Warning):
-            loss = TverskyLoss(softmax=True)
-            loss.forward(chn_input, chn_target)
-        with self.assertWarns(Warning):
-            loss = TverskyLoss(to_onehot_y=True)
+            loss = TverskyLoss(include_background=include_background, softmax=softmax, to_onehot_y=to_onehot_y)
             loss.forward(chn_input, chn_target)
 
     def test_script(self):
diff --git a/tests/test_ultrasound_confidence_map_transform.py b/tests/test_ultrasound_confidence_map_transform.py
index f672961700..e40c972e92 100644
--- a/tests/test_ultrasound_confidence_map_transform.py
+++ b/tests/test_ultrasound_confidence_map_transform.py
@@ -12,6 +12,7 @@
 from __future__ import annotations
 
 import unittest
 
 import numpy as np
 import torch
+from parameterized import parameterized
@@ -535,162 +536,107 @@ def test_parameters(self):
         with self.assertRaises(ValueError):
             UltrasoundConfidenceMapTransform(sink_mode="unknown")
 
-    def test_rgb(self):
+    @parameterized.expand([
+        ("all", SINK_ALL_OUTPUT),
+        ("mid", SINK_MID_OUTPUT),
+        ("min", SINK_MIN_OUTPUT),
+        ("mask", SINK_MASK_OUTPUT, True),
+    ])
+    def test_ultrasound_confidence_map_transform(self, sink_mode, expected_output, use_mask=False):
         # RGB image
         input_img_rgb = np.expand_dims(np.repeat(self.input_img_np, 3, axis=0), axis=0)
         input_img_rgb_torch = torch.from_numpy(input_img_rgb)
 
-        transform = UltrasoundConfidenceMapTransform(sink_mode="all")
-        result_torch = transform(input_img_rgb_torch)
-        self.assertIsInstance(result_torch, torch.Tensor)
-        assert_allclose(result_torch, torch.tensor(SINK_ALL_OUTPUT), rtol=1e-4, atol=1e-4)
-        result_np = transform(input_img_rgb)
-        self.assertIsInstance(result_np, np.ndarray)
-        assert_allclose(result_np, SINK_ALL_OUTPUT, rtol=1e-4, atol=1e-4)
+        transform = UltrasoundConfidenceMapTransform(sink_mode=sink_mode)
-        transform = UltrasoundConfidenceMapTransform(sink_mode="mid")
-        result_torch = transform(input_img_rgb_torch)
-        self.assertIsInstance(result_torch, torch.Tensor)
-        assert_allclose(result_torch, torch.tensor(SINK_MID_OUTPUT), rtol=1e-4, atol=1e-4)
-        result_np = transform(input_img_rgb)
-        self.assertIsInstance(result_np, np.ndarray)
-        assert_allclose(result_np, SINK_MID_OUTPUT, rtol=1e-4, atol=1e-4)
+        if use_mask:
+            result_torch = transform(input_img_rgb_torch, self.input_mask_torch)
+            result_np = transform(input_img_rgb, self.input_mask_np)
+        else:
+            result_torch = transform(input_img_rgb_torch)
+            result_np = transform(input_img_rgb)
 
-        transform = UltrasoundConfidenceMapTransform(sink_mode="min")
-        result_torch = transform(input_img_rgb_torch)
         self.assertIsInstance(result_torch, torch.Tensor)
-        assert_allclose(result_torch, torch.tensor(SINK_MIN_OUTPUT), rtol=1e-4, atol=1e-4)
-        result_np = transform(input_img_rgb)
+        assert_allclose(result_torch, torch.tensor(expected_output), rtol=1e-4, atol=1e-4)
         self.assertIsInstance(result_np, np.ndarray)
-        assert_allclose(result_np, SINK_MIN_OUTPUT, rtol=1e-4, atol=1e-4)
-
-        transform = UltrasoundConfidenceMapTransform(sink_mode="mask")
-        result_torch = transform(input_img_rgb_torch, self.input_mask_torch)
-        self.assertIsInstance(result_torch, torch.Tensor)
-        assert_allclose(result_torch, torch.tensor(SINK_MASK_OUTPUT), rtol=1e-4, atol=1e-4)
-        result_np = transform(input_img_rgb, self.input_mask_np)
-        self.assertIsInstance(result_np, np.ndarray)
-        assert_allclose(result_np, SINK_MASK_OUTPUT, rtol=1e-4, atol=1e-4)
-
-    def test_multi_channel_2d(self):
-        # 2D multi-channel image
+        assert_allclose(result_np, expected_output, rtol=1e-4, atol=1e-4)
+
+    @parameterized.expand([
+        ("all", SINK_ALL_OUTPUT),
+        ("mid", SINK_MID_OUTPUT),
+        ("min", SINK_MIN_OUTPUT),
+        ("mask", SINK_MASK_OUTPUT, True),  # the "mask" sink mode requires an input mask
+    ])
+    def test_multi_channel_2d(self, sink_mode, expected_output, use_mask=False):
         input_img_rgb = np.expand_dims(np.repeat(self.input_img_np, 17, axis=0), axis=0)
         input_img_rgb_torch = torch.from_numpy(input_img_rgb)
 
-        transform = UltrasoundConfidenceMapTransform(sink_mode="all")
-        result_torch = transform(input_img_rgb_torch)
-        self.assertIsInstance(result_torch, torch.Tensor)
-        assert_allclose(result_torch, torch.tensor(SINK_ALL_OUTPUT), rtol=1e-4, atol=1e-4)
-        result_np = transform(input_img_rgb)
-        self.assertIsInstance(result_np, np.ndarray)
-        assert_allclose(result_np, SINK_ALL_OUTPUT, rtol=1e-4, atol=1e-4)
-
-        transform = UltrasoundConfidenceMapTransform(sink_mode="mid")
-        result_torch = transform(input_img_rgb_torch)
-        self.assertIsInstance(result_torch, torch.Tensor)
-        assert_allclose(result_torch, torch.tensor(SINK_MID_OUTPUT), rtol=1e-4, atol=1e-4)
-        result_np = transform(input_img_rgb)
-        self.assertIsInstance(result_np, np.ndarray)
-        assert_allclose(result_np, SINK_MID_OUTPUT, rtol=1e-4, atol=1e-4)
+        transform = UltrasoundConfidenceMapTransform(sink_mode=sink_mode)
 
-        transform = UltrasoundConfidenceMapTransform(sink_mode="min")
-        result_torch = transform(input_img_rgb_torch)
-        self.assertIsInstance(result_torch, torch.Tensor)
-        assert_allclose(result_torch, torch.tensor(SINK_MIN_OUTPUT), rtol=1e-4, atol=1e-4)
-        result_np = transform(input_img_rgb)
-        self.assertIsInstance(result_np, np.ndarray)
-        assert_allclose(result_np, SINK_MIN_OUTPUT, rtol=1e-4, atol=1e-4)
+        if use_mask:
+            result_torch = transform(input_img_rgb_torch, self.input_mask_torch)
+            result_np = transform(input_img_rgb, self.input_mask_np)
+        else:
+            result_torch = transform(input_img_rgb_torch)
+            result_np = transform(input_img_rgb)
 
-        transform = UltrasoundConfidenceMapTransform(sink_mode="mask")
-        result_torch = transform(input_img_rgb_torch, self.input_mask_torch)
         self.assertIsInstance(result_torch, torch.Tensor)
-        assert_allclose(result_torch, torch.tensor(SINK_MASK_OUTPUT), rtol=1e-4, atol=1e-4)
-        result_np = transform(input_img_rgb, self.input_mask_np)
+        assert_allclose(result_torch, torch.tensor(expected_output), rtol=1e-4, atol=1e-4)
         self.assertIsInstance(result_np, np.ndarray)
-        assert_allclose(result_np, SINK_MASK_OUTPUT, rtol=1e-4, atol=1e-4)
-
-    def test_non_one_first_dim(self):
-        # Image without first dimension as 1
+        assert_allclose(result_np, expected_output, rtol=1e-4, atol=1e-4)
+
+    @parameterized.expand([
+        ("all",),
+        ("mid",),
+        ("min",),
+        ("mask",),
+    ])
+    def test_non_one_first_dim(self, sink_mode):
+        transform = UltrasoundConfidenceMapTransform(sink_mode=sink_mode)
         input_img_rgb = np.repeat(self.input_img_np, 3, axis=0)
         input_img_rgb_torch = torch.from_numpy(input_img_rgb)
 
-        transform = UltrasoundConfidenceMapTransform(sink_mode="all")
-        with self.assertRaises(ValueError):
-            transform(input_img_rgb_torch)
-        with self.assertRaises(ValueError):
-            transform(input_img_rgb)
-
-        transform = UltrasoundConfidenceMapTransform(sink_mode="mid")
-        with self.assertRaises(ValueError):
-            transform(input_img_rgb_torch)
-        with self.assertRaises(ValueError):
-            transform(input_img_rgb)
-
-        transform = UltrasoundConfidenceMapTransform(sink_mode="min")
-        with self.assertRaises(ValueError):
-            transform(input_img_rgb_torch)
-        with self.assertRaises(ValueError):
-            transform(input_img_rgb)
-
-        transform = UltrasoundConfidenceMapTransform(sink_mode="mask")
-        with self.assertRaises(ValueError):
-            transform(input_img_rgb_torch, self.input_mask_torch)
-        with self.assertRaises(ValueError):
-            transform(input_img_rgb, self.input_mask_np)
-
-    def test_no_first_dim(self):
-        # Image without first dimension
+        if sink_mode == "mask":
+            with self.assertRaises(ValueError):
+                transform(input_img_rgb_torch, self.input_mask_torch)
+            with self.assertRaises(ValueError):
+                transform(input_img_rgb, self.input_mask_np)
+        else:
+            with self.assertRaises(ValueError):
+                transform(input_img_rgb_torch)
+            with self.assertRaises(ValueError):
+                transform(input_img_rgb)
+
+    @parameterized.expand([
+        ("all",),
+        ("mid",),
+        ("min",),
+        ("mask",)
+    ])
+    def test_no_first_dim(self, sink_mode):
         input_img_rgb = self.input_img_np[0]
         input_img_rgb_torch = torch.from_numpy(input_img_rgb)
 
-        transform = UltrasoundConfidenceMapTransform(sink_mode="all")
-        with self.assertRaises(ValueError):
-            transform(input_img_rgb_torch)
-        with self.assertRaises(ValueError):
-            transform(input_img_rgb)
+        transform = UltrasoundConfidenceMapTransform(sink_mode=sink_mode)
 
-        transform = UltrasoundConfidenceMapTransform(sink_mode="mid")
         with self.assertRaises(ValueError):
             transform(input_img_rgb_torch)
         with self.assertRaises(ValueError):
             transform(input_img_rgb)
 
-        transform = UltrasoundConfidenceMapTransform(sink_mode="min")
-        with self.assertRaises(ValueError):
-            transform(input_img_rgb_torch)
-        with self.assertRaises(ValueError):
-            transform(input_img_rgb)
-
-    def test_sink_all(self):
-        transform = UltrasoundConfidenceMapTransform(sink_mode="all")
-
-        # This should not raise an exception for torch tensor
-        result_torch = transform(self.input_img_torch)
-        self.assertIsInstance(result_torch, torch.Tensor)
-
-        # This should not raise an exception for numpy array
-        result_np = transform(self.input_img_np)
-        self.assertIsInstance(result_np, np.ndarray)
-
-    def test_sink_mid(self):
-        transform = UltrasoundConfidenceMapTransform(sink_mode="mid")
-
-        # This should not raise an exception for torch tensor
-        result_torch = transform(self.input_img_torch)
-        self.assertIsInstance(result_torch, torch.Tensor)
-
-        # This should not raise an exception for numpy array
-        result_np = transform(self.input_img_np)
-        self.assertIsInstance(result_np, np.ndarray)
-
-    def test_sink_min(self):
-        transform = UltrasoundConfidenceMapTransform(sink_mode="min")
+        if sink_mode == "mask":
+            with self.assertRaises(ValueError):
+                transform(input_img_rgb_torch, self.input_mask_torch)
+            with self.assertRaises(ValueError):
+                transform(input_img_rgb, self.input_mask_np)
+
+    @parameterized.expand([
+        ("all",),
+        ("mid",),
+        ("min",),
+    ])
+    def test_sink_mode(self, mode):
+        transform = UltrasoundConfidenceMapTransform(sink_mode=mode)
 
         # This should not raise an exception for torch tensor
         result_torch = transform(self.input_img_torch)
diff --git a/tests/test_vit.py b/tests/test_vit.py
index a84883cba0..f30e04a2fc 100644
--- a/tests/test_vit.py
+++ b/tests/test_vit.py
@@ -69,75 +69,27 @@ def test_shape(self, input_param, input_shape, expected_shape):
         result, _ = net(torch.randn(input_shape))
         self.assertEqual(result.shape, expected_shape)
 
-    def test_ill_arg(self):
+    @parameterized.expand([
+        (1, (128, 128, 128), (16, 16, 16), 128, 3072, 12, 12, "conv", False, 5.0),
+        (1, (32, 32, 32), (64, 64, 64), 512, 3072, 12, 8, "perceptron", False, 0.3),
+        (1, (96, 96, 96), (8, 8, 8), 512, 3072, 12, 14, "conv", False, 0.3),
+        (1, (97, 97, 97), (4, 4, 4), 768, 3072, 12, 8, "perceptron", True, 0.3),
+        (4, (96, 96, 96), (16, 16, 16), 768, 3072, 12, 12, "perc", False, 0.3),
+    ])
+    def test_ill_arg(self, in_channels, img_size, patch_size, hidden_size, mlp_dim, num_layers, num_heads, pos_embed,
+                     classification, dropout_rate):
         with self.assertRaises(ValueError):
             ViT(
-                in_channels=1,
-                img_size=(128, 128, 128),
-                patch_size=(16, 16, 16),
-                hidden_size=128,
-                mlp_dim=3072,
-                num_layers=12,
-                num_heads=12,
-                pos_embed="conv",
-                classification=False,
-                dropout_rate=5.0,
-            )
-
-        with self.assertRaises(ValueError):
-            ViT(
-                in_channels=1,
-                img_size=(32, 32, 32),
-                patch_size=(64, 64, 64),
-                hidden_size=512,
-                mlp_dim=3072,
-                num_layers=12,
-                num_heads=8,
-                pos_embed="perceptron",
-                classification=False,
-                dropout_rate=0.3,
-            )
-
-        with self.assertRaises(ValueError):
-            ViT(
-                in_channels=1,
-                img_size=(96, 96, 96),
-                patch_size=(8, 8, 8),
-                hidden_size=512,
-                mlp_dim=3072,
-                num_layers=12,
-                num_heads=14,
-                pos_embed="conv",
-                classification=False,
-                dropout_rate=0.3,
-            )
-
-        with self.assertRaises(ValueError):
-            ViT(
-                in_channels=1,
-                img_size=(97, 97, 97),
-                patch_size=(4, 4, 4),
-                hidden_size=768,
-                mlp_dim=3072,
-                num_layers=12,
-                num_heads=8,
-                pos_embed="perceptron",
-                classification=True,
-                dropout_rate=0.3,
-            )
-
-        with self.assertRaises(ValueError):
-            ViT(
-                in_channels=4,
-                img_size=(96, 96, 96),
-                patch_size=(16, 16, 16),
-                hidden_size=768,
-                mlp_dim=3072,
-                num_layers=12,
-                num_heads=12,
-                pos_embed="perc",
-                classification=False,
-                dropout_rate=0.3,
+                in_channels=in_channels,
+                img_size=img_size,
+                patch_size=patch_size,
+                hidden_size=hidden_size,
+                mlp_dim=mlp_dim,
+                num_layers=num_layers,
+                num_heads=num_heads,
+                pos_embed=pos_embed,
+                classification=classification,
+                dropout_rate=dropout_rate,
             )
 
     @parameterized.expand(TEST_CASE_Vit)
diff --git a/tests/test_vitautoenc.py b/tests/test_vitautoenc.py
index cc3d493bb3..a54cba524a 100644
--- a/tests/test_vitautoenc.py
+++ b/tests/test_vitautoenc.py
@@ -82,83 +82,27 @@ def test_shape(self, input_param, input_shape, expected_shape):
         result, _ = net(torch.randn(input_shape))
         self.assertEqual(result.shape, expected_shape)
 
-    def test_ill_arg(self):
+    @parameterized.expand([
+        ("dropout_rate_out_of_range", 1, (128, 128, 128), (16, 16, 16), 128, 3072, 12, 12, "conv", 5.0),
+        ("patch_size_larger_than_img_size", 1, (32, 32, 32), (64, 64, 64), 512, 3072, 12, 8, "perceptron", 0.3),
+        ("num_heads_out_of_bound", 1, (96, 96, 96), (8, 8, 8), 512, 3072, 12, 14, "conv", 0.3),
+        ("img_size_not_divisible_by_patch_size", 1, (97, 97, 97), (4, 4, 4), 768, 3072, 12, 8, "perceptron", 0.3),
+        ("invalid_pos_embed", 4, (96, 96, 96), (16, 16, 16), 768, 3072, 12, 12, "perc", 0.3),
+        ("patch_size_not_divisible", 4, (96, 96, 96), (9, 9, 9), 768, 3072, 12, 12, "perc", 0.3),
+    ])
+    def test_ill_arg(self, name, in_channels, img_size, patch_size, hidden_size, mlp_dim, num_layers, num_heads,
+                     pos_embed, dropout_rate):
         with self.assertRaises(ValueError):
             ViTAutoEnc(
-                in_channels=1,
-                img_size=(128, 128, 128),
-                patch_size=(16, 16, 16),
-                hidden_size=128,
-                mlp_dim=3072,
-                num_layers=12,
-                num_heads=12,
-                pos_embed="conv",
-                dropout_rate=5.0,
-            )
-
-        with self.assertRaises(ValueError):
-            ViTAutoEnc(
-                in_channels=1,
-                img_size=(32, 32, 32),
-                patch_size=(64, 64, 64),
-                hidden_size=512,
-                mlp_dim=3072,
-                num_layers=12,
-                num_heads=8,
-                pos_embed="perceptron",
-                dropout_rate=0.3,
-            )
-
-        with self.assertRaises(ValueError):
-            ViTAutoEnc(
-                in_channels=1,
-                img_size=(96, 96, 96),
-                patch_size=(8, 8, 8),
-                hidden_size=512,
-                mlp_dim=3072,
-                num_layers=12,
-                num_heads=14,
-                pos_embed="conv",
-                dropout_rate=0.3,
-            )
-
-        with self.assertRaises(ValueError):
-            ViTAutoEnc(
-                in_channels=1,
-                img_size=(97, 97, 97),
-                patch_size=(4, 4, 4),
-                hidden_size=768,
-                mlp_dim=3072,
-                num_layers=12,
-                num_heads=8,
-                pos_embed="perceptron",
-                dropout_rate=0.3,
-            )
-
-        with self.assertRaises(ValueError):
-            ViTAutoEnc(
-                in_channels=4,
-                img_size=(96, 96, 96),
-                patch_size=(16, 16, 16),
-                hidden_size=768,
-                mlp_dim=3072,
-                num_layers=12,
-                num_heads=12,
-                pos_embed="perc",
-                dropout_rate=0.3,
-            )
-
-        with self.assertRaises(ValueError):
-            ViTAutoEnc(
-                in_channels=4,
-                img_size=(96, 96, 96),
-                patch_size=(9, 9, 9),
-                hidden_size=768,
-                mlp_dim=3072,
-                num_layers=12,
-                num_heads=12,
-                pos_embed="perc",
-                dropout_rate=0.3,
+                in_channels=in_channels,
+                img_size=img_size,
+                patch_size=patch_size,
+                hidden_size=hidden_size,
+                mlp_dim=mlp_dim,
+                num_layers=num_layers,
+                num_heads=num_heads,
+                pos_embed=pos_embed,
+                dropout_rate=dropout_rate,
             )
diff --git a/tests/test_warp.py b/tests/test_warp.py
index bac595224f..55f40764c3 100644
--- a/tests/test_warp.py
+++ b/tests/test_warp.py
@@ -124,7 +124,7 @@ def test_itk_benchmark(self):
         relative_diff = np.mean(
             np.divide(monai_result - itk_result, itk_result, out=np.zeros_like(itk_result), where=(itk_result != 0))
         )
-        self.assertTrue(relative_diff < 0.01)
+        self.assertLess(relative_diff, 0.01)
 
     @parameterized.expand(TEST_CASES, skip_on_empty=True)
     def test_resample(self, input_param, input_data, expected_val):
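
The recurring refactor in this patch collapses copy-pasted test methods into a single method driven by `@parameterized.expand`, and swaps bare `assertTrue(...)` checks for the specific unittest assertions that report both operands on failure. A minimal, self-contained sketch of the pattern follows; it is illustrative only and not part of the patch (the `TestSquare` class and its values are made up):

import unittest

from parameterized import parameterized


class TestSquare(unittest.TestCase):

    # One parameterized method replaces three near-identical test methods;
    # the first tuple element becomes a readable suffix on the test name.
    @parameterized.expand([
        ("zero", 0, 0),
        ("positive", 3, 9),
        ("negative", -2, 4),
    ])
    def test_square(self, name, value, expected):
        # assertEqual prints both the computed and expected values on failure,
        # unlike assertTrue(value ** 2 == expected), which only reports False.
        self.assertEqual(value**2, expected)


if __name__ == "__main__":
    unittest.main()

Each expanded case runs and reports as its own test (e.g. `test_square_0_zero`), so a single failing input no longer masks the remaining cases, which is the main behavioral gain of this refactor beyond the reduced duplication.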