diff --git a/monai/apps/detection/transforms/box_ops.py b/monai/apps/detection/transforms/box_ops.py
index d63a155e42..fb870c952e 100644
--- a/monai/apps/detection/transforms/box_ops.py
+++ b/monai/apps/detection/transforms/box_ops.py
@@ -242,6 +242,9 @@ def convert_box_to_mask(
     boxes_mask_np = np.ones((labels.shape[0],) + spatial_size, dtype=np.int16) * np.int16(bg_label)

     boxes_np: np.ndarray = convert_data_type(boxes, np.ndarray, dtype=np.int32)[0]
+    if np.any(boxes_np[:, spatial_dims:] > np.array(spatial_size)):
+        raise ValueError("Some boxes are larger than the image.")
+
     labels_np, *_ = convert_to_dst_type(src=labels, dst=boxes_np)
     for b in range(boxes_np.shape[0]):
         # generate a foreground mask
diff --git a/tests/test_box_transform.py b/tests/test_box_transform.py
index 94bd6ade52..e114f8869f 100644
--- a/tests/test_box_transform.py
+++ b/tests/test_box_transform.py
@@ -131,6 +131,24 @@ def test_value_3d_mask(self):
         assert_allclose(data_back["boxes"], data["boxes"], type_test=False, device_test=False, atol=1e-3)
         assert_allclose(data_back["labels"], data["labels"], type_test=False, device_test=False, atol=1e-3)

+    def test_shape_assertion(self):
+        test_dtype = torch.float32
+        image = np.zeros((1, 10, 10, 10))
+        boxes = np.array([[7, 8, 9, 10, 12, 13]])
+        data = {"image": image, "boxes": boxes, "labels": np.array((1,))}
+        data = CastToTyped(keys=["image", "boxes"], dtype=test_dtype)(data)
+        transform_to_mask = BoxToMaskd(
+            box_keys="boxes",
+            box_mask_keys="box_mask",
+            box_ref_image_keys="image",
+            label_keys="labels",
+            min_fg_label=0,
+            ellipse_mask=False,
+        )
+        with self.assertRaises(ValueError) as context:
+            transform_to_mask(data)
+        self.assertTrue("Some boxes are larger than the image." in str(context.exception))
+
    @parameterized.expand(TESTS_3D)
    def test_value_3d(
        self,
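
For context, a minimal sketch (not part of the patch) of how the new guard surfaces when convert_box_to_mask is called directly. The box coordinates and spatial size mirror the added test; the keyword defaults passed here (bg_label, ellipse_mask) are assumptions based on the function's current signature.

import numpy as np
from monai.apps.detection.transforms.box_ops import convert_box_to_mask

spatial_size = (10, 10, 10)
# Max corner (10, 12, 13) exceeds the 10x10x10 image along the last two axes.
boxes = np.array([[7, 8, 9, 10, 12, 13]], dtype=np.float32)
labels = np.array([1])

try:
    convert_box_to_mask(boxes, labels, spatial_size, bg_label=-1, ellipse_mask=False)
except ValueError as exc:
    print(exc)  # -> Some boxes are larger than the image.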