diff --git a/docs/source/transforms.rst b/docs/source/transforms.rst index 7f970dfb15..a1bafaf103 100644 --- a/docs/source/transforms.rst +++ b/docs/source/transforms.rst @@ -361,6 +361,14 @@ NVIDIA Tool Extension (NVTX) """""""""""""" .. autoclass:: RandRangePop +`Range` +""""""" +.. autoclass:: Range + +`RandRange` +""""""""""" +.. autoclass:: RandRange + `Mark` """""" .. autoclass:: Mark diff --git a/monai/transforms/__init__.py b/monai/transforms/__init__.py index d15b8866e5..f259ff86bc 100644 --- a/monai/transforms/__init__.py +++ b/monai/transforms/__init__.py @@ -204,6 +204,10 @@ RandMarkd, RandMarkD, RandMarkDict, + RandRange, + RandRanged, + RandRangeD, + RandRangeDict, RandRangePop, RandRangePopd, RandRangePopD, @@ -212,6 +216,10 @@ RandRangePushd, RandRangePushD, RandRangePushDict, + Range, + Ranged, + RangeD, + RangeDict, RangePop, RangePopd, RangePopD, diff --git a/monai/transforms/nvtx.py b/monai/transforms/nvtx.py index 12c03dc028..a500eb3c90 100644 --- a/monai/transforms/nvtx.py +++ b/monai/transforms/nvtx.py @@ -12,6 +12,8 @@ Wrapper around NVIDIA Tools Extension for profiling MONAI transformations """ +from typing import Optional + from monai.transforms.transform import RandomizableTransform, Transform from monai.utils import optional_import @@ -26,6 +28,10 @@ "RandMarkd", "RandMarkD", "RandMarkDict", + "RandRange", + "RandRanged", + "RandRangeD", + "RandRangeDict", "RandRangePop", "RandRangePopd", "RandRangePopD", @@ -34,6 +40,10 @@ "RandRangePushd", "RandRangePushD", "RandRangePushDict", + "Range", + "Ranged", + "RangeD", + "RangeDict", "RangePop", "RangePopd", "RangePopD", @@ -91,6 +101,36 @@ class RandRangePop(RangePop, RandomizableTransform): """ +class Range(Transform): + """ + Pushes an NVTX range before a transform, and pops it afterwards. + Stores zero-based depth of the range that is started. 
+ + Args: + msg: ASCII message to associate with range + """ + + def __init__(self, transform: Transform, msg: Optional[str] = None) -> None: + if msg is None: + msg = type(transform).__name__ + self.msg = msg + self.transform = transform + self.depth = None + + def __call__(self, data): + self.depth = _nvtx.rangePushA(self.msg) + data = self.transform(data) + _nvtx.rangePop() + return data + + + class RandRange(Range, RandomizableTransform): + """ + Pushes an NVTX range before a transform, and pops it afterwards (RandomizableTransform). + Stores zero-based depth of the range that is started. + """ + + class Mark(Transform): """ Mark an instantaneous event that occurred at some point. @@ -117,9 +157,14 @@ class RandMark(Mark, RandomizableTransform): """ -MarkDict = MarkD = Markd = Mark -RandMarkDict = RandMarkD = RandMarkd = RandMark -RandRangePopDict = RandRangePopD = RandRangePopd = RandRangePop +RangePushDict = RangePushD = RangePushd = RangePush RandRangePushDict = RandRangePushD = RandRangePushd = RandRangePush + RangePopDict = RangePopD = RangePopd = RangePop -RangePushDict = RangePushD = RangePushd = RangePush +RandRangePopDict = RandRangePopD = RandRangePopd = RandRangePop + +RangeDict = RangeD = Ranged = Range +RandRangeDict = RandRangeD = RandRanged = RandRange + +MarkDict = MarkD = Markd = Mark +RandMarkDict = RandMarkD = RandMarkd = RandMark diff --git a/tests/test_nvtx_transform.py b/tests/test_nvtx_transform.py index d1887377ba..01f2e80d26 100644 --- a/tests/test_nvtx_transform.py +++ b/tests/test_nvtx_transform.py @@ -21,10 +21,14 @@ MarkD, RandMark, RandMarkD, + RandRange, + RandRangeD, RandRangePop, RandRangePopD, RandRangePush, RandRangePushD, + Range, + RangeD, RangePop, RangePopD, RangePush, @@ -62,12 +66,12 @@ class TestNVTXTransforms(unittest.TestCase): def test_nvtx_transfroms_alone(self, input): transforms = Compose( [ - Mark("Mark: Transform Starts!"), + Mark("Mark: Transforms Start!"), RangePush("Range: RandFlipD"), RangePop(), 
RandRangePush("Range: ToTensorD"), RandRangePop(), - RandMark("Mark: Transform Ends!"), + RandMark("Mark: Transforms End!"), ] ) output = transforms(input) @@ -82,32 +86,33 @@ def test_nvtx_transfroms_alone(self, input): @parameterized.expand([TEST_CASE_ARRAY_0, TEST_CASE_ARRAY_1]) @unittest.skipUnless(has_nvtx, "CUDA is required for NVTX!") def test_nvtx_transfroms_array(self, input): + # with prob == 0.0 transforms = Compose( [ - RandMark("Mark: Transform Starts!"), + RandMark("Mark: Transforms Start!"), RandRangePush("Range: RandFlip"), RandFlip(prob=0.0), RandRangePop(), RangePush("Range: ToTensor"), ToTensor(), RangePop(), - Mark("Mark: Transform Ends!"), + Mark("Mark: Transforms End!"), ] ) output = transforms(input) self.assertIsInstance(output, torch.Tensor) np.testing.assert_array_equal(input, output) - + # with prob == 1.0 transforms = Compose( [ - RandMark("Mark: Transform Starts!"), + RandMark("Mark: Transforms Start!"), RandRangePush("Range: RandFlip"), RandFlip(prob=1.0), RandRangePop(), RangePush("Range: ToTensor"), ToTensor(), RangePop(), - Mark("Mark: Transform Ends!"), + Mark("Mark: Transforms End!"), ] ) output = transforms(input) @@ -116,33 +121,90 @@ def test_nvtx_transfroms_array(self, input): @parameterized.expand([TEST_CASE_DICT_0, TEST_CASE_DICT_1]) @unittest.skipUnless(has_nvtx, "CUDA is required for NVTX!") - def test_nvtx_transfromsd(self, input): + def test_nvtx_transfroms_dict(self, input): + # with prob == 0.0 transforms = Compose( [ - RandMarkD("Mark: Transform Starts!"), + RandMarkD("Mark: Transforms (p=0) Start!"), RandRangePushD("Range: RandFlipD"), RandFlipD(keys="image", prob=0.0), RandRangePopD(), RangePushD("Range: ToTensorD"), ToTensorD(keys=("image")), RangePopD(), - MarkD("Mark: Transform Ends!"), + MarkD("Mark: Transforms (p=0) End!"), ] ) output = transforms(input) self.assertIsInstance(output["image"], torch.Tensor) np.testing.assert_array_equal(input["image"], output["image"]) - + # with prob == 1.0 transforms = 
Compose( [ - RandMarkD("Mark: Transform Starts!"), + RandMarkD("Mark: Transforms (p=1) Start!"), RandRangePushD("Range: RandFlipD"), RandFlipD(keys="image", prob=1.0), RandRangePopD(), RangePushD("Range: ToTensorD"), ToTensorD(keys=("image")), RangePopD(), - MarkD("Mark: Transform Ends!"), + MarkD("Mark: Transforms (p=1) End!"), + ] + ) + output = transforms(input) + self.assertIsInstance(output["image"], torch.Tensor) + np.testing.assert_array_equal(input["image"], Flip()(output["image"].numpy())) + + @parameterized.expand([TEST_CASE_ARRAY_0, TEST_CASE_ARRAY_1]) + @unittest.skipUnless(has_nvtx, "CUDA is required for NVTX!") + def test_nvtx_range_array(self, input): + # with prob == 0.0 + transforms = Compose( + [ + RandMark("Mark: Transforms (p=0) Start!"), + RandRange(RandFlip(prob=0.0)), + Range(ToTensor()), + Mark("Mark: Transforms (p=0) End!"), + ] + ) + output = transforms(input) + self.assertIsInstance(output, torch.Tensor) + np.testing.assert_array_equal(input, output) + # with prob == 1.0 + transforms = Compose( + [ + RandMark("Mark: Transforms (p=1) Start!"), + RandRange(RandFlip(prob=1.0)), + Range(ToTensor()), + Mark("Mark: Transforms (p=1) End!"), + ] + ) + output = transforms(input) + self.assertIsInstance(output, torch.Tensor) + np.testing.assert_array_equal(input, Flip()(output.numpy())) + + @parameterized.expand([TEST_CASE_DICT_0, TEST_CASE_DICT_1]) + @unittest.skipUnless(has_nvtx, "CUDA is required for NVTX!") + def test_nvtx_range_dict(self, input): + # with prob == 0.0 + transforms = Compose( + [ + RandMarkD("Mark: Transforms (p=0) Start!"), + RandRangeD(RandFlipD(keys="image", prob=0.0)), + RangeD(ToTensorD(keys=("image"))), + MarkD("Mark: Transforms (p=0) End!"), + ] + ) + output = transforms(input) + self.assertIsInstance(output["image"], torch.Tensor) + np.testing.assert_array_equal(input["image"], output["image"]) + # with prob == 1.0 + transforms = Compose( + [ + RandMarkD("Mark: Transforms (p=1) Start!"), + 
RandRangeD(RandFlipD(keys="image", prob=1.0)), + RangeD(ToTensorD(keys=("image"))), + MarkD("Mark: Transforms (p=1) End!"), ] ) output = transforms(input)