diff --git a/tests/test_utils/test_activation_checkpointing.py b/tests/test_utils/test_activation_checkpointing.py
index a68644254cfa..9ba600170d28 100644
--- a/tests/test_utils/test_activation_checkpointing.py
+++ b/tests/test_utils/test_activation_checkpointing.py
@@ -6,7 +6,7 @@ import torch.nn.functional as F
 
 from colossalai.context.parallel_mode import ParallelMode
 from colossalai.context.random import add_seed, seed, set_mode, reset_seeds
-from colossalai.utils import checkpoint
+from colossalai.utils.activation_checkpoint import checkpoint
 
 
 def forward(x, weight):
@@ -17,9 +17,10 @@ def forward(x, weight):
 
 
 @pytest.mark.gpu
-@pytest.mark.skip("set seed error")
 @pytest.mark.parametrize("cpu_offload", [True, False])
 def test_activation_checkpointing(cpu_offload):
+    # clear all previous seeds possibly set by other tests
+    reset_seeds()
     # We put initilization here to avoid change cuda rng state below
     inputs = torch.rand(2, 2, requires_grad=True, device='cuda')
 
@@ -62,3 +63,8 @@ def test_activation_checkpointing(cpu_offload):
     # other tests will fail if running together with this test
     # as other tests can't overwrite the seed set by this test
     reset_seeds()
+
+
+if __name__ == '__main__':
+    test_activation_checkpointing(cpu_offload=False)
+    test_activation_checkpointing(cpu_offload=True)
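
For context on why the added `reset_seeds()` call lets the `@pytest.mark.skip("set seed error")` marker be dropped: `colossalai.context.random` keeps a process-wide singleton of per-mode CUDA RNG states, so seeds registered by one test persist into the next unless cleared, and re-registering an already-seeded mode presumably raises the "set seed error" that previously forced the skip. The sketch below is a minimal illustration using only the API imported in the file above; the mode and seed value are arbitrary choices for illustration, and a CUDA device is required.

```python
import torch
import torch.nn.functional as F

from colossalai.context.parallel_mode import ParallelMode
from colossalai.context.random import add_seed, seed, set_mode, reset_seeds

reset_seeds()                      # start from a clean (singleton) seed manager
add_seed(ParallelMode.DATA, 1024)  # register a CUDA RNG state for DATA mode; seed value is arbitrary
set_mode(ParallelMode.DATA)        # make DATA the currently active RNG state

x = torch.rand(2, 2, device='cuda')
with seed(ParallelMode.DATA):      # ops inside draw from the DATA-mode RNG state
    y = F.dropout(x, p=0.4, training=True)

reset_seeds()                      # clear registered seeds so later tests can add their own
```

This mirrors the pattern in the patched test: reset the manager on entry so earlier tests cannot have pre-registered modes, and reset again on exit so this test's seeds do not leak into tests that run after it.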