From 86be7440076de238bacdd4bf401513d44025c388 Mon Sep 17 00:00:00 2001
From: jiaruifang
Date: Wed, 6 Jul 2022 17:34:24 +0800
Subject: [PATCH 1/2] make it faster

---
 tests/test_utils/test_colo_checkpoint.py | 39 +++++++++++++-----------
 1 file changed, 22 insertions(+), 17 deletions(-)

diff --git a/tests/test_utils/test_colo_checkpoint.py b/tests/test_utils/test_colo_checkpoint.py
index 6e7d4441d760..48742fc18a58 100644
--- a/tests/test_utils/test_colo_checkpoint.py
+++ b/tests/test_utils/test_colo_checkpoint.py
@@ -1,21 +1,20 @@
 from abc import ABC, abstractmethod
-import os, sys, shutil
+import os, shutil
 import torch
 import torch.nn as nn
 import pytest
 import copy
-import operator
-import colossalai
-from colossalai.context.parallel_mode import ParallelMode
+from functools import partial
+
 import torch.multiprocessing as mp
 import torch.distributed as dist
+
+import colossalai
 from colossalai.testing import rerun_if_address_is_in_use
 from colossalai.utils.cuda import get_current_device
 from colossalai.utils import free_port
 from colossalai.utils.model.colo_init_context import ColoInitContext
-from colossalai.tensor import ColoTensorSpec, ComputePattern, ComputeSpec, DistSpecManager, distspec, ProcessGroup, ColoTensor
-from colossalai.core import global_context as gpc
-from functools import partial
+from colossalai.tensor import ComputePattern, ComputeSpec, DistSpecManager, distspec, ProcessGroup
 from colossalai.nn.parallel.data_parallel import ColoDDP
 from colossalai.utils.checkpoint import save_checkpoint, load_checkpoint
 from colossalai.nn.lr_scheduler import CosineAnnealingWarmupLR
@@ -46,15 +45,17 @@ def __len__(self):
 
 
 class DummyDataLoader(DummyDataGenerator):
-    batch_size = 128
-    category = 16
-    feature_size = 256
+
+    def __init__(self, batch_size, category, feature_size, length=10):
+        super().__init__(length)
+        self.batch_size = batch_size
+        self.category = category
+        self.feature_size = feature_size
 
     def generate(self):
         image_dict = {}
-        image_dict['pixel_values'] = torch.rand(
-            DummyDataLoader.batch_size, DummyDataLoader.feature_size, device=get_current_device()) * 2 - 1
-        image_dict['label'] = torch.randint(DummyDataLoader.category, (DummyDataLoader.batch_size,),
+        image_dict['pixel_values'] = torch.rand(self.batch_size, self.feature_size, device=get_current_device()) * 2 - 1
+        image_dict['label'] = torch.randint(self.category, (self.batch_size,),
                                             dtype=torch.int64,
                                             device=get_current_device())
         return image_dict
@@ -102,11 +103,15 @@ def remove(path):
 
 
 def run_checkpoint(init_spec_func, use_ddp, test_epoch, pg):
-    train_dataloader = DummyDataLoader(length=16)
+    batch = 3
+    feature = 32
+    category = 16
+    train_dataloader = DummyDataLoader(batch, category, feature, length=16)
     with ColoInitContext(device=get_current_device()):
-        model = MLP(256, 16, 64)
-        model_reload = MLP(256, 16, 64)
-        model_ref = MLP(256, 16, 64)
+        model = MLP(feature, category)
+        model_reload = MLP(feature, category)
+        model_ref = MLP(feature, category)
+
     model = model.cuda()
     model_reload = model_reload.cuda()
     model_ref = model_ref.cuda()
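
Note on the patch above: it replaces DummyDataLoader's hard-coded class attributes (batch_size = 128, feature_size = 256) with constructor arguments, so run_checkpoint can build a much smaller synthetic workload (batch 3, feature 32) and the checkpoint test finishes faster. A minimal usage sketch of the refactored loader follows; it assumes the DummyDataGenerator base class (not shown in this diff) simply yields the output of generate() `length` times when iterated.

# Usage sketch of the refactored DummyDataLoader. Assumption: the
# DummyDataGenerator base class iterates by calling generate() `length` times.
loader = DummyDataLoader(3, 16, 32, length=16)    # batch_size, category, feature_size
for data in loader:
    pixel_values = data['pixel_values']    # (3, 32) float tensor scaled into [-1, 1)
    labels = data['label']                 # (3,) int64 tensor drawn from 16 categories
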
From 232665023ace91d64b6343f437a00b56786f1854 Mon Sep 17 00:00:00 2001
From: jiaruifang
Date: Thu, 14 Jul 2022 14:01:22 +0800
Subject: [PATCH 2/2] add missing files.

---
 tests/test_gemini/test_param_op.py | 95 ++++++++++++++++++++++++++++++
 1 file changed, 95 insertions(+)
 create mode 100644 tests/test_gemini/test_param_op.py

diff --git a/tests/test_gemini/test_param_op.py b/tests/test_gemini/test_param_op.py
new file mode 100644
index 000000000000..ed9d51d9a021
--- /dev/null
+++ b/tests/test_gemini/test_param_op.py
@@ -0,0 +1,95 @@
+from colossalai.gemini.paramhooks import BaseParamHookMgr
+from torch import nn
+import torch
+import torch.nn.functional as F
+import copy
+
+
+class SubNet(nn.Module):
+
+    def __init__(self, out_features) -> None:
+        super().__init__()
+        self.bias = nn.Parameter(torch.zeros(out_features))
+
+    def forward(self, x, weight):
+        return F.linear(x, weight, self.bias)
+
+
+class Net(nn.Module):
+
+    def __init__(self, checkpoint=False) -> None:
+        super().__init__()
+        self.fc1 = nn.Linear(5, 5)
+        self.sub_fc = SubNet(5)
+        self.fc2 = nn.Linear(5, 1)
+
+    def forward(self, x):
+        x = self.fc1(x)
+        x = self.sub_fc(x, self.fc1.weight)
+        x = self.fc1(x)
+        x = self.fc2(x)
+        return x
+
+
+def net_data():
+    return (torch.randn(2, 5, dtype=torch.float, device='cuda'),)
+
+
+def allclose(tensor_a: torch.Tensor, tensor_b: torch.Tensor, loose=False) -> bool:
+    if loose:
+        return torch.allclose(tensor_a, tensor_b, atol=1e-3, rtol=1e-3)
+    return torch.allclose(tensor_a, tensor_b)
+
+
+def test_base_param_hook():
+    torch.manual_seed(0)
+    model = Net(checkpoint=True).cuda()
+    model.train()
+    inputs = net_data()
+
+    def run_model(model, inputs, use_param_hook=False):
+        if use_param_hook:
+
+            class HooKWrapper:
+
+                def __init__(self) -> None:
+                    self.hook_triggered_times = 0
+
+                def wrapper_func(self):
+
+                    def hook(param, grad) -> torch.Tensor or None:
+                        self.hook_triggered_times += 1
+                        return grad
+
+                    return hook
+
+            hookwrapper = HooKWrapper()
+            param_list = [p for p in model.parameters()]
+            hook_mgr = BaseParamHookMgr(param_list)
+            hook_mgr.register_backward_hooks(hookwrapper.wrapper_func())
+
+        model.zero_grad(set_to_none=True)
+
+        with torch.cuda.amp.autocast():
+            y = model(*inputs)
+            loss = y.sum()
+        loss.backward()
+
+        if use_param_hook:
+            hook_mgr.remove_hooks()
+            return hookwrapper.hook_triggered_times
+
+    model_copy = copy.deepcopy(model)
+
+    run_model(model, inputs, False)
+    ret2 = run_model(model_copy, inputs, True)
+
+    # Make sure the param hook has only been fired once per parameter in case of parameter sharing
+    assert ret2 == len(list(model.parameters()))
+
+    for p, p_copy in zip(model.parameters(), model_copy.parameters()):
+        assert allclose(p.grad, p_copy.grad), f"{p.grad} vs {p_copy.grad}"
+
+
+if __name__ == '__main__':
+    test_base_param_hook()
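
Note on the test above: it exercises colossalai.gemini.paramhooks.BaseParamHookMgr by registering a counting hook on every parameter, running one backward pass, and asserting the hook fired exactly once per parameter, even though fc1's weight is reused three times in Net.forward (twice through fc1 itself and once inside SubNet). Autograd sums all gradient contributions to a leaf tensor before that tensor's hook runs, which is why per-parameter hooks fire once per backward pass. Below is a minimal sketch of a manager with that behaviour, built on the standard torch.Tensor.register_hook API; the class and its internals are illustrative stand-ins, not the actual BaseParamHookMgr implementation.

import functools
from typing import Callable, List

import torch
from torch import nn


class ParamHookMgrSketch:
    """Hypothetical stand-in for BaseParamHookMgr, for illustration only.

    Each parameter is bound into a torch.Tensor.register_hook callback so
    the user-supplied hook receives (param, grad). Leaf-tensor hooks run
    once per parameter per backward pass, after autograd has summed every
    gradient contribution, which is why the test counts exactly
    len(list(model.parameters())) firings despite the weight sharing.
    """

    def __init__(self, param_list: List[nn.Parameter]) -> None:
        self._param_list = param_list
        self._handles = []

    def register_backward_hooks(self, hook: Callable) -> None:
        for p in self._param_list:
            if p.requires_grad:
                # functools.partial binds the parameter as the hook's first argument.
                self._handles.append(p.register_hook(functools.partial(hook, p)))

    def remove_hooks(self) -> None:
        for h in self._handles:
            h.remove()
        self._handles.clear()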