From 86be7440076de238bacdd4bf401513d44025c388 Mon Sep 17 00:00:00 2001 From: jiaruifang Date: Wed, 6 Jul 2022 17:34:24 +0800 Subject: [PATCH 1/4] make it faster --- tests/test_utils/test_colo_checkpoint.py | 39 +++++++++++++----------- 1 file changed, 22 insertions(+), 17 deletions(-) diff --git a/tests/test_utils/test_colo_checkpoint.py b/tests/test_utils/test_colo_checkpoint.py index 6e7d4441d760..48742fc18a58 100644 --- a/tests/test_utils/test_colo_checkpoint.py +++ b/tests/test_utils/test_colo_checkpoint.py @@ -1,21 +1,20 @@ from abc import ABC, abstractmethod -import os, sys, shutil +import os, shutil import torch import torch.nn as nn import pytest import copy -import operator -import colossalai -from colossalai.context.parallel_mode import ParallelMode +from functools import partial + import torch.multiprocessing as mp import torch.distributed as dist + +import colossalai from colossalai.testing import rerun_if_address_is_in_use from colossalai.utils.cuda import get_current_device from colossalai.utils import free_port from colossalai.utils.model.colo_init_context import ColoInitContext -from colossalai.tensor import ColoTensorSpec, ComputePattern, ComputeSpec, DistSpecManager, distspec, ProcessGroup, ColoTensor -from colossalai.core import global_context as gpc -from functools import partial +from colossalai.tensor import ComputePattern, ComputeSpec, DistSpecManager, distspec, ProcessGroup from colossalai.nn.parallel.data_parallel import ColoDDP from colossalai.utils.checkpoint import save_checkpoint, load_checkpoint from colossalai.nn.lr_scheduler import CosineAnnealingWarmupLR @@ -46,15 +45,17 @@ def __len__(self): class DummyDataLoader(DummyDataGenerator): - batch_size = 128 - category = 16 - feature_size = 256 + + def __init__(self, batch_size, category, feature_size, length=10): + super().__init__(length) + self.batch_size = batch_size + self.category = category + self.feature_size = feature_size def generate(self): image_dict = {} - 
image_dict['pixel_values'] = torch.rand( - DummyDataLoader.batch_size, DummyDataLoader.feature_size, device=get_current_device()) * 2 - 1 - image_dict['label'] = torch.randint(DummyDataLoader.category, (DummyDataLoader.batch_size,), + image_dict['pixel_values'] = torch.rand(self.batch_size, self.feature_size, device=get_current_device()) * 2 - 1 + image_dict['label'] = torch.randint(self.category, (self.batch_size,), dtype=torch.int64, device=get_current_device()) return image_dict @@ -102,11 +103,15 @@ def remove(path): def run_checkpoint(init_spec_func, use_ddp, test_epoch, pg): - train_dataloader = DummyDataLoader(length=16) + batch = 3 + feature = 32 + category = 16 + train_dataloader = DummyDataLoader(batch, category, feature, length=16) with ColoInitContext(device=get_current_device()): - model = MLP(256, 16, 64) - model_reload = MLP(256, 16, 64) - model_ref = MLP(256, 16, 64) + model = MLP(feature, category) + model_reload = MLP(feature, category) + model_ref = MLP(feature, category) + model = model.cuda() model_reload = model_reload.cuda() model_ref = model_ref.cuda() From 9c504afabd4db00b254c96b44014a4ef3bcd0276 Mon Sep 17 00:00:00 2001 From: jiaruifang Date: Tue, 12 Jul 2022 23:01:25 +0800 Subject: [PATCH 2/4] [hotfix] torchvision fx tests --- tests/test_fx/test_pipeline/test_torchvision/test_torchvision.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_fx/test_pipeline/test_torchvision/test_torchvision.py b/tests/test_fx/test_pipeline/test_torchvision/test_torchvision.py index dab485063ab2..e52889e3be6c 100644 --- a/tests/test_fx/test_pipeline/test_torchvision/test_torchvision.py +++ b/tests/test_fx/test_pipeline/test_torchvision/test_torchvision.py @@ -10,6 +10,7 @@ import random import numpy as np import inspect +import pytest MANUAL_SEED = 0 random.seed(MANUAL_SEED) From 0488f3a6cf2968acf67bddf0a201dfa1db53ce34 Mon Sep 17 00:00:00 2001 From: jiaruifang Date: Tue, 12 Jul 2022 23:27:13 +0800 Subject: [PATCH 3/4] [hotfix] rename duplicated 
named test_gpt.py --- tests/test_tensor/{test_gpt.py => test_gpt2.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tests/test_tensor/{test_gpt.py => test_gpt2.py} (100%) diff --git a/tests/test_tensor/test_gpt.py b/tests/test_tensor/test_gpt2.py similarity index 100% rename from tests/test_tensor/test_gpt.py rename to tests/test_tensor/test_gpt2.py From c23dfbcd072bf4c771c923bbda68efbcd5940de0 Mon Sep 17 00:00:00 2001 From: jiaruifang Date: Tue, 12 Jul 2022 23:41:48 +0800 Subject: [PATCH 4/4] [hotfix] test model unittest hotfix --- tests/test_tensor/test_model.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/tests/test_tensor/test_model.py b/tests/test_tensor/test_model.py index b44e7af0167c..3431336cb6cd 100644 --- a/tests/test_tensor/test_model.py +++ b/tests/test_tensor/test_model.py @@ -12,7 +12,7 @@ from colossalai.utils import free_port from colossalai.utils.model.colo_init_context import ColoInitContext from colossalai.tensor import ShardSpec, ColoTensorSpec, ComputePattern, \ - ComputeSpec, ColoTensor, DistSpecManager, ProcessGroup + ComputeSpec, ColoTensor, DistSpecManager, ProcessGroup, ReplicaSpec from colossalai.nn.optimizer import ColoOptimizer from tests.components_to_test.registry import non_distributed_component_funcs @@ -76,22 +76,23 @@ def run_1d_hybrid_tp(model_name): for name, p in model.named_parameters(): if not isinstance(p, ColoTensor): continue - # print(name) + # num_class = type_vocab_size = 2 | (8, 2) - # TODO(jiaruifang) has bug if open the following 2 comments if 'classifier' in name and 'weight' in name: init_1d_row_linear(p, pg) # num_class = vocab_size = 30524 | (30524, 8) - if 'word_embeddings' in name and 'weight' in name: + elif 'word_embeddings' in name and 'weight' in name: init_1d_row_embedding(p, pg) # num_class = seq_len = 512 | (512, 8) - if 'position_embeddings' in name and 'weight' in name: + elif 'position_embeddings' in name and 'weight' in name: 
init_1d_row_embedding(p, pg) # num_class = type_vocab_size = 2 | (2, 8) - if 'token_type_embeddings' in name and 'weight' in name: + elif 'token_type_embeddings' in name and 'weight' in name: init_1d_col_embedding(p, pg) - if p.process_group.tp_world_size() == 1: - p.set_process_group(pg) + elif p.process_group.tp_world_size() == 1: + with DistSpecManager.no_grad(): + p.redistribute(ReplicaSpec(), pg) + elif "simple_net" == model_name: # A naive way to set spec for all weights in Linear for name, p in model.named_parameters():