From ae2c2f75c69e73593a56ed47429b78cf6b65a7f1 Mon Sep 17 00:00:00 2001
From: digger-yu
Date: Fri, 5 May 2023 19:25:16 +0800
Subject: [PATCH 1/4] fix spelling error with examples/community/

---
 .../Chat/examples/community/peft/easy_dataset.py       | 10 +++++-----
 .../Chat/examples/community/peft/train_peft_prompts.py |  2 +-
 .../Chat/examples/community/peft/train_peft_sft.py     |  2 +-
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/applications/Chat/examples/community/peft/easy_dataset.py b/applications/Chat/examples/community/peft/easy_dataset.py
index 24ea4f0a8618..2fe293957079 100644
--- a/applications/Chat/examples/community/peft/easy_dataset.py
+++ b/applications/Chat/examples/community/peft/easy_dataset.py
@@ -188,7 +188,7 @@ def __init__(self, data_file: str, tokenizer: AutoTokenizer, max_length=512, is_
             else:
                 raw_input_ids.append(encoded_ids)
 
-        grouped_inpup_ids = []
+        grouped_input_ids = []
         current_input_ids = []
         attention_mask = []
         if tokenizer.pad_token_id is None:
@@ -199,7 +199,7 @@ def __init__(self, data_file: str, tokenizer: AutoTokenizer, max_length=512, is_
                     #pad the current_input_ids to max_length with tokenizer.pad_token_id
                     padded_length = max_length - len(current_input_ids)
                     current_input_ids.extend([tokenizer.pad_token_id] * padded_length)
-                    grouped_inpup_ids.append(torch.tensor(current_input_ids, dtype=torch.long))
+                    grouped_input_ids.append(torch.tensor(current_input_ids, dtype=torch.long))
                     attention_mask.append(
                         torch.tensor([1] * (max_length - padded_length) + [0] * padded_length, dtype=torch.long))
                     current_input_ids = []
@@ -208,7 +208,7 @@ def __init__(self, data_file: str, tokenizer: AutoTokenizer, max_length=512, is_
             if len(current_input_ids) > 0:
                 padded_length = max_length - len(current_input_ids)
                 current_input_ids.extend([tokenizer.pad_token_id] * padded_length)
-                grouped_inpup_ids.append(torch.tensor(current_input_ids, dtype=torch.long))
+                grouped_input_ids.append(torch.tensor(current_input_ids, dtype=torch.long))
                 attention_mask.append(
                     torch.tensor([1] * (max_length - padded_length) + [0] * padded_length, dtype=torch.long))
         else:
@@ -218,8 +218,8 @@ def __init__(self, data_file: str, tokenizer: AutoTokenizer, max_length=512, is_
                 input_ids.extend([tokenizer.pad_token_id] * padded_length)
                 attention_mask.append(
                     torch.tensor([1] * (max_length - padded_length) + [0] * padded_length, dtype=torch.long))
-            grouped_inpup_ids.append(torch.tensor(input_ids, dtype=torch.long))
-        self.input_ids = grouped_inpup_ids
+            grouped_input_ids.append(torch.tensor(input_ids, dtype=torch.long))
+        self.input_ids = grouped_input_ids
         self.labels = copy.deepcopy(self.input_ids)
         self.file_name = data_file
         self.attention_mask = attention_mask
diff --git a/applications/Chat/examples/community/peft/train_peft_prompts.py b/applications/Chat/examples/community/peft/train_peft_prompts.py
index 0e277021e917..ba8470f38fad 100644
--- a/applications/Chat/examples/community/peft/train_peft_prompts.py
+++ b/applications/Chat/examples/community/peft/train_peft_prompts.py
@@ -41,7 +41,7 @@ def main(args):
     # configure model
     if args.model == 'bloom':
         # initial_model = BLOOMActor(pretrained=args.pretrain)
-        print('Using peft lora to load Bloom model as inital_model')
+        print('Using peft lora to load Bloom model as initial_model')
         initial_model = BLOOMActor(pretrained=args.pretrain, lora_path=args.sft_lora_path)
         print('Using peft lora to load Bloom model as initial_model (Done)')
     else:
diff --git a/applications/Chat/examples/community/peft/train_peft_sft.py
b/applications/Chat/examples/community/peft/train_peft_sft.py index 9bd0ebc12a83..d2b08b72ca95 100644 --- a/applications/Chat/examples/community/peft/train_peft_sft.py +++ b/applications/Chat/examples/community/peft/train_peft_sft.py @@ -86,7 +86,7 @@ def train(args): if args.strategy == 'colossalai_gemini': # this is a hack to deal with the resized embedding - # to make sure all parameters are ColoParameter for Colossal-AI Gemini Compatiblity + # to make sure all parameters are ColoParameter for Colossal-AI Gemini Compatibility for name, param in model.named_parameters(): if not isinstance(param, ColoParameter): sub_module_name = '.'.join(name.split('.')[:-1]) From 9505251b810bab2d05d7a2cc01282c8929a3e60c Mon Sep 17 00:00:00 2001 From: digger-yu Date: Sat, 6 May 2023 16:04:49 +0800 Subject: [PATCH 2/4] fix spelling error with tests/ --- op_builder/utils.py | 2 +- tests/components_to_test/albert.py | 10 +++++----- tests/components_to_test/beit.py | 4 ++-- tests/components_to_test/bert.py | 16 ++++++++-------- tests/components_to_test/registry.py | 8 ++++---- 5 files changed, 20 insertions(+), 20 deletions(-) diff --git a/op_builder/utils.py b/op_builder/utils.py index 1b1bd5f49970..2dbd976fbcbb 100644 --- a/op_builder/utils.py +++ b/op_builder/utils.py @@ -36,7 +36,7 @@ def get_cuda_version_in_pytorch() -> List[int]: torch_cuda_minor = torch.version.cuda.split(".")[1] except: raise ValueError( - "[extension] Cannot retrive the CUDA version in the PyTorch binary given by torch.version.cuda") + "[extension] Cannot retrieve the CUDA version in the PyTorch binary given by torch.version.cuda") return torch_cuda_major, torch_cuda_minor diff --git a/tests/components_to_test/albert.py b/tests/components_to_test/albert.py index d5b6bc89a83e..52b2275ec4f8 100644 --- a/tests/components_to_test/albert.py +++ b/tests/components_to_test/albert.py @@ -28,7 +28,7 @@ def bert_model_builder(checkpoint: bool = False): print('building AlbertForSequenceClassification model') # adapting huggingface BertForSequenceClassification for single unitest calling interface - class ModelAaptor(AlbertForSequenceClassification): + class ModelAdaptor(AlbertForSequenceClassification): def forward(self, input_ids, labels): """ @@ -37,23 +37,23 @@ def forward(self, input_ids, labels): """ return super().forward(input_ids=input_ids, labels=labels)[0] - model = ModelAaptor(config) + model = ModelAdaptor(config) # if checkpoint and version.parse(transformers.__version__) >= version.parse("4.11.0"): # model.gradient_checkpointing_enable() return model - is_distrbuted = torch.distributed.is_initialized() + is_distributed = torch.distributed.is_initialized() trainloader = get_bert_data_loader(n_class=vocab_size, batch_size=2, total_samples=10000, sequence_length=sequence_length, - is_distrbuted=is_distrbuted) + is_distributed=is_distributed) testloader = get_bert_data_loader(n_class=vocab_size, batch_size=2, total_samples=10000, sequence_length=sequence_length, - is_distrbuted=is_distrbuted) + is_distributed=is_distributed) criterion = None return bert_model_builder, trainloader, testloader, torch.optim.Adam, criterion diff --git a/tests/components_to_test/beit.py b/tests/components_to_test/beit.py index 1252071f4075..2021ae6f6e35 100644 --- a/tests/components_to_test/beit.py +++ b/tests/components_to_test/beit.py @@ -27,7 +27,7 @@ def generate(self): @non_distributed_component_funcs.register(name='beit') def get_training_components(): - def model_buider(checkpoint=False): + def model_builder(checkpoint=False): model = 
Beit(img_size=DummyDataLoader.img_size, num_classes=DummyDataLoader.num_class, embed_dim=32, @@ -39,4 +39,4 @@ def model_buider(checkpoint=False): testloader = DummyDataLoader() criterion = torch.nn.CrossEntropyLoss() - return model_buider, trainloader, testloader, torch.optim.Adam, criterion + return model_builder, trainloader, testloader, torch.optim.Adam, criterion diff --git a/tests/components_to_test/bert.py b/tests/components_to_test/bert.py index c1faa6f9d892..e7d1d50806b8 100644 --- a/tests/components_to_test/bert.py +++ b/tests/components_to_test/bert.py @@ -13,7 +13,7 @@ def get_bert_data_loader( total_samples, sequence_length, device=torch.device('cpu:0'), - is_distrbuted=False, + is_distributed=False, ): train_data = torch.randint( low=0, @@ -24,7 +24,7 @@ def get_bert_data_loader( ) train_label = torch.randint(low=0, high=2, size=(total_samples,), device=device, dtype=torch.long) train_dataset = torch.utils.data.TensorDataset(train_data, train_label) - if is_distrbuted: + if is_distributed: sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) else: sampler = SequentialSampler(train_dataset) @@ -52,8 +52,8 @@ def bert_model_builder(checkpoint: bool = False): attention_probs_dropout_prob=0.) print('building BertForSequenceClassification model') - # adapting huggingface BertForSequenceClassification for single unitest calling interface - class ModelAaptor(BertForSequenceClassification): + # adapting huggingface BertForSequenceClassification for single unittest calling interface + class ModelAdaptor(BertForSequenceClassification): def forward(self, input_ids, labels): """ @@ -62,23 +62,23 @@ def forward(self, input_ids, labels): """ return super().forward(input_ids=input_ids, labels=labels)[0] - model = ModelAaptor(config) + model = ModelAdaptor(config) if checkpoint and version.parse(transformers.__version__) >= version.parse("4.11.0"): model.gradient_checkpointing_enable() return model - is_distrbuted = torch.distributed.is_initialized() + is_distributed = torch.distributed.is_initialized() trainloader = get_bert_data_loader(n_class=vocab_size, batch_size=2, total_samples=10000, sequence_length=sequence_length, - is_distrbuted=is_distrbuted) + is_distributed=is_distributed) testloader = get_bert_data_loader(n_class=vocab_size, batch_size=2, total_samples=10000, sequence_length=sequence_length, - is_distrbuted=is_distrbuted) + is_distributed=is_distributed) criterion = None return bert_model_builder, trainloader, testloader, torch.optim.Adam, criterion diff --git a/tests/components_to_test/registry.py b/tests/components_to_test/registry.py index 728ed9eba6ea..edfcaaa7275b 100644 --- a/tests/components_to_test/registry.py +++ b/tests/components_to_test/registry.py @@ -9,10 +9,10 @@ def __init__(self): def register(self, name): assert name not in self._registry - def _regsiter(callable_): + def _register(callable_): self._registry[name] = callable_ - return _regsiter + return _register def get_callable(self, name: str): return self._registry[name] @@ -34,6 +34,6 @@ def __next__(self): non_distributed_component_funcs = Registry() -model_paralle_component_funcs = Registry() +model_parallel_component_funcs = Registry() -__all__ = ['non_distributed_component_funcs', 'model_paralle_component_funcs'] +__all__ = ['non_distributed_component_funcs', 'model_parallel_component_funcs'] From dc12baea19fe2ef0b0cbf85758d4fc4efb4d323e Mon Sep 17 00:00:00 2001 From: digger-yu Date: Mon, 8 May 2023 14:30:17 +0800 Subject: [PATCH 3/4] fix some spelling error with tests/ 
colossalai/ etc. --- applications/Chat/coati/kernels/opt_attn.py | 2 +- colossalai/communication/p2p.py | 2 +- colossalai/communication/p2p_v2.py | 2 +- .../initializer_sequence.py | 4 ++-- .../tutorial/new_api/cifar_resnet/train.py | 2 +- examples/tutorial/new_api/cifar_vit/train.py | 6 ++--- .../test_activation_checkpointing.py | 2 +- .../test_checkpoint_io/test_load.py | 22 +++++++++---------- .../test_checkpoint_io/test_merge.py | 4 ++-- .../test_checkpoint_io/test_redist.py | 4 ++-- .../test_checkpoint_io/test_save.py | 8 +++---- tests/test_utils/test_lazy_init/utils.py | 4 ++-- 12 files changed, 31 insertions(+), 31 deletions(-) diff --git a/applications/Chat/coati/kernels/opt_attn.py b/applications/Chat/coati/kernels/opt_attn.py index c10f341e94a3..e99f9c2247d1 100644 --- a/applications/Chat/coati/kernels/opt_attn.py +++ b/applications/Chat/coati/kernels/opt_attn.py @@ -77,7 +77,7 @@ def forward( scale=self.scaling) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be - # partitioned aross GPUs when using tensor-parallelism. + # partitioned across GPUs when using tensor-parallelism. attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) diff --git a/colossalai/communication/p2p.py b/colossalai/communication/p2p.py index 0200cd3c6553..1f20fca4f74d 100644 --- a/colossalai/communication/p2p.py +++ b/colossalai/communication/p2p.py @@ -217,7 +217,7 @@ def recv_backward(output_grad_shape, next_rank (int, optional): The rank of the source of the tensor. Returns: - Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]: The input gradient tensor or gradident tensor list. + Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]: The input gradient tensor or gradient tensor list. 
""" if gpc.is_pipeline_last_stage(): output_tensor_grad = None diff --git a/colossalai/communication/p2p_v2.py b/colossalai/communication/p2p_v2.py index 0dacd8c3c9b5..090311cb35f2 100644 --- a/colossalai/communication/p2p_v2.py +++ b/colossalai/communication/p2p_v2.py @@ -19,7 +19,7 @@ def init_process_group(): - """intialise process group by dist.new_group in the adjacent stages + """initialise process group by dist.new_group in the adjacent stages Args: None diff --git a/colossalai/context/process_group_initializer/initializer_sequence.py b/colossalai/context/process_group_initializer/initializer_sequence.py index eaacb14d2282..251a2940778a 100644 --- a/colossalai/context/process_group_initializer/initializer_sequence.py +++ b/colossalai/context/process_group_initializer/initializer_sequence.py @@ -91,11 +91,11 @@ def init_dist_group(self): parallel_setting = [] - local_rank, group_world_size, process_group, cpu_grop, ranks_in_group, mode = \ + local_rank, group_world_size, process_group, cpu_group, ranks_in_group, mode = \ self._sequence_initializer.init_dist_group() # change mode to sequence mode = ParallelMode.SEQUENCE - parallel_setting.append((local_rank, group_world_size, process_group, cpu_grop, ranks_in_group, mode)) + parallel_setting.append((local_rank, group_world_size, process_group, cpu_group, ranks_in_group, mode)) parallel_setting.append(self._sequence_dp_initializer.init_dist_group()) return parallel_setting diff --git a/examples/tutorial/new_api/cifar_resnet/train.py b/examples/tutorial/new_api/cifar_resnet/train.py index e64e95fc2baf..7c72180e4504 100644 --- a/examples/tutorial/new_api/cifar_resnet/train.py +++ b/examples/tutorial/new_api/cifar_resnet/train.py @@ -28,7 +28,7 @@ def build_dataloader(batch_size: int, coordinator: DistCoordinator, plugin: DPPluginBase): - # trainsform + # transform transform_train = transforms.Compose( [transforms.Pad(4), transforms.RandomHorizontalFlip(), diff --git a/examples/tutorial/new_api/cifar_vit/train.py b/examples/tutorial/new_api/cifar_vit/train.py index fee53df07086..d7d439c43e27 100644 --- a/examples/tutorial/new_api/cifar_vit/train.py +++ b/examples/tutorial/new_api/cifar_vit/train.py @@ -25,7 +25,7 @@ # Prepare Hyperparameters # ============================== NUM_EPOCHS = 60 -WARMUP_EPOCSH = 5 +WARMUP_EPOCHS = 5 LEARNING_RATE = 1e-3 @@ -37,7 +37,7 @@ def vit_cifar(**kwargs): def build_dataloader(batch_size: int, coordinator: DistCoordinator, plugin: DPPluginBase): - # trainsform + # transform transform_train = transforms.Compose([ transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), @@ -183,7 +183,7 @@ def main(): optimizer = HybridAdam(model.parameters(), lr=LEARNING_RATE) # lr scheduler - lr_scheduler = LinearWarmupLR(optimizer, NUM_EPOCHS, WARMUP_EPOCSH) + lr_scheduler = LinearWarmupLR(optimizer, NUM_EPOCHS, WARMUP_EPOCHS) # ============================== # Boost with ColossalAI diff --git a/tests/test_utils/test_activation_checkpointing.py b/tests/test_utils/test_activation_checkpointing.py index 59a8acd4b210..2930552cc4e7 100644 --- a/tests/test_utils/test_activation_checkpointing.py +++ b/tests/test_utils/test_activation_checkpointing.py @@ -51,7 +51,7 @@ def test_activation_checkpointing(cpu_offload, use_reentrant): # other tests might affect this test reset_seeds() - # We put initilization here to avoid change cuda rng state below + # We put initialization here to avoid change cuda rng state below inputs = torch.rand(2, 2, requires_grad=True, device='cuda') weight = torch.rand(2, 4, 
requires_grad=True, device='cuda') diff --git a/tests/test_utils/test_checkpoint_io/test_load.py b/tests/test_utils/test_checkpoint_io/test_load.py index b1a741515728..2949c9f0752d 100644 --- a/tests/test_utils/test_checkpoint_io/test_load.py +++ b/tests/test_utils/test_checkpoint_io/test_load.py @@ -23,7 +23,7 @@ def check_model_state_dict(a: Dict[str, Tensor], b: Dict[str, Tensor]) -> None: assert torch.equal(v, b[k]) -def check_optim_state_dict(a: dict, b: dict, ignore_param_gruops: bool = False) -> None: +def check_optim_state_dict(a: dict, b: dict, ignore_param_groups: bool = False) -> None: assert set(a['state'].keys()) == set(b['state'].keys()) for k, state in a['state'].items(): b_state = b['state'][k] @@ -32,7 +32,7 @@ def check_optim_state_dict(a: dict, b: dict, ignore_param_gruops: bool = False) assert torch.equal(v1, v2) else: assert v1 == v2 - if not ignore_param_gruops: + if not ignore_param_groups: assert a['param_groups'] == b['param_groups'] @@ -129,23 +129,23 @@ def launch_dist(fn, world_size: int): def save_dist(dir_name: str, zero: bool): - model, optmizer = prepare_model_optim(shard=True, zero=zero) - reset_model_optim(model, optmizer) + model, optimizer = prepare_model_optim(shard=True, zero=zero) + reset_model_optim(model, optimizer) world_size = dist.get_world_size() rank = dist.get_rank() - save(dir_name, model, optmizer, dist_meta=get_dist_metas(world_size, zero)[rank]) + save(dir_name, model, optimizer, dist_meta=get_dist_metas(world_size, zero)[rank]) def load_and_check_dist(dir_name: str): world_size = dist.get_world_size() - model, optmizer = prepare_model_optim(shard=True) - reset_model_optim(model, optmizer) + model, optimizer = prepare_model_optim(shard=True) + reset_model_optim(model, optimizer) model_state_dict = deepcopy(model.state_dict()) - optimizer_state_dict = deepcopy(optmizer.state_dict()) - reset_model_optim(model, optmizer, 1) - load(dir_name, model, optmizer, get_redist_meta(world_size), get_dist_metas(world_size)) + optimizer_state_dict = deepcopy(optimizer.state_dict()) + reset_model_optim(model, optimizer, 1) + load(dir_name, model, optimizer, get_redist_meta(world_size), get_dist_metas(world_size)) check_model_state_dict(model_state_dict, model.state_dict()) - check_optim_state_dict(optimizer_state_dict, optmizer.state_dict()) + check_optim_state_dict(optimizer_state_dict, optimizer.state_dict()) @pytest.mark.dist diff --git a/tests/test_utils/test_checkpoint_io/test_merge.py b/tests/test_utils/test_checkpoint_io/test_merge.py index 255c74adf0a2..07d4597f8391 100644 --- a/tests/test_utils/test_checkpoint_io/test_merge.py +++ b/tests/test_utils/test_checkpoint_io/test_merge.py @@ -68,7 +68,7 @@ def run_dist(rank, world_size, port, test_fn): def run_save_dist(dir_name: str, zero: bool): - model, optmizer = prepare_model_optim(shard=True, zero=zero) + model, optimizer = prepare_model_optim(shard=True, zero=zero) rank = dist.get_rank() dp_world_size = dist.get_world_size() // 2 if not zero: @@ -90,7 +90,7 @@ def run_save_dist(dir_name: str, zero: bool): 'fc.bias': ParamDistMeta(rank // 2, dp_world_size, 0, 1, zero_numel=1, zero_orig_shape=[1]) } - save(dir_name, model, optmizer, dist_meta=dist_metas) + save(dir_name, model, optimizer, dist_meta=dist_metas) @pytest.mark.dist diff --git a/tests/test_utils/test_checkpoint_io/test_redist.py b/tests/test_utils/test_checkpoint_io/test_redist.py index 144715bdfcca..fdc849a5ecc0 100644 --- a/tests/test_utils/test_checkpoint_io/test_redist.py +++ b/tests/test_utils/test_checkpoint_io/test_redist.py @@ 
-125,9 +125,9 @@ def run_dist(rank, world_size, port, test_fn): def run_save_dist(dir_name: str, zero: bool): - model, optmizer = prepare_model_optim(shard=True, zero=zero) + model, optimizer = prepare_model_optim(shard=True, zero=zero) rank = dist.get_rank() - save(dir_name, model, optmizer, dist_meta=get_dist_metas(4, zero)[rank]) + save(dir_name, model, optimizer, dist_meta=get_dist_metas(4, zero)[rank]) @pytest.mark.dist diff --git a/tests/test_utils/test_checkpoint_io/test_save.py b/tests/test_utils/test_checkpoint_io/test_save.py index e35e566f6ff8..2abdd95a6481 100644 --- a/tests/test_utils/test_checkpoint_io/test_save.py +++ b/tests/test_utils/test_checkpoint_io/test_save.py @@ -28,7 +28,7 @@ def check_model_state_dict(a: Dict[str, Tensor], b: Dict[str, Tensor]) -> None: assert torch.equal(v, b[k]) -def check_optim_state_dict(a: dict, b: dict, ignore_param_gruops: bool = False) -> None: +def check_optim_state_dict(a: dict, b: dict, ignore_param_groups: bool = False) -> None: assert set(a['state'].keys()) == set(b['state'].keys()) for k, state in a['state'].items(): b_state = b['state'][k] @@ -37,7 +37,7 @@ def check_optim_state_dict(a: dict, b: dict, ignore_param_gruops: bool = False) assert torch.equal(v1, v2) else: assert v1 == v2 - if not ignore_param_gruops: + if not ignore_param_groups: assert a['param_groups'] == b['param_groups'] @@ -113,12 +113,12 @@ def run_dist(rank, world_size, port, test_fn): def run_save_dist(dir_name): - model, optmizer = prepare_model_optim() + model, optimizer = prepare_model_optim() dist_metas = { 'fc.weight': ParamDistMeta(dist.get_rank(), dist.get_world_size(), 0, 1), 'fc.bias': ParamDistMeta(dist.get_rank(), dist.get_world_size(), 0, 1) } - save(dir_name, model, optmizer, dist_meta=dist_metas) + save(dir_name, model, optimizer, dist_meta=dist_metas) @pytest.mark.dist diff --git a/tests/test_utils/test_lazy_init/utils.py b/tests/test_utils/test_lazy_init/utils.py index a8aeb4c8930c..0b5f15ca5445 100644 --- a/tests/test_utils/test_lazy_init/utils.py +++ b/tests/test_utils/test_lazy_init/utils.py @@ -18,7 +18,7 @@ def set_seed(seed: int) -> None: torch.manual_seed(seed) -def assert_model_eqaual(m1: torch.nn.Module, m2: torch.nn.Module) -> None: +def assert_model_equal(m1: torch.nn.Module, m2: torch.nn.Module) -> None: s1 = m1.state_dict() s2 = m2.state_dict() @@ -63,7 +63,7 @@ def check_lazy_init(entry: TestingEntry, seed: int = 42, verbose: bool = False, with ctx: deferred_model = model_fn() deferred_model = ctx.materialize(deferred_model, verbose=verbose) - assert_model_eqaual(model, deferred_model) + assert_model_equal(model, deferred_model) if check_forward: assert_forward_equal(model, deferred_model, data_gen_fn, output_transform_fn) if verbose: From d2e7372459662e7834d2c6d3067868e96dad0127 Mon Sep 17 00:00:00 2001 From: digger yu Date: Wed, 10 May 2023 18:18:12 +0800 Subject: [PATCH 4/4] fix spelling error with tests/ etc. 
date:2023.5.10 --- tests/components_to_test/albert.py | 2 +- tests/test_booster/test_accelerator.py | 6 +++--- tests/test_booster/test_plugin/test_dp_plugin_base.py | 2 +- .../test_cifar_with_data_pipeline_tensor.py | 4 ++-- .../test_cifar_with_data_pipeline_tensor_v2.py | 4 ++-- .../test_codegen/test_activation_checkpoint_codegen.py | 4 ++-- .../test_nested_activation_checkpoint_codegen.py | 4 ++-- tests/test_fx/test_codegen/test_offload_codegen.py | 6 +++--- tests/test_layers/test_sequence/test_sequence.py | 2 +- tests/test_moe/test_kernel.py | 4 ++-- tests/test_tensor/model/test_model.py | 2 +- tests/test_trainer/test_pipeline/test_p2p.py | 2 +- tests/test_zero/test_gemini/test_chunkv2.py | 6 +++--- 13 files changed, 24 insertions(+), 24 deletions(-) diff --git a/tests/components_to_test/albert.py b/tests/components_to_test/albert.py index 52b2275ec4f8..8924eb2fbc92 100644 --- a/tests/components_to_test/albert.py +++ b/tests/components_to_test/albert.py @@ -27,7 +27,7 @@ def bert_model_builder(checkpoint: bool = False): attention_probs_dropout_prob=0.) print('building AlbertForSequenceClassification model') - # adapting huggingface BertForSequenceClassification for single unitest calling interface + # adapting huggingface BertForSequenceClassification for single unittest calling interface class ModelAdaptor(AlbertForSequenceClassification): def forward(self, input_ids, labels): diff --git a/tests/test_booster/test_accelerator.py b/tests/test_booster/test_accelerator.py index 895c494d0c17..6f3f66ed41b8 100644 --- a/tests/test_booster/test_accelerator.py +++ b/tests/test_booster/test_accelerator.py @@ -7,8 +7,8 @@ @clear_cache_before_run() @parameterize('device', ['cpu', 'cuda']) def test_accelerator(device): - acceleartor = Accelerator(device) + accelerator = Accelerator(device) model = nn.Linear(8, 8) - model = acceleartor.configure_model(model) + model = accelerator.configure_model(model) assert next(model.parameters()).device.type == device - del model, acceleartor + del model, accelerator diff --git a/tests/test_booster/test_plugin/test_dp_plugin_base.py b/tests/test_booster/test_plugin/test_dp_plugin_base.py index 61aeded12203..689b334cae50 100644 --- a/tests/test_booster/test_plugin/test_dp_plugin_base.py +++ b/tests/test_booster/test_plugin/test_dp_plugin_base.py @@ -56,7 +56,7 @@ def no_sync(self, model: nn.Module) -> Iterator[None]: def check_dataloader_sharding(): plugin = DPPluginWrapper() - # create a custom dasetset with 0 to 10 + # create a custom dataset with 0 to 10 dataset = TensorDataset(torch.arange(0, 10)) train_dataloader = plugin.prepare_dataloader(dataset, batch_size=2) diff --git a/tests/test_data_pipeline_tensor_parallel/test_cifar_with_data_pipeline_tensor.py b/tests/test_data_pipeline_tensor_parallel/test_cifar_with_data_pipeline_tensor.py index 4d63592f12b0..4992acbd7cc2 100644 --- a/tests/test_data_pipeline_tensor_parallel/test_cifar_with_data_pipeline_tensor.py +++ b/tests/test_data_pipeline_tensor_parallel/test_cifar_with_data_pipeline_tensor.py @@ -48,7 +48,7 @@ def run_trainer(rank, world_size, port): pipelinable.policy = "uniform" model = pipelinable.partition(1, gpc.pipeline_parallel_size, gpc.get_local_rank(ParallelMode.PIPELINE)) - # craete dataloaders + # create dataloaders root = Path(os.environ['DATA']) transform_train = transforms.Compose([ transforms.RandomCrop(32, padding=4, pad_if_needed=True), @@ -68,7 +68,7 @@ def run_trainer(rank, world_size, port): # create lr scheduler lr_scheduler = CosineAnnealingWarmupLR(optimizer=optimizer, 
total_steps=NUM_EPOCHS,
                                            warmup_steps=WARMUP_EPOCHS)
-    # intiailize
+    # initialize
     engine, train_dataloader, *_ = colossalai.initialize(model=model,
                                                          optimizer=optimizer,
                                                          criterion=criterion,
diff --git a/tests/test_data_pipeline_tensor_parallel/test_cifar_with_data_pipeline_tensor_v2.py b/tests/test_data_pipeline_tensor_parallel/test_cifar_with_data_pipeline_tensor_v2.py
index 67d2ba5f5d98..62bbb8f50391 100644
--- a/tests/test_data_pipeline_tensor_parallel/test_cifar_with_data_pipeline_tensor_v2.py
+++ b/tests/test_data_pipeline_tensor_parallel/test_cifar_with_data_pipeline_tensor_v2.py
@@ -50,7 +50,7 @@ def run_trainer(rank, world_size, port):
     pipelinable.policy = "uniform"
     model = pipelinable.partition(1, gpc.pipeline_parallel_size, gpc.get_local_rank(ParallelMode.PIPELINE))
 
-    # craete dataloaders
+    # create dataloaders
     root = Path(os.environ['DATA'])
     transform_train = transforms.Compose([
         transforms.RandomCrop(32, padding=4, pad_if_needed=True),
@@ -70,7 +70,7 @@ def run_trainer(rank, world_size, port):
     # create lr scheduler
     lr_scheduler = CosineAnnealingWarmupLR(optimizer=optimizer, total_steps=NUM_EPOCHS, warmup_steps=WARMUP_EPOCHS)
 
-    # intiailize
+    # initialize
     engine, train_dataloader, *_ = colossalai.initialize(model=model,
                                                          optimizer=optimizer,
                                                          criterion=criterion,
diff --git a/tests/test_fx/test_codegen/test_activation_checkpoint_codegen.py b/tests/test_fx/test_codegen/test_activation_checkpoint_codegen.py
index ab483f7e47a3..bcac2ec426d9 100644
--- a/tests/test_fx/test_codegen/test_activation_checkpoint_codegen.py
+++ b/tests/test_fx/test_codegen/test_activation_checkpoint_codegen.py
@@ -64,7 +64,7 @@ def forward(self, x, y):
 
 
 def _run_act_ckpt_codegen(rank, world_size, port):
-    # launch colossalai to make sure we could execute colossalai.utils.checkpoint currectly
+    # launch colossalai to make sure we could execute colossalai.utils.checkpoint correctly
     colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
 
     # build model and run forward
@@ -122,7 +122,7 @@ def test_act_ckpt_codegen():
 
 
 def _run_act_ckpt_python_code_torch11(rank, world_size, port):
-    # launch colossalai to make sure we could execute colossalai.utils.checkpoint currectly
+    # launch colossalai to make sure we could execute colossalai.utils.checkpoint correctly
     colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
 
     # build model and run forward
diff --git a/tests/test_fx/test_codegen/test_nested_activation_checkpoint_codegen.py b/tests/test_fx/test_codegen/test_nested_activation_checkpoint_codegen.py
index 9064023d4f68..5b327807a57b 100644
--- a/tests/test_fx/test_codegen/test_nested_activation_checkpoint_codegen.py
+++ b/tests/test_fx/test_codegen/test_nested_activation_checkpoint_codegen.py
@@ -32,7 +32,7 @@ def forward(self, x):
 
 
 def _run_act_ckpt_codegen(rank, world_size, port):
-    # launch colossalai to make sure we could execute colossalai.utils.checkpoint currectly
+    # launch colossalai to make sure we could execute colossalai.utils.checkpoint correctly
     colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
 
     # build model and run forward
@@ -89,7 +89,7 @@ def test_act_ckpt_codegen():
 
 
 def _run_act_ckpt_python_code_torch11(rank, world_size, port):
-    # launch colossalai to make sure we could execute colossalai.utils.checkpoint currectly
+    # launch colossalai to make sure we could execute colossalai.utils.checkpoint correctly
     colossalai.launch(config={},
rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
 
     # build model and run forward
diff --git a/tests/test_fx/test_codegen/test_offload_codegen.py b/tests/test_fx/test_codegen/test_offload_codegen.py
index 96e88eb92b33..c217b96586fe 100644
--- a/tests/test_fx/test_codegen/test_offload_codegen.py
+++ b/tests/test_fx/test_codegen/test_offload_codegen.py
@@ -56,7 +56,7 @@ def _test_fwd_and_bwd(model: torch.nn.Module, gm: ColoGraphModule, data: torch.T
     fx_out = gm(data)
     assert torch.equal(non_fx_out, fx_out), "fx_out doesn't comply with original output"
 
-    # test barckward
+    # test backward
    loss0 = non_fx_out.sum()
     loss0.backward()
     loss1 = fx_out.sum()
@@ -65,7 +65,7 @@ def _test_fwd_and_bwd(model: torch.nn.Module, gm: ColoGraphModule, data: torch.T
 
 
 def _run_offload_codegen(rank, world_size, port):
-    # launch colossalai to make sure we could execute colossalai.utils.checkpoint currectly
+    # launch colossalai to make sure we could execute colossalai.utils.checkpoint correctly
     colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
 
     # build model and input
@@ -120,7 +120,7 @@ def test_act_ckpt_codegen():
 
 
 def _run_offload_codegen_torch11(rank, world_size, port):
-    # launch colossalai to make sure we could execute colossalai.utils.checkpoint currectly
+    # launch colossalai to make sure we could execute colossalai.utils.checkpoint correctly
     colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
 
     # build model and input
diff --git a/tests/test_layers/test_sequence/test_sequence.py b/tests/test_layers/test_sequence/test_sequence.py
index aac192d7eff0..60f2d55f43af 100644
--- a/tests/test_layers/test_sequence/test_sequence.py
+++ b/tests/test_layers/test_sequence/test_sequence.py
@@ -45,7 +45,7 @@ def check_ring_qk(rank, world_size):
     ring_qk = colossalai.nn.layer.parallel_sequence.RingQK.apply
     sub_a = ring_qk(sub_q, sub_k, batch_size, num_heads, sub_seq_length)
 
-    # check master and distributed attetion scores
+    # check master and distributed attention scores
     sub_master_a = a[:, rank * sub_seq_length:(rank + 1) * sub_seq_length]
 
     assert torch.allclose(sub_a, sub_master_a, rtol=1e-5, atol=1e-2)
diff --git a/tests/test_moe/test_kernel.py b/tests/test_moe/test_kernel.py
index ad9a172b72aa..39603c158731 100644
--- a/tests/test_moe/test_kernel.py
+++ b/tests/test_moe/test_kernel.py
@@ -41,7 +41,7 @@ def run_routing(rank, world_size, port, rs=2, hidden_size=128, data_type=torch.f
     if data_type == torch.float16:
         layer = layer.half()
 
-    # use matrix multiplication instead of COL_MOE_KERNL in MOE dispatch and combine
+    # use matrix multiplication instead of COL_MOE_KERNEL in MOE dispatch and combine
     layer.use_kernel = False
     old_out, _ = layer(tokens)
     ech = old_out.shape
@@ -57,7 +57,7 @@ def run_routing(rank, world_size, port, rs=2, hidden_size=128, data_type=torch.f
         layer.gate_weight.grad.zero_()
 
     layer.use_kernel = True
-    new_out, _ = layer(tokens)    # get ouputs through colossal kernel
+    new_out, _ = layer(tokens)    # get outputs through colossal kernel
 
     if data_type == torch.float32:
         check_equal(old_out, new_out)
diff --git a/tests/test_tensor/model/test_model.py b/tests/test_tensor/model/test_model.py
index 79d70e53c5cb..288bd20e3844 100644
--- a/tests/test_tensor/model/test_model.py
+++ b/tests/test_tensor/model/test_model.py
@@ -329,6 +329,6 @@ def test_pretrain_load(world_size):
 
 if __name__ == '__main__':
     # test_model_parameters()
-    # test_colo_optgimizer()
+    # test_colo_optimizer()
test_model(4) # test_pretrain_load(4) diff --git a/tests/test_trainer/test_pipeline/test_p2p.py b/tests/test_trainer/test_pipeline/test_p2p.py index cb7a193d2bfa..8ad366133d18 100644 --- a/tests/test_trainer/test_pipeline/test_p2p.py +++ b/tests/test_trainer/test_pipeline/test_p2p.py @@ -90,7 +90,7 @@ def run_check(rank, world_size, port): prev_rank = gpc.get_prev_global_rank(ParallelMode.PIPELINE) next_rank = gpc.get_next_global_rank(ParallelMode.PIPELINE) logger.info('Rank {0}: prev rank {1}, next rank {2}'.format(rank, prev_rank, next_rank)) - logger.info('Distributed environment is initialzied.') + logger.info('Distributed environment is initialized.') check_comm(world_size, rank, prev_rank, next_rank, logger) gpc.destroy() diff --git a/tests/test_zero/test_gemini/test_chunkv2.py b/tests/test_zero/test_gemini/test_chunkv2.py index 16764aa6b0b1..1cb31b260a99 100644 --- a/tests/test_zero/test_gemini/test_chunkv2.py +++ b/tests/test_zero/test_gemini/test_chunkv2.py @@ -23,7 +23,7 @@ def add_param(param_list, param_cp_list, *args, **kwargs): param_cp_list.append(param.clone()) -def check_euqal(param, param_cp): +def check_equal(param, param_cp): if param.device != param_cp.device: temp = param.data.to(param_cp.device) else: @@ -57,7 +57,7 @@ def exam_chunk_basic(init_device, keep_gathered, pin_memory): my_chunk.append_tensor(param) assert my_chunk.utilized_size == 597 for param, param_cp in zip(param_list, param_cp_list): - check_euqal(param, param_cp) + check_equal(param, param_cp) my_chunk.close_chunk() if keep_gathered is False: @@ -77,7 +77,7 @@ def exam_chunk_basic(init_device, keep_gathered, pin_memory): my_chunk.access_chunk() assert my_chunk.device_type == 'cuda' for param, param_cp in zip(param_list, param_cp_list): - check_euqal(param, param_cp) + check_equal(param, param_cp) assert my_chunk.tensor_state_cnter[TensorState.HOLD] == 4 my_chunk.tensor_trans_state(param_list[0], TensorState.COMPUTE)