2 changes: 1 addition & 1 deletion applications/Chat/coati/kernels/opt_attn.py
@@ -77,7 +77,7 @@ def forward(
scale=self.scaling)

# Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
- # partitioned aross GPUs when using tensor-parallelism.
+ # partitioned across GPUs when using tensor-parallelism.
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)

attn_output = self.out_proj(attn_output)
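A note on the comment being fixed here: under tensor parallelism the attention heads are split across GPUs, so the width of the local `attn_output` need not match the width of the incoming `hidden_states`. A minimal sketch of that shape arithmetic in plain PyTorch (the 2-way split and the concrete sizes are hypothetical, not taken from this file):

```python
import torch

bsz, tgt_len = 4, 16
num_heads, head_dim = 12, 64              # full model width: 12 * 64 = 768
tp_size = 2                               # hypothetical tensor-parallel degree
local_heads = num_heads // tp_size        # 6 heads live on this GPU
local_embed_dim = local_heads * head_dim  # 384: the width stored on the module

# Per-GPU attention output holds only the local heads:
attn_output = torch.randn(bsz * local_heads, tgt_len, head_dim)
attn_output = attn_output.view(bsz, local_heads, tgt_len, head_dim).transpose(1, 2)

# Reshape with the module's own embed_dim; the input hidden_states may still be
# full-width (768), so hidden_states.shape[-1] would be the wrong target here.
attn_output = attn_output.reshape(bsz, tgt_len, local_embed_dim)
assert attn_output.shape == (bsz, tgt_len, 384)
```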
2 changes: 1 addition & 1 deletion colossalai/communication/p2p.py
@@ -217,7 +217,7 @@ def recv_backward(output_grad_shape,
next_rank (int, optional): The rank of the source of the tensor.

Returns:
- Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]: The input gradient tensor or gradident tensor list.
+ Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]: The input gradient tensor or gradient tensor list.
"""
if gpc.is_pipeline_last_stage():
output_tensor_grad = None
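For readers outside the codebase: `recv_backward` is the pipeline-parallel primitive that pulls the gradient of this stage's output back from the next stage. A minimal sketch of the underlying pattern using plain `torch.distributed` (ColossalAI's real implementation goes through its own communication layer; the function below is illustrative only):

```python
import torch
import torch.distributed as dist

def recv_backward_sketch(output_grad_shape, next_rank, device='cuda'):
    # The receiver must pre-allocate a buffer of the agreed-upon shape,
    # which is why the shape travels with the call.
    output_tensor_grad = torch.empty(output_grad_shape, device=device)
    dist.recv(output_tensor_grad, src=next_rank)  # blocking point-to-point receive
    return output_tensor_grad
```

As in the hunk above, the last pipeline stage has no successor, so the real function short-circuits to `None` there.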
2 changes: 1 addition & 1 deletion colossalai/communication/p2p_v2.py
@@ -19,7 +19,7 @@


def init_process_group():
- """intialise process group by dist.new_group in the adjacent stages
+ """initialise process group by dist.new_group in the adjacent stages

Args:
None
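The fixed docstring refers to building one process group per pair of adjacent pipeline stages. A hedged sketch of what that looks like with raw `torch.distributed` (one process per stage assumed; the helper name is ours). The non-obvious constraint is that `dist.new_group` is itself collective: every rank must call it for every group, including groups it will never belong to.

```python
import torch.distributed as dist

def init_adjacent_groups(num_stages):
    groups = {}
    for i in range(num_stages - 1):
        ranks = [i, i + 1]                    # stages i and i+1 are adjacent
        group = dist.new_group(ranks=ranks)   # must run on ALL ranks
        if dist.get_rank() in ranks:
            groups[(i, i + 1)] = group        # keep only the groups we joined
    return groups
```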
@@ -91,11 +91,11 @@ def init_dist_group(self):

parallel_setting = []

- local_rank, group_world_size, process_group, cpu_grop, ranks_in_group, mode = \
+ local_rank, group_world_size, process_group, cpu_group, ranks_in_group, mode = \
self._sequence_initializer.init_dist_group()
# change mode to sequence
mode = ParallelMode.SEQUENCE

- parallel_setting.append((local_rank, group_world_size, process_group, cpu_grop, ranks_in_group, mode))
+ parallel_setting.append((local_rank, group_world_size, process_group, cpu_group, ranks_in_group, mode))
parallel_setting.append(self._sequence_dp_initializer.init_dist_group())
return parallel_setting
2 changes: 1 addition & 1 deletion examples/tutorial/new_api/cifar_resnet/train.py
@@ -28,7 +28,7 @@


def build_dataloader(batch_size: int, coordinator: DistCoordinator, plugin: DPPluginBase):
- # trainsform
+ # transform
transform_train = transforms.Compose(
[transforms.Pad(4),
transforms.RandomHorizontalFlip(),
6 changes: 3 additions & 3 deletions examples/tutorial/new_api/cifar_vit/train.py
@@ -25,7 +25,7 @@
# Prepare Hyperparameters
# ==============================
NUM_EPOCHS = 60
- WARMUP_EPOCSH = 5
+ WARMUP_EPOCHS = 5
LEARNING_RATE = 1e-3


@@ -37,7 +37,7 @@ def vit_cifar(**kwargs):


def build_dataloader(batch_size: int, coordinator: DistCoordinator, plugin: DPPluginBase):
- # trainsform
+ # transform
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
@@ -183,7 +183,7 @@ def main():
optimizer = HybridAdam(model.parameters(), lr=LEARNING_RATE)

# lr scheduler
- lr_scheduler = LinearWarmupLR(optimizer, NUM_EPOCHS, WARMUP_EPOCSH)
+ lr_scheduler = LinearWarmupLR(optimizer, NUM_EPOCHS, WARMUP_EPOCHS)

# ==============================
# Boost with ColossalAI
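Since `WARMUP_EPOCHS` is the value being fixed: a linear-warmup schedule ramps the learning rate up over the first few epochs before the main decay, which stabilizes early ViT training. Below is a vanilla-PyTorch approximation of what `LinearWarmupLR(optimizer, NUM_EPOCHS, WARMUP_EPOCHS)` plausibly does; the exact decay shape of the ColossalAI class is an assumption here, and the stand-in model is ours.

```python
import torch

NUM_EPOCHS, WARMUP_EPOCHS, LEARNING_RATE = 60, 5, 1e-3
model = torch.nn.Linear(8, 8)  # stand-in model
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)

def warmup_then_linear_decay(epoch):
    if epoch < WARMUP_EPOCHS:
        return (epoch + 1) / WARMUP_EPOCHS  # linear ramp toward the base LR
    return max(0.0, (NUM_EPOCHS - epoch) / (NUM_EPOCHS - WARMUP_EPOCHS))

lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, warmup_then_linear_decay)
```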
2 changes: 1 addition & 1 deletion op_builder/utils.py
@@ -36,7 +36,7 @@ def get_cuda_version_in_pytorch() -> List[int]:
torch_cuda_minor = torch.version.cuda.split(".")[1]
except:
raise ValueError(
- "[extension] Cannot retrive the CUDA version in the PyTorch binary given by torch.version.cuda")
+ "[extension] Cannot retrieve the CUDA version in the PyTorch binary given by torch.version.cuda")
return torch_cuda_major, torch_cuda_minor
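Context for this error message: `torch.version.cuda` is a string such as `"11.8"` on CUDA builds and `None` on CPU-only builds, so the attribute access can raise. A self-contained sketch of the helper with the bare `except:` narrowed (the original file keeps the broad form; the function name marks this as a re-sketch):

```python
from typing import List
import torch

def get_cuda_version_in_pytorch_sketch() -> List[int]:
    try:
        major, minor = torch.version.cuda.split(".")[:2]
    except (AttributeError, ValueError):  # None on CPU-only builds, or odd format
        raise ValueError(
            "[extension] Cannot retrieve the CUDA version in the PyTorch binary "
            "given by torch.version.cuda")
    return [int(major), int(minor)]
```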
10 changes: 5 additions & 5 deletions tests/components_to_test/albert.py
@@ -28,7 +28,7 @@ def bert_model_builder(checkpoint: bool = False):
print('building AlbertForSequenceClassification model')

# adapting huggingface BertForSequenceClassification for single unitest calling interface
- class ModelAaptor(AlbertForSequenceClassification):
+ class ModelAdaptor(AlbertForSequenceClassification):

def forward(self, input_ids, labels):
"""
@@ -37,23 +37,23 @@ def forward(self, input_ids, labels):
"""
return super().forward(input_ids=input_ids, labels=labels)[0]

- model = ModelAaptor(config)
+ model = ModelAdaptor(config)
# if checkpoint and version.parse(transformers.__version__) >= version.parse("4.11.0"):
# model.gradient_checkpointing_enable()

return model

- is_distrbuted = torch.distributed.is_initialized()
+ is_distributed = torch.distributed.is_initialized()
trainloader = get_bert_data_loader(n_class=vocab_size,
batch_size=2,
total_samples=10000,
sequence_length=sequence_length,
- is_distrbuted=is_distrbuted)
+ is_distributed=is_distributed)
testloader = get_bert_data_loader(n_class=vocab_size,
batch_size=2,
total_samples=10000,
sequence_length=sequence_length,
- is_distrbuted=is_distrbuted)
+ is_distributed=is_distributed)

criterion = None
return bert_model_builder, trainloader, testloader, torch.optim.Adam, criterion
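The renamed `ModelAdaptor` encodes a small but useful pattern: HuggingFace `*ForSequenceClassification` models return a tuple/output object whose first element is the loss, while the test harness wants every model to expose a uniform `model(data, label) -> loss` call. A standalone sketch (the tiny config values are ours, chosen only to keep it fast):

```python
import torch
from transformers import AlbertConfig, AlbertForSequenceClassification

class ModelAdaptor(AlbertForSequenceClassification):

    def forward(self, input_ids, labels):
        # [0] selects the loss from the (loss, logits, ...) output,
        # giving every test model the same single-tensor interface.
        return super().forward(input_ids=input_ids, labels=labels)[0]

config = AlbertConfig(vocab_size=100, embedding_size=32, hidden_size=64,
                      num_hidden_layers=2, num_attention_heads=4,
                      intermediate_size=128)
model = ModelAdaptor(config)
loss = model(torch.randint(0, 100, (2, 8)), torch.tensor([0, 1]))
assert loss.dim() == 0  # a scalar loss tensor
```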
4 changes: 2 additions & 2 deletions tests/components_to_test/beit.py
@@ -27,7 +27,7 @@ def generate(self):
@non_distributed_component_funcs.register(name='beit')
def get_training_components():

- def model_buider(checkpoint=False):
+ def model_builder(checkpoint=False):
model = Beit(img_size=DummyDataLoader.img_size,
num_classes=DummyDataLoader.num_class,
embed_dim=32,
@@ -39,4 +39,4 @@ def model_buider(checkpoint=False):
testloader = DummyDataLoader()

criterion = torch.nn.CrossEntropyLoss()
- return model_buider, trainloader, testloader, torch.optim.Adam, criterion
+ return model_builder, trainloader, testloader, torch.optim.Adam, criterion
16 changes: 8 additions & 8 deletions tests/components_to_test/bert.py
@@ -13,7 +13,7 @@ def get_bert_data_loader(
total_samples,
sequence_length,
device=torch.device('cpu:0'),
- is_distrbuted=False,
+ is_distributed=False,
):
train_data = torch.randint(
low=0,
@@ -24,7 +24,7 @@
)
train_label = torch.randint(low=0, high=2, size=(total_samples,), device=device, dtype=torch.long)
train_dataset = torch.utils.data.TensorDataset(train_data, train_label)
- if is_distrbuted:
+ if is_distributed:
sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
sampler = SequentialSampler(train_dataset)
@@ -52,8 +52,8 @@ def bert_model_builder(checkpoint: bool = False):
attention_probs_dropout_prob=0.)
print('building BertForSequenceClassification model')

- # adapting huggingface BertForSequenceClassification for single unitest calling interface
- class ModelAaptor(BertForSequenceClassification):
+ # adapting huggingface BertForSequenceClassification for single unittest calling interface
+ class ModelAdaptor(BertForSequenceClassification):

def forward(self, input_ids, labels):
"""
@@ -62,23 +62,23 @@ def forward(self, input_ids, labels):
"""
return super().forward(input_ids=input_ids, labels=labels)[0]

- model = ModelAaptor(config)
+ model = ModelAdaptor(config)
if checkpoint and version.parse(transformers.__version__) >= version.parse("4.11.0"):
model.gradient_checkpointing_enable()

return model

- is_distrbuted = torch.distributed.is_initialized()
+ is_distributed = torch.distributed.is_initialized()
trainloader = get_bert_data_loader(n_class=vocab_size,
batch_size=2,
total_samples=10000,
sequence_length=sequence_length,
- is_distrbuted=is_distrbuted)
+ is_distributed=is_distributed)
testloader = get_bert_data_loader(n_class=vocab_size,
batch_size=2,
total_samples=10000,
sequence_length=sequence_length,
- is_distrbuted=is_distrbuted)
+ is_distributed=is_distributed)

criterion = None
return bert_model_builder, trainloader, testloader, torch.optim.Adam, criterion
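One usage note on the renamed `is_distributed` flag, since the sampler choice above is its whole purpose: with `DistributedSampler` each rank iterates a disjoint shard of the dataset, and shuffling is keyed on an epoch counter that the training loop must advance. A hedged caller sketch (the argument values and loop are illustrative, not from this file):

```python
import torch.distributed as dist

is_distributed = dist.is_initialized()  # True under torchrun, False in single-process runs
trainloader = get_bert_data_loader(n_class=1000,
                                   batch_size=2,
                                   total_samples=10000,
                                   sequence_length=128,
                                   is_distributed=is_distributed)

for epoch in range(3):
    if is_distributed:
        # Without set_epoch, every epoch reuses the same shuffled order.
        trainloader.sampler.set_epoch(epoch)
    for input_ids, labels in trainloader:
        pass  # forward/backward elided
```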
8 changes: 4 additions & 4 deletions tests/components_to_test/registry.py
@@ -9,10 +9,10 @@ def __init__(self):
def register(self, name):
assert name not in self._registry

- def _regsiter(callable_):
+ def _register(callable_):
self._registry[name] = callable_

- return _regsiter
+ return _register

def get_callable(self, name: str):
return self._registry[name]
@@ -34,6 +34,6 @@ def __next__(self):


non_distributed_component_funcs = Registry()
- model_paralle_component_funcs = Registry()
+ model_parallel_component_funcs = Registry()

- __all__ = ['non_distributed_component_funcs', 'model_paralle_component_funcs']
+ __all__ = ['non_distributed_component_funcs', 'model_parallel_component_funcs']
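For context, the decorator whose name is fixed here implements a plain registry pattern: `register(name)` returns a closure that files the decorated builder under `name` for later lookup by `get_callable`. A self-contained re-sketch with a toy registration (one deliberate deviation, flagged inline):

```python
class Registry:

    def __init__(self):
        self._registry = {}

    def register(self, name):
        assert name not in self._registry

        def _register(callable_):
            self._registry[name] = callable_
            return callable_  # deviation: the original returns None, so the
                              # decorated module-level name becomes None there

        return _register

    def get_callable(self, name: str):
        return self._registry[name]

non_distributed_component_funcs = Registry()

@non_distributed_component_funcs.register(name='toy')
def get_toy_components():
    return 'model_builder, loaders, optimizer, criterion'

assert non_distributed_component_funcs.get_callable('toy')() \
    == 'model_builder, loaders, optimizer, criterion'
```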
2 changes: 1 addition & 1 deletion tests/test_utils/test_activation_checkpointing.py
@@ -51,7 +51,7 @@ def test_activation_checkpointing(cpu_offload, use_reentrant):
# other tests might affect this test
reset_seeds()

- # We put initilization here to avoid change cuda rng state below
+ # We put initialization here to avoid change cuda rng state below
inputs = torch.rand(2, 2, requires_grad=True, device='cuda')
weight = torch.rand(2, 4, requires_grad=True, device='cuda')
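The comment fixed above hints at a subtle test-isolation issue: every random CUDA tensor creation advances the CUDA RNG state, so ordering decides reproducibility. A small sketch of pinning and rewinding that state (requires a CUDA device; plain PyTorch, not the test's helpers):

```python
import torch

torch.cuda.manual_seed(42)                # analogous to reset_seeds() in the test
state = torch.cuda.get_rng_state()        # snapshot right after seeding

inputs = torch.rand(2, 2, device='cuda')  # this draw advances the CUDA RNG state
torch.cuda.set_rng_state(state)           # rewind to the snapshot
inputs_again = torch.rand(2, 2, device='cuda')
assert torch.equal(inputs, inputs_again)  # identical draws after the rewind
```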
22 changes: 11 additions & 11 deletions tests/test_utils/test_checkpoint_io/test_load.py
@@ -23,7 +23,7 @@ def check_model_state_dict(a: Dict[str, Tensor], b: Dict[str, Tensor]) -> None:
assert torch.equal(v, b[k])


- def check_optim_state_dict(a: dict, b: dict, ignore_param_gruops: bool = False) -> None:
+ def check_optim_state_dict(a: dict, b: dict, ignore_param_groups: bool = False) -> None:
assert set(a['state'].keys()) == set(b['state'].keys())
for k, state in a['state'].items():
b_state = b['state'][k]
@@ -32,7 +32,7 @@ def check_optim_state_dict(a: dict, b: dict, ignore_param_gruops: bool = False)
assert torch.equal(v1, v2)
else:
assert v1 == v2
- if not ignore_param_gruops:
+ if not ignore_param_groups:
assert a['param_groups'] == b['param_groups']


@@ -129,23 +129,23 @@ def launch_dist(fn, world_size: int):


def save_dist(dir_name: str, zero: bool):
- model, optmizer = prepare_model_optim(shard=True, zero=zero)
- reset_model_optim(model, optmizer)
+ model, optimizer = prepare_model_optim(shard=True, zero=zero)
+ reset_model_optim(model, optimizer)
world_size = dist.get_world_size()
rank = dist.get_rank()
- save(dir_name, model, optmizer, dist_meta=get_dist_metas(world_size, zero)[rank])
+ save(dir_name, model, optimizer, dist_meta=get_dist_metas(world_size, zero)[rank])


def load_and_check_dist(dir_name: str):
world_size = dist.get_world_size()
- model, optmizer = prepare_model_optim(shard=True)
- reset_model_optim(model, optmizer)
+ model, optimizer = prepare_model_optim(shard=True)
+ reset_model_optim(model, optimizer)
model_state_dict = deepcopy(model.state_dict())
- optimizer_state_dict = deepcopy(optmizer.state_dict())
- reset_model_optim(model, optmizer, 1)
- load(dir_name, model, optmizer, get_redist_meta(world_size), get_dist_metas(world_size))
+ optimizer_state_dict = deepcopy(optimizer.state_dict())
+ reset_model_optim(model, optimizer, 1)
+ load(dir_name, model, optimizer, get_redist_meta(world_size), get_dist_metas(world_size))
check_model_state_dict(model_state_dict, model.state_dict())
- check_optim_state_dict(optimizer_state_dict, optmizer.state_dict())
+ check_optim_state_dict(optimizer_state_dict, optimizer.state_dict())


@pytest.mark.dist
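The save/load functions renamed throughout this file follow a standard round-trip test shape: snapshot the state, save, perturb, load, and compare. Stripped of ColossalAI's sharding metadata, the skeleton looks like this, with plain `torch.save`/`torch.load` standing in for the library's `save`/`load`:

```python
from copy import deepcopy
import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.Adam(model.parameters())

reference = deepcopy(model.state_dict())
torch.save({'model': model.state_dict(), 'optim': optimizer.state_dict()}, 'ckpt.pt')

with torch.no_grad():
    model.weight.fill_(1.0)  # perturb so a successful load is observable

ckpt = torch.load('ckpt.pt')
model.load_state_dict(ckpt['model'])
optimizer.load_state_dict(ckpt['optim'])

for k, v in reference.items():  # mirrors check_model_state_dict above
    assert torch.equal(v, model.state_dict()[k])
```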
4 changes: 2 additions & 2 deletions tests/test_utils/test_checkpoint_io/test_merge.py
@@ -68,7 +68,7 @@ def run_dist(rank, world_size, port, test_fn):


def run_save_dist(dir_name: str, zero: bool):
- model, optmizer = prepare_model_optim(shard=True, zero=zero)
+ model, optimizer = prepare_model_optim(shard=True, zero=zero)
rank = dist.get_rank()
dp_world_size = dist.get_world_size() // 2
if not zero:
@@ -90,7 +90,7 @@ def run_save_dist(dir_name: str, zero: bool):
'fc.bias':
ParamDistMeta(rank // 2, dp_world_size, 0, 1, zero_numel=1, zero_orig_shape=[1])
}
- save(dir_name, model, optmizer, dist_meta=dist_metas)
+ save(dir_name, model, optimizer, dist_meta=dist_metas)


@pytest.mark.dist
4 changes: 2 additions & 2 deletions tests/test_utils/test_checkpoint_io/test_redist.py
@@ -125,9 +125,9 @@ def run_dist(rank, world_size, port, test_fn):


def run_save_dist(dir_name: str, zero: bool):
- model, optmizer = prepare_model_optim(shard=True, zero=zero)
+ model, optimizer = prepare_model_optim(shard=True, zero=zero)
rank = dist.get_rank()
- save(dir_name, model, optmizer, dist_meta=get_dist_metas(4, zero)[rank])
+ save(dir_name, model, optimizer, dist_meta=get_dist_metas(4, zero)[rank])


@pytest.mark.dist
8 changes: 4 additions & 4 deletions tests/test_utils/test_checkpoint_io/test_save.py
@@ -28,7 +28,7 @@ def check_model_state_dict(a: Dict[str, Tensor], b: Dict[str, Tensor]) -> None:
assert torch.equal(v, b[k])


- def check_optim_state_dict(a: dict, b: dict, ignore_param_gruops: bool = False) -> None:
+ def check_optim_state_dict(a: dict, b: dict, ignore_param_groups: bool = False) -> None:
assert set(a['state'].keys()) == set(b['state'].keys())
for k, state in a['state'].items():
b_state = b['state'][k]
@@ -37,7 +37,7 @@ def check_optim_state_dict(a: dict, b: dict, ignore_param_gruops: bool = False)
assert torch.equal(v1, v2)
else:
assert v1 == v2
- if not ignore_param_gruops:
+ if not ignore_param_groups:
assert a['param_groups'] == b['param_groups']


@@ -113,12 +113,12 @@ def run_dist(rank, world_size, port, test_fn):


def run_save_dist(dir_name):
- model, optmizer = prepare_model_optim()
+ model, optimizer = prepare_model_optim()
dist_metas = {
'fc.weight': ParamDistMeta(dist.get_rank(), dist.get_world_size(), 0, 1),
'fc.bias': ParamDistMeta(dist.get_rank(), dist.get_world_size(), 0, 1)
}
- save(dir_name, model, optmizer, dist_meta=dist_metas)
+ save(dir_name, model, optimizer, dist_meta=dist_metas)


@pytest.mark.dist
4 changes: 2 additions & 2 deletions tests/test_utils/test_lazy_init/utils.py
@@ -18,7 +18,7 @@ def set_seed(seed: int) -> None:
torch.manual_seed(seed)


- def assert_model_eqaual(m1: torch.nn.Module, m2: torch.nn.Module) -> None:
+ def assert_model_equal(m1: torch.nn.Module, m2: torch.nn.Module) -> None:
s1 = m1.state_dict()
s2 = m2.state_dict()

@@ -63,7 +63,7 @@ def check_lazy_init(entry: TestingEntry, seed: int = 42, verbose: bool = False,
with ctx:
deferred_model = model_fn()
deferred_model = ctx.materialize(deferred_model, verbose=verbose)
- assert_model_eqaual(model, deferred_model)
+ assert_model_equal(model, deferred_model)
if check_forward:
assert_forward_equal(model, deferred_model, data_gen_fn, output_transform_fn)
if verbose:
Expand Down