Feature/chatglm #4240
Merged
flybird11111 merged 15 commits into hpcaitech:feature/shardformer-models from klhhhhh:feature/chatglm on Jul 20, 2023
Commits (15)
9c1697f [shardformer] added tests (klhhhhh)
b07e0c9 [shardformer] vit test finish and support (klhhhhh)
3a28444 [shardformer] chatglm ready (klhhhhh)
7f9c3a1 import chatglm (klhhhhh)
4137e03 [shardformer] add test kit in model zoo for chatglm (klhhhhh)
0d37788 [sharformer] add first version of policy of chatglm (klhhhhh)
8296b42 [shardformer] polish chatglm code (klhhhhh)
425f174 [shardformer] polish code (klhhhhh)
ee66714 [shardformer] support chatglm without layernorm (klhhhhh)
5c82e62 [shardformer] chatglm shard without mlp sharding (klhhhhh)
3ed27a2 [shardformer] delete some file (klhhhhh)
92fff75 [shardformer] ChatGLM support layernorm sharding (klhhhhh)
1d52b2b [shardformer] register without auto policy (klhhhhh)
ae17a32 [shardformer] pre-commit check files (klhhhhh)
243f7b8 [shardformer] fix chatglm configuration with pre-commit (klhhhhh)
New file (+96 lines): ChatGLM shardformer policy

@@ -0,0 +1,96 @@
from typing import Dict, Union

import torch.nn as nn

import colossalai.shardformer.layer as col_nn

from .basepolicy import ModulePolicyDescription, Policy, SubModuleReplacementDescription

__all__ = ['ChatGLMModelPolicy', 'ChatGLMForConditionalGenerationPolicy']


class ChatGLMModelPolicy(Policy):

    def config_sanity_check(self):
        pass

    def preprocess(self):
        # Resize embedding
        vocab_size = self.model.config.padded_vocab_size
        world_size = self.shard_config.tensor_parallel_size

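        # If the padded vocabulary is not divisible by the tensor parallel degree, grow it to the
        # next multiple so VocabParallelEmbedding1D can split the embedding evenly across ranks.
        # Hypothetical example (numbers not from the PR): vocab_size=65027 with world_size=4 gives
        # 65027 % 4 == 3, so new_vocab_size = 65027 + 4 - 3 = 65028, which 4 divides exactly.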
        if vocab_size % world_size != 0:
            new_vocab_size = vocab_size + world_size - vocab_size % world_size
            self.model.resize_token_embeddings(new_vocab_size)

        return self.model

    def module_policy(self) -> Dict[Union[str, nn.Module], ModulePolicyDescription]:
        from tests.kit.model_zoo.transformers.chatglm2_6b.modeling_chatglm import ChatGLMModel, GLMBlock

        policy = {}

        if self.shard_config.enable_tensor_parallelism:
            policy[ChatGLMModel] = ModulePolicyDescription(attribute_replacement={},
                                                           sub_module_replacement=[
                                                               SubModuleReplacementDescription(
                                                                   suffix="embedding.word_embeddings",
                                                                   target_module=col_nn.VocabParallelEmbedding1D,
                                                               )
                                                           ])

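            # attribute_replacement patches per-partition bookkeeping on each GLMBlock's attention.
            # For example (hypothetical sizes, not from the PR): with num_attention_heads=8,
            # kv_channels=128 and tensor_parallel_size=2, each rank keeps 4 heads,
            # projection_size 512 and qkv_hidden_size 1536.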
            policy[GLMBlock] = ModulePolicyDescription(attribute_replacement={
                "self_attention.num_attention_heads_per_partition":
                    self.model.config.num_attention_heads // self.shard_config.tensor_parallel_size,
                "self_attention.projection_size":
                    (self.model.config.kv_channels * self.model.config.num_attention_heads) //
                    self.shard_config.tensor_parallel_size,
                "self_attention.qkv_hidden_size":
                    (self.model.config.kv_channels * self.model.config.num_attention_heads * 3) //
                    self.shard_config.tensor_parallel_size,
                "self_attention.core_attention.num_attention_heads_per_partition":
                    self.model.config.num_attention_heads // self.shard_config.tensor_parallel_size,
                "self_attention.core_attention.hidden_size_per_partition":
                    self.model.config.kv_channels * self.model.config.num_attention_heads //
                    self.shard_config.tensor_parallel_size,
            },
                                                       param_replacement=[],
                                                       sub_module_replacement=[
                                                           SubModuleReplacementDescription(
                                                               suffix="self_attention.query_key_value",
                                                               target_module=col_nn.Linear1D_Col,
                                                           ),
                                                           SubModuleReplacementDescription(
                                                               suffix="self_attention.dense",
                                                               target_module=col_nn.Linear1D_Row,
                                                           ),
                                                           SubModuleReplacementDescription(
                                                               suffix="self_attention.core_attention.attention_dropout",
                                                               target_module=col_nn.DropoutForParallelInput,
                                                           ),
                                                       ])

        # optimization configuration
        if self.shard_config.enable_fused_normalization:
            if not self.model.config.rmsnorm:
                self.append_or_create_submodule_replacement(description=[
                    SubModuleReplacementDescription(suffix="input_layernorm", target_module=col_nn.FusedLayerNorm),
                    SubModuleReplacementDescription(suffix="post_attention_layernorm",
                                                    target_module=col_nn.FusedLayerNorm)
                ],
                                                            policy=policy,
                                                            target_key=GLMBlock)

                if self.model.config.post_layer_norm:
                    self.append_or_create_submodule_replacement(description=[
                        SubModuleReplacementDescription(suffix="encoder.final_layernorm",
                                                        target_module=col_nn.FusedLayerNorm)
                    ],
                                                                policy=policy,
                                                                target_key=ChatGLMModel)

        return policy

    def postprocess(self):
        return self.model
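
This policy only describes how to split ChatGLM modules; ShardFormer is what applies it. A rough sketch of that wiring is below (the ShardConfig arguments, the optimize() signature and its return value are assumptions about the surrounding ColossalAI API, not taken from this diff):

import copy

from colossalai.shardformer import ShardConfig, ShardFormer

from tests.kit.model_zoo.transformers.chatglm2_6b.configuration_chatglm import ChatGLMConfig
from tests.kit.model_zoo.transformers.chatglm2_6b.modeling_chatglm import ChatGLMModel
# ChatGLMModelPolicy would be imported from the policy file above; its path is not shown in this view.

# Assumes colossalai.launch_from_torch (or similar) has already set up the process group.
shard_config = ShardConfig(enable_tensor_parallelism=True, enable_fused_normalization=False)
shard_former = ShardFormer(shard_config=shard_config)

config = ChatGLMConfig(num_layers=1, hidden_size=64, num_attention_heads=8,
                       rmsnorm=False, original_rope=True, use_cache=True)
model = ChatGLMModel(config, empty_init=False)

sharded_model = shard_former.optimize(copy.deepcopy(model), policy=ChatGLMModelPolicy())
# Depending on the ColossalAI version, optimize() may instead return (sharded_model, shared_params).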
New file (+38 lines): ChatGLM test-kit registration in the model zoo

@@ -0,0 +1,38 @@
import torch
import transformers

from ..registry import ModelAttribute, model_zoo
from .chatglm2_6b.configuration_chatglm import ChatGLMConfig
from .chatglm2_6b.modeling_chatglm import ChatGLMModel

# ================================
# Register single-sentence ChatGLM
# ================================


def data_gen():
    input_ids = torch.tensor([[5941, 15, 2670, 3543, 632, 2075]], dtype=torch.int64)
    attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
    return dict(input_ids=input_ids, attention_mask=attention_mask)


# define output transform function
output_transform_fn = lambda x: x

# define loss function
loss_fn_for_chatglm_model = lambda x: x.last_hidden_state.mean()
loss_fn = lambda x: x.loss

config = ChatGLMConfig(num_layers=1,
                       padded_vocab_size=65024,
                       hidden_size=64,
                       num_attention_heads=8,
                       rmsnorm=False,
                       original_rope=True,
                       use_cache=True)

model_zoo.register(name='transformers_chatglm',
                   model_fn=lambda: ChatGLMModel(config, empty_init=False),
                   data_gen_fn=data_gen,
                   output_transform_fn=output_transform_fn,
                   loss_fn=loss_fn_for_chatglm_model,
                   model_attribute=ModelAttribute(has_control_flow=True))
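
For reference, the pieces registered above compose into a small CPU smoke test along these lines (a sketch, not a test from this PR; it assumes the custom modeling_chatglm forward accepts the keys produced by data_gen):

# Instantiate the 1-layer debug model with the config defined above.
model = ChatGLMModel(config, empty_init=False)

# Run one forward/backward pass using the registered helpers.
inputs = data_gen()
outputs = output_transform_fn(model(**inputs))
loss = loss_fn_for_chatglm_model(outputs)   # mean of last_hidden_state, a scalar
loss.backward()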
tests/kit/model_zoo/transformers/chatglm2_6b/configuration_chatglm.py (58 additions, 0 deletions)
@@ -0,0 +1,58 @@
from transformers import PretrainedConfig


class ChatGLMConfig(PretrainedConfig):
    model_type = "chatglm"

    def __init__(self,
                 num_layers=28,
                 padded_vocab_size=65024,
                 hidden_size=4096,
                 ffn_hidden_size=13696,
                 kv_channels=128,
                 num_attention_heads=32,
                 seq_length=2048,
                 hidden_dropout=0.0,
                 attention_dropout=0.0,
                 layernorm_epsilon=1e-5,
                 rmsnorm=True,
                 apply_residual_connection_post_layernorm=False,
                 post_layer_norm=True,
                 add_bias_linear=False,
                 add_qkv_bias=False,
                 bias_dropout_fusion=True,
                 multi_query_attention=False,
                 multi_query_group_num=1,
                 apply_query_key_layer_scaling=True,
                 attention_softmax_in_fp32=True,
                 fp32_residual_connection=False,
                 quantization_bit=0,
                 pre_seq_len=None,
                 prefix_projection=False,
                 **kwargs):
        self.num_layers = num_layers
        self.vocab_size = padded_vocab_size
        self.padded_vocab_size = padded_vocab_size
        self.hidden_size = hidden_size
        self.ffn_hidden_size = ffn_hidden_size
        self.kv_channels = kv_channels
        self.num_attention_heads = num_attention_heads
        self.seq_length = seq_length
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.layernorm_epsilon = layernorm_epsilon
        self.rmsnorm = rmsnorm
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.post_layer_norm = post_layer_norm
        self.add_bias_linear = add_bias_linear
        self.add_qkv_bias = add_qkv_bias
        self.bias_dropout_fusion = bias_dropout_fusion
        self.multi_query_attention = multi_query_attention
        self.multi_query_group_num = multi_query_group_num
        self.apply_query_key_layer_scaling = apply_query_key_layer_scaling
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.fp32_residual_connection = fp32_residual_connection
        self.quantization_bit = quantization_bit
        self.pre_seq_len = pre_seq_len
        self.prefix_projection = prefix_projection
        super().__init__(**kwargs)
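
One detail worth noting when using this config: both vocab_size and padded_vocab_size are set from the single padded_vocab_size argument, and padded_vocab_size is the field that ChatGLMModelPolicy.preprocess reads when padding the embedding for tensor parallelism. A minimal illustrative check (hypothetical override values, not from the PR):

cfg = ChatGLMConfig(num_layers=2, hidden_size=128)
assert cfg.vocab_size == cfg.padded_vocab_size == 65024
assert cfg.model_type == "chatglm"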