From 8dfb77c2c4f2461d1e4dc0c4c2bf496e4082b4b3 Mon Sep 17 00:00:00 2001
From: ver217
Date: Wed, 20 Mar 2024 17:35:20 +0800
Subject: [PATCH 1/8] [misc] add submodule

---
 .gitmodules                     | 3 +++
 examples/language/grok-1/grok_1 | 1 +
 2 files changed, 4 insertions(+)
 create mode 160000 examples/language/grok-1/grok_1

diff --git a/.gitmodules b/.gitmodules
index a4f30caa4cd1..9018e8640167 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,3 +1,6 @@
 [submodule "examples/tutorial/fastfold/FastFold"]
 	path = examples/tutorial/fastfold/FastFold
 	url = https://github.com/hpcaitech/FastFold
+[submodule "grok-1"]
+	path = examples/language/grok-1/grok_1
+	url = https://huggingface.co/hpcai-tech/grok-1
diff --git a/examples/language/grok-1/grok_1 b/examples/language/grok-1/grok_1
new file mode 160000
index 000000000000..015a18b1288e
--- /dev/null
+++ b/examples/language/grok-1/grok_1
@@ -0,0 +1 @@
+Subproject commit 015a18b1288e56d5bf9e53606908ec6abe436aae

From d998bf1a834d617d5b59474553ca142bd314ffdb Mon Sep 17 00:00:00 2001
From: ver217
Date: Thu, 21 Mar 2024 15:44:24 +0800
Subject: [PATCH 2/8] remove submodule

---
 .gitmodules                     | 3 ---
 examples/language/grok-1/grok_1 | 1 -
 2 files changed, 4 deletions(-)
 delete mode 160000 examples/language/grok-1/grok_1

diff --git a/.gitmodules b/.gitmodules
index 9018e8640167..a4f30caa4cd1 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,6 +1,3 @@
 [submodule "examples/tutorial/fastfold/FastFold"]
 	path = examples/tutorial/fastfold/FastFold
 	url = https://github.com/hpcaitech/FastFold
-[submodule "grok-1"]
-	path = examples/language/grok-1/grok_1
-	url = https://huggingface.co/hpcai-tech/grok-1
diff --git a/examples/language/grok-1/grok_1 b/examples/language/grok-1/grok_1
deleted file mode 160000
index 015a18b1288e..000000000000
--- a/examples/language/grok-1/grok_1
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 015a18b1288e56d5bf9e53606908ec6abe436aae

From 56156fccbd366f5d848e8c9bcb4630eeb4f92579 Mon Sep 17 00:00:00 2001
From: ver217
Date: Thu, 21 Mar 2024 15:45:18 +0800
Subject: [PATCH 3/8] [example] support grok-1 tp inference

---
 examples/language/grok-1/grok1_policy.py | 99 ++++++++++++++++++++++++
 examples/language/grok-1/inference.py    | 58 ++++++++++++++
 examples/language/grok-1/inference_tp.py | 76 ++++++++++++++++++
 3 files changed, 233 insertions(+)
 create mode 100644 examples/language/grok-1/grok1_policy.py
 create mode 100644 examples/language/grok-1/inference.py
 create mode 100644 examples/language/grok-1/inference_tp.py

diff --git a/examples/language/grok-1/grok1_policy.py b/examples/language/grok-1/grok1_policy.py
new file mode 100644
index 000000000000..aefea6f3df1c
--- /dev/null
+++ b/examples/language/grok-1/grok1_policy.py
@@ -0,0 +1,99 @@
+from typing import Dict, Union
+
+import torch.nn as nn
+
+from colossalai.shardformer.layer import Linear1D_Col, Linear1D_Row, VocabParallelEmbedding1D
+from colossalai.shardformer.policies.base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription
+
+
+class Grok1Policy(Policy):
+    def config_sanity_check(self):
+        pass
+
+    def preprocess(self) -> nn.Module:
+        if self.shard_config.enable_tensor_parallelism:
+            vocab_size = self.model.config.vocab_size
+            world_size = self.shard_config.tensor_parallel_size
+            assert vocab_size % world_size == 0, f"vocab_size {vocab_size} must be divisible by world_size {world_size}"
+        return self.model
+
+    def module_policy(self) -> Dict[Union[str, nn.Module], ModulePolicyDescription]:
+        policy = {}
+        if self.shard_config.enable_tensor_parallelism:
+            decoder_attribute_replacement = {
+                "attn.hidden_size": self.model.config.hidden_size // self.shard_config.tensor_parallel_size,
+                "attn.num_heads": self.model.config.num_attention_heads // self.shard_config.tensor_parallel_size,
+                "attn.num_key_value_heads": self.model.config.num_key_value_heads
+                // self.shard_config.tensor_parallel_size,
+            }
+            decoder_submodule_replacement = [
+                SubModuleReplacementDescription(
+                    suffix="attn.q_proj",
+                    target_module=Linear1D_Col,
+                ),
+                SubModuleReplacementDescription(
+                    suffix="attn.k_proj",
+                    target_module=Linear1D_Col,
+                ),
+                SubModuleReplacementDescription(
+                    suffix="attn.v_proj",
+                    target_module=Linear1D_Col,
+                ),
+                SubModuleReplacementDescription(
+                    suffix="attn.o_proj",
+                    target_module=Linear1D_Row,
+                ),
+            ]
+            for i in range(self.model.config.num_experts):
+                decoder_submodule_replacement.extend(
+                    [
+                        SubModuleReplacementDescription(
+                            suffix=f"moe_block.experts[{i}].linear",
+                            target_module=Linear1D_Col,
+                        ),
+                        SubModuleReplacementDescription(
+                            suffix=f"moe_block.experts[{i}].linear_v",
+                            target_module=Linear1D_Col,
+                        ),
+                        SubModuleReplacementDescription(
+                            suffix=f"moe_block.experts[{i}].linear_1",
+                            target_module=Linear1D_Row,
+                        ),
+                    ]
+                )
+
+            policy["DecoderLayer"] = ModulePolicyDescription(
+                attribute_replacement=decoder_attribute_replacement,
+                sub_module_replacement=decoder_submodule_replacement,
+            )
+        self.append_or_create_submodule_replacement(
+            description=SubModuleReplacementDescription(
+                suffix="embed_tokens",
+                target_module=VocabParallelEmbedding1D,
+            ),
+            policy=policy,
+            target_key="Grok1Model",
+        )
+        return policy
+
+    def postprocess(self):
+        return self.model
+
+
+class Grok1ModelPolicy(Grok1Policy):
+    pass
+
+
+class Grok1ForCausalLMPolicy(Grok1Policy):
+    def module_policy(self) -> Dict[Union[str, nn.Module], ModulePolicyDescription]:
+        policy = super().module_policy()
+        self.append_or_create_submodule_replacement(
+            description=SubModuleReplacementDescription(
+                suffix="lm_head",
+                target_module=Linear1D_Col,
+                kwargs={"gather_output": not self.shard_config.parallel_output},
+            ),
+            policy=policy,
+            target_key="Grok1ModelForCausalLM",
+        )
+        return policy
diff --git a/examples/language/grok-1/inference.py b/examples/language/grok-1/inference.py
new file mode 100644
index 000000000000..7dbfa231da23
--- /dev/null
+++ b/examples/language/grok-1/inference.py
@@ -0,0 +1,58 @@
+import argparse
+import time
+
+import torch
+from sentencepiece import SentencePieceProcessor
+from transformers import AutoModelForCausalLM
+
+
+class Bcolors:
+    HEADER = "\033[95m"
+    OKBLUE = "\033[94m"
+    OKCYAN = "\033[96m"
+    OKGREEN = "\033[92m"
+    WARNING = "\033[93m"
+    FAIL = "\033[91m"
+    ENDC = "\033[0m"
+    BOLD = "\033[1m"
+    UNDERLINE = "\033[4m"
+
+
+def print_output(text, output):
+    print(f"-----\n{Bcolors.OKBLUE}{text}{Bcolors.ENDC}{output[len(text):]}")
+
+
+@torch.no_grad()
+def inference(model, sp, text, max_new_tokens):
+    input_ids = sp.encode(text)
+    input_ids = torch.tensor([input_ids]).cuda()
+    attention_mask = torch.ones_like(input_ids)
+    inputs = {
+        "input_ids": input_ids,
+        "attention_mask": attention_mask,
+        "max_new_tokens": max_new_tokens,
+    }
+    outputs = model.generate(**inputs)
+    return outputs[0].tolist()
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--pretrained", type=str, default="hpcaitech/grok-1")
+    parser.add_argument("--tokenizer", type=str, default="tokenizer.model")
+    parser.add_argument("--text", type=str, nargs="+", default=["Hi, what's your name?"])
+    parser.add_argument("--max_new_tokens", type=int, default=30)
+    args = parser.parse_args()
+    start = time.time()
+    torch.set_default_dtype(torch.bfloat16)
+    model = AutoModelForCausalLM.from_pretrained(
+        args.pretrained,
+        trust_remote_code=True,
+        device_map="auto",
+        torch_dtype=torch.bfloat16,
+    )
+    sp = SentencePieceProcessor(model_file=args.tokenizer)
+    for text in args.text:
+        output = inference(model, sp, text, args.max_new_tokens)
+        print_output(text, sp.decode(output))
+    print(f"Overall time: {time.time() - start} seconds.")
diff --git a/examples/language/grok-1/inference_tp.py b/examples/language/grok-1/inference_tp.py
new file mode 100644
index 000000000000..fa5d5301fbe1
--- /dev/null
+++ b/examples/language/grok-1/inference_tp.py
@@ -0,0 +1,76 @@
+import argparse
+import time
+
+import torch
+from grok1_policy import Grok1ForCausalLMPolicy
+from sentencepiece import SentencePieceProcessor
+from transformers import AutoModelForCausalLM
+
+import colossalai
+from colossalai.booster import Booster
+from colossalai.booster.plugin import HybridParallelPlugin
+from colossalai.cluster import DistCoordinator
+from colossalai.lazy import LazyInitContext
+from colossalai.utils import get_current_device
+
+
+class Bcolors:
+    HEADER = "\033[95m"
+    OKBLUE = "\033[94m"
+    OKCYAN = "\033[96m"
+    OKGREEN = "\033[92m"
+    WARNING = "\033[93m"
+    FAIL = "\033[91m"
+    ENDC = "\033[0m"
+    BOLD = "\033[1m"
+    UNDERLINE = "\033[4m"
+
+
+def print_output(text, output):
+    print(f"-----\n{Bcolors.OKBLUE}{text}{Bcolors.ENDC}{output[len(text):]}")
+
+
+@torch.no_grad()
+def inference(model, sp, text, max_new_tokens):
+    input_ids = sp.encode(text)
+    input_ids = torch.tensor([input_ids]).cuda()
+    attention_mask = torch.ones_like(input_ids)
+    inputs = {
+        "input_ids": input_ids,
+        "attention_mask": attention_mask,
+        "max_new_tokens": max_new_tokens,
+    }
+    outputs = model.generate(**inputs)
+    return outputs[0].tolist()
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--pretrained", type=str, default="hpcaitech/grok-1")
+    parser.add_argument("--tokenizer", type=str, default="tokenizer.model")
+    parser.add_argument("--text", type=str, nargs="+", default=["Hi, what's your name?"])
+    parser.add_argument("--max_new_tokens", type=int, default=30)
+    args = parser.parse_args()
+    start = time.time()
+    colossalai.launch_from_torch({})
+    coordinator = DistCoordinator()
+    plugin = HybridParallelPlugin(
+        tp_size=coordinator.world_size,
+        pp_size=1,
+        precision="bf16",
+        parallel_output=False,
+        custom_policy=Grok1ForCausalLMPolicy(),
+    )
+    booster = Booster(plugin=plugin)
+    torch.set_default_dtype(torch.bfloat16)
+    with LazyInitContext(default_device=get_current_device()):
+        model = AutoModelForCausalLM.from_pretrained(
+            args.pretrained, trust_remote_code=True, torch_dtype=torch.bfloat16
+        )
+    model, *_ = booster.boost(model)
+    sp = SentencePieceProcessor(model_file=args.tokenizer)
+    for text in args.text:
+        output = inference(model.unwrap(), sp, text, args.max_new_tokens)
+        if coordinator.is_master():
+            print_output(text, sp.decode(output))
+    coordinator.print_on_master(f"Overall time: {time.time() - start} seconds.")

From f1824486f602a7ddf7da61093e9930cb742343dd Mon Sep 17 00:00:00 2001
From: ver217
Date: Thu, 21 Mar 2024 16:33:38 +0800
Subject: [PATCH 4/8] [example] add grok-1 inference script

---
 examples/language/grok-1/inference.py         | 19 ++++++++++++++++---
 examples/language/grok-1/inference_tp.py      | 19 ++++++++++++++++---
 .../language/grok-1/run_inference_fast.sh     | 12 ++++++++++++
 .../language/grok-1/run_inference_slow.sh     | 12 ++++++++++++
 4 files changed, 56 insertions(+), 6 deletions(-)
 create mode 100755 examples/language/grok-1/run_inference_fast.sh
 create mode 100755 examples/language/grok-1/run_inference_slow.sh

diff --git a/examples/language/grok-1/inference.py b/examples/language/grok-1/inference.py
index 7dbfa231da23..5ca7d83ae34c 100644
--- a/examples/language/grok-1/inference.py
+++ b/examples/language/grok-1/inference.py
@@ -23,14 +23,14 @@ def print_output(text, output):
 
 
 @torch.no_grad()
-def inference(model, sp, text, max_new_tokens):
+def inference(model, sp, text, **generate_kwargs):
     input_ids = sp.encode(text)
     input_ids = torch.tensor([input_ids]).cuda()
     attention_mask = torch.ones_like(input_ids)
     inputs = {
         "input_ids": input_ids,
         "attention_mask": attention_mask,
-        "max_new_tokens": max_new_tokens,
+        **generate_kwargs,
     }
     outputs = model.generate(**inputs)
     return outputs[0].tolist()
@@ -42,6 +42,10 @@ def inference(model, sp, text, max_new_tokens):
     parser.add_argument("--tokenizer", type=str, default="tokenizer.model")
     parser.add_argument("--text", type=str, nargs="+", default=["Hi, what's your name?"])
     parser.add_argument("--max_new_tokens", type=int, default=30)
+    parser.add_argument("--do_sample", action="store_true", default=False)
+    parser.add_argument("--temperature", type=float, default=0.3, help="Set temperature value")
+    parser.add_argument("--top_k", type=int, default=50, help="Set top_k value for top-k-filtering")
+    parser.add_argument("--top_p", type=float, default=0.95, help="Set top_p value for generation")
     args = parser.parse_args()
     start = time.time()
     torch.set_default_dtype(torch.bfloat16)
@@ -53,6 +57,15 @@ def inference(model, sp, text, max_new_tokens):
     )
     sp = SentencePieceProcessor(model_file=args.tokenizer)
     for text in args.text:
-        output = inference(model, sp, text, args.max_new_tokens)
+        output = inference(
+            model,
+            sp,
+            text,
+            max_new_tokens=args.max_new_tokens,
+            do_sample=args.do_sample,
+            temperature=args.temperature,
+            top_k=args.top_k,
+            top_p=args.top_p,
+        )
         print_output(text, sp.decode(output))
     print(f"Overall time: {time.time() - start} seconds.")
diff --git a/examples/language/grok-1/inference_tp.py b/examples/language/grok-1/inference_tp.py
index fa5d5301fbe1..a1f49c7c9835 100644
--- a/examples/language/grok-1/inference_tp.py
+++ b/examples/language/grok-1/inference_tp.py
@@ -31,14 +31,14 @@ def print_output(text, output):
 
 
 @torch.no_grad()
-def inference(model, sp, text, max_new_tokens):
+def inference(model, sp, text, **generate_kwargs):
     input_ids = sp.encode(text)
     input_ids = torch.tensor([input_ids]).cuda()
     attention_mask = torch.ones_like(input_ids)
     inputs = {
         "input_ids": input_ids,
         "attention_mask": attention_mask,
-        "max_new_tokens": max_new_tokens,
+        **generate_kwargs,
     }
     outputs = model.generate(**inputs)
     return outputs[0].tolist()
@@ -50,6 +50,10 @@ def inference(model, sp, text, max_new_tokens):
     parser.add_argument("--tokenizer", type=str, default="tokenizer.model")
     parser.add_argument("--text", type=str, nargs="+", default=["Hi, what's your name?"])
     parser.add_argument("--max_new_tokens", type=int, default=30)
+    parser.add_argument("--do_sample", action="store_true", default=False)
+    parser.add_argument("--temperature", type=float, default=0.3, help="Set temperature value")
+    parser.add_argument("--top_k", type=int, default=50, help="Set top_k value for top-k-filtering")
+    parser.add_argument("--top_p", type=float, default=0.95, help="Set top_p value for generation")
     args = parser.parse_args()
     start = time.time()
     colossalai.launch_from_torch({})
@@ -70,7 +74,16 @@ def inference(model, sp, text, max_new_tokens):
     model, *_ = booster.boost(model)
     sp = SentencePieceProcessor(model_file=args.tokenizer)
     for text in args.text:
-        output = inference(model.unwrap(), sp, text, args.max_new_tokens)
+        output = inference(
+            model.unwrap(),
+            sp,
+            text,
+            max_new_tokens=args.max_new_tokens,
+            do_sample=args.do_sample,
+            temperature=args.temperature,
+            top_k=args.top_k,
+            top_p=args.top_p,
+        )
         if coordinator.is_master():
             print_output(text, sp.decode(output))
     coordinator.print_on_master(f"Overall time: {time.time() - start} seconds.")
diff --git a/examples/language/grok-1/run_inference_fast.sh b/examples/language/grok-1/run_inference_fast.sh
new file mode 100755
index 000000000000..41fa0ce29c50
--- /dev/null
+++ b/examples/language/grok-1/run_inference_fast.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+PRETRAINED=${1:-"hpcaitech/grok-1"}
+TOKENIZER=${2:-"tokenizer.model"}
+
+torchrun --standalone --nproc_per_node 8 inference_tp.py --pretrained "$PRETRAINED" \
+    --tokenizer "$TOKENIZER" \
+    --max_new_tokens 64 \
+    --do_sample \
+    --text "The company's annual conference, featuring keynote speakers and exclusive product launches, will be held at the Los Angeles Convention Center from October 20th to October 23rd, 2021. Extract the date mentioned in the above sentence." \
+    "将以下句子翻译成英语。 我喜欢看电影和读书。" \
+    "All books have the same weight, 10 books weigh 5kg, what is the weight of 2 books?"
diff --git a/examples/language/grok-1/run_inference_slow.sh b/examples/language/grok-1/run_inference_slow.sh
new file mode 100755
index 000000000000..545f99ad695c
--- /dev/null
+++ b/examples/language/grok-1/run_inference_slow.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+PRETRAINED=${1:-"hpcaitech/grok-1"}
+TOKENIZER=${2:-"tokenizer.model"}
+
+python3 inference.py --pretrained "$PRETRAINED" \
+    --tokenizer "$TOKENIZER" \
+    --max_new_tokens 64 \
+    --do_sample \
+    --text "The company's annual conference, featuring keynote speakers and exclusive product launches, will be held at the Los Angeles Convention Center from October 20th to October 23rd, 2021. Extract the date mentioned in the above sentence." \
+    "将以下句子翻译成英语。 我喜欢看电影和读书。" \
+    "All books have the same weight, 10 books weigh 5kg, what is the weight of 2 books?"
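The policy in patch 3 shards each attention block column-wise on the input projections (`attn.q_proj`, `attn.k_proj`, `attn.v_proj` via `Linear1D_Col`) and row-wise on the output projection (`attn.o_proj` via `Linear1D_Row`), with the same pairing applied to each expert's `linear`/`linear_v` and `linear_1`. As a rough illustration of why that pairing works — each rank produces a partial result that a single all-reduce sums back together — here is a minimal single-process sketch; the shapes and the 2-way split are assumptions for illustration, not part of the patch:

```python
# Sketch: column-parallel first linear + row-parallel second linear.
# Each "rank" computes a partial product; summing the partials (the job
# of the all-reduce in real tensor parallelism) recovers the full result.
import torch

torch.manual_seed(0)
x = torch.randn(4, 8)    # (batch, hidden)
w1 = torch.randn(8, 16)  # q/k/v-style projection, split by output columns
w2 = torch.randn(16, 8)  # o_proj-style projection, split by input rows

reference = x @ w1 @ w2  # unsharded computation

# Rank 0 holds the first half of w1's columns and w2's rows; rank 1 the rest.
partial_rank0 = (x @ w1[:, :8]) @ w2[:8, :]
partial_rank1 = (x @ w1[:, 8:]) @ w2[8:, :]
sharded = partial_rank0 + partial_rank1  # what the all-reduce would sum

assert torch.allclose(reference, sharded, atol=1e-5)
```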
From 95f48af1ea64fe23ed3f7028a4b6e4ca61c08a05 Mon Sep 17 00:00:00 2001
From: ver217
Date: Thu, 21 Mar 2024 16:42:12 +0800
Subject: [PATCH 5/8] [example] refactor code

---
 examples/language/grok-1/inference.py    | 43 ++--------------------
 examples/language/grok-1/inference_tp.py | 43 ++--------------------
 examples/language/grok-1/utils.py        | 46 ++++++++++++++++++++++++
 3 files changed, 50 insertions(+), 82 deletions(-)
 create mode 100644 examples/language/grok-1/utils.py

diff --git a/examples/language/grok-1/inference.py b/examples/language/grok-1/inference.py
index 5ca7d83ae34c..ca0ad0d4fe95 100644
--- a/examples/language/grok-1/inference.py
+++ b/examples/language/grok-1/inference.py
@@ -1,51 +1,12 @@
-import argparse
 import time
 
 import torch
 from sentencepiece import SentencePieceProcessor
 from transformers import AutoModelForCausalLM
-
-
-class Bcolors:
-    HEADER = "\033[95m"
-    OKBLUE = "\033[94m"
-    OKCYAN = "\033[96m"
-    OKGREEN = "\033[92m"
-    WARNING = "\033[93m"
-    FAIL = "\033[91m"
-    ENDC = "\033[0m"
-    BOLD = "\033[1m"
-    UNDERLINE = "\033[4m"
-
-
-def print_output(text, output):
-    print(f"-----\n{Bcolors.OKBLUE}{text}{Bcolors.ENDC}{output[len(text):]}")
-
-
-@torch.no_grad()
-def inference(model, sp, text, **generate_kwargs):
-    input_ids = sp.encode(text)
-    input_ids = torch.tensor([input_ids]).cuda()
-    attention_mask = torch.ones_like(input_ids)
-    inputs = {
-        "input_ids": input_ids,
-        "attention_mask": attention_mask,
-        **generate_kwargs,
-    }
-    outputs = model.generate(**inputs)
-    return outputs[0].tolist()
-
+from utils import get_default_parser, inference, print_output
 
 if __name__ == "__main__":
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--pretrained", type=str, default="hpcaitech/grok-1")
-    parser.add_argument("--tokenizer", type=str, default="tokenizer.model")
-    parser.add_argument("--text", type=str, nargs="+", default=["Hi, what's your name?"])
-    parser.add_argument("--max_new_tokens", type=int, default=30)
-    parser.add_argument("--do_sample", action="store_true", default=False)
-    parser.add_argument("--temperature", type=float, default=0.3, help="Set temperature value")
-    parser.add_argument("--top_k", type=int, default=50, help="Set top_k value for top-k-filtering")
-    parser.add_argument("--top_p", type=float, default=0.95, help="Set top_p value for generation")
+    parser = get_default_parser()
     args = parser.parse_args()
     start = time.time()
     torch.set_default_dtype(torch.bfloat16)
diff --git a/examples/language/grok-1/inference_tp.py b/examples/language/grok-1/inference_tp.py
index a1f49c7c9835..99de60e1f6be 100644
--- a/examples/language/grok-1/inference_tp.py
+++ b/examples/language/grok-1/inference_tp.py
@@ -1,10 +1,10 @@
-import argparse
 import time
 
 import torch
 from grok1_policy import Grok1ForCausalLMPolicy
 from sentencepiece import SentencePieceProcessor
 from transformers import AutoModelForCausalLM
+from utils import get_default_parser, inference, print_output
 
 import colossalai
 from colossalai.booster import Booster
@@ -13,47 +13,8 @@ from colossalai.lazy import LazyInitContext
 from colossalai.utils import get_current_device
 
-
-class Bcolors:
-    HEADER = "\033[95m"
-    OKBLUE = "\033[94m"
-    OKCYAN = "\033[96m"
-    OKGREEN = "\033[92m"
-    WARNING = "\033[93m"
-    FAIL = "\033[91m"
-    ENDC = "\033[0m"
-    BOLD = "\033[1m"
-    UNDERLINE = "\033[4m"
-
-
-def print_output(text, output):
-    print(f"-----\n{Bcolors.OKBLUE}{text}{Bcolors.ENDC}{output[len(text):]}")
-
-
-@torch.no_grad()
-def inference(model, sp, text, **generate_kwargs):
-    input_ids = sp.encode(text)
-    input_ids = torch.tensor([input_ids]).cuda()
-    attention_mask = torch.ones_like(input_ids)
-    inputs = {
-        "input_ids": input_ids,
-        "attention_mask": attention_mask,
-        **generate_kwargs,
-    }
-    outputs = model.generate(**inputs)
-    return outputs[0].tolist()
-
-
 if __name__ == "__main__":
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--pretrained", type=str, default="hpcaitech/grok-1")
-    parser.add_argument("--tokenizer", type=str, default="tokenizer.model")
-    parser.add_argument("--text", type=str, nargs="+", default=["Hi, what's your name?"])
-    parser.add_argument("--max_new_tokens", type=int, default=30)
-    parser.add_argument("--do_sample", action="store_true", default=False)
-    parser.add_argument("--temperature", type=float, default=0.3, help="Set temperature value")
-    parser.add_argument("--top_k", type=int, default=50, help="Set top_k value for top-k-filtering")
-    parser.add_argument("--top_p", type=float, default=0.95, help="Set top_p value for generation")
+    parser = get_default_parser()
     args = parser.parse_args()
     start = time.time()
     colossalai.launch_from_torch({})
diff --git a/examples/language/grok-1/utils.py b/examples/language/grok-1/utils.py
new file mode 100644
index 000000000000..f113f852eff6
--- /dev/null
+++ b/examples/language/grok-1/utils.py
@@ -0,0 +1,46 @@
+import argparse
+
+import torch
+
+
+class Bcolors:
+    HEADER = "\033[95m"
+    OKBLUE = "\033[94m"
+    OKCYAN = "\033[96m"
+    OKGREEN = "\033[92m"
+    WARNING = "\033[93m"
+    FAIL = "\033[91m"
+    ENDC = "\033[0m"
+    BOLD = "\033[1m"
+    UNDERLINE = "\033[4m"
+
+
+def print_output(text, output):
+    print(f"-----\n{Bcolors.OKBLUE}{text}{Bcolors.ENDC}{output[len(text):]}")
+
+
+@torch.no_grad()
+def inference(model, sp, text, **generate_kwargs):
+    input_ids = sp.encode(text)
+    input_ids = torch.tensor([input_ids]).cuda()
+    attention_mask = torch.ones_like(input_ids)
+    inputs = {
+        "input_ids": input_ids,
+        "attention_mask": attention_mask,
+        **generate_kwargs,
+    }
+    outputs = model.generate(**inputs)
+    return outputs[0].tolist()
+
+
+def get_default_parser():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--pretrained", type=str, default="hpcaitech/grok-1")
+    parser.add_argument("--tokenizer", type=str, default="tokenizer.model")
+    parser.add_argument("--text", type=str, nargs="+", default=["Hi, what's your name?"])
+    parser.add_argument("--max_new_tokens", type=int, default=30)
+    parser.add_argument("--do_sample", action="store_true", default=False)
+    parser.add_argument("--temperature", type=float, default=0.3, help="Set temperature value")
+    parser.add_argument("--top_k", type=int, default=50, help="Set top_k value for top-k-filtering")
+    parser.add_argument("--top_p", type=float, default=0.95, help="Set top_p value for generation")
+    return parser

From b736b3f55c58447d71a02916f11ee1c8284ba4fc Mon Sep 17 00:00:00 2001
From: ver217
Date: Thu, 21 Mar 2024 17:40:22 +0800
Subject: [PATCH 6/8] [example] add grok-1 readme

---
 examples/language/grok-1/README.md            | 41 +++++++++++++++++++
 examples/language/grok-1/requirements.txt     |  4 ++
 .../language/grok-1/run_inference_fast.sh     |  1 -
 .../language/grok-1/run_inference_slow.sh     |  1 -
 4 files changed, 45 insertions(+), 2 deletions(-)
 create mode 100644 examples/language/grok-1/README.md
 create mode 100644 examples/language/grok-1/requirements.txt

diff --git a/examples/language/grok-1/README.md b/examples/language/grok-1/README.md
new file mode 100644
index 000000000000..31f8c613d021
--- /dev/null
+++ b/examples/language/grok-1/README.md
@@ -0,0 +1,41 @@
+# Grok-1 Inference
+
+## Install
+
+```bash
+# Make sure you install colossalai from the latest source code
+git clone https://github.com/hpcaitech/ColossalAI.git
+cd ColossalAI
+pip install .
+cd examples/language/grok-1
+pip install -r requirements.txt
+```
+
+## Tokenizer preparation
+
+Download the tokenizer from the official grok-1 repository:
+
+```bash
+wget https://github.com/xai-org/grok-1/raw/main/tokenizer.model
+```
+
+## Inference
+
+We provide two scripts for inference. `run_inference_fast.sh` uses tensor parallelism provided by ColossalAI and is faster, while `run_inference_slow.sh` uses the automatic device mapping provided by transformers and is slower.
+
+Command format:
+
+```bash
+./run_inference_fast.sh <model_name_or_path> <tokenizer_path>
+./run_inference_slow.sh <model_name_or_path> <tokenizer_path>
+```
+
+`model_name_or_path` can be a local path or a model name from the Hugging Face model hub. We provide weights on the model hub under the name `hpcaitech/grok-1`.
+
+Command example:
+
+```bash
+./run_inference_fast.sh hpcaitech/grok-1 tokenizer.model
+```
+
+Loading the checkpoint takes 5-10 minutes, so don't worry if the script appears to be stuck.
diff --git a/examples/language/grok-1/requirements.txt b/examples/language/grok-1/requirements.txt
new file mode 100644
index 000000000000..15d5ea53a15e
--- /dev/null
+++ b/examples/language/grok-1/requirements.txt
@@ -0,0 +1,4 @@
+torch>=2.1.0,<2.2.0
+colossalai>=0.3.6
+sentencepiece==0.1.99
+transformers==4.35.0
diff --git a/examples/language/grok-1/run_inference_fast.sh b/examples/language/grok-1/run_inference_fast.sh
index 41fa0ce29c50..0dc398c53e33 100755
--- a/examples/language/grok-1/run_inference_fast.sh
+++ b/examples/language/grok-1/run_inference_fast.sh
@@ -6,7 +6,6 @@ TOKENIZER=${2:-"tokenizer.model"}
 torchrun --standalone --nproc_per_node 8 inference_tp.py --pretrained "$PRETRAINED" \
     --tokenizer "$TOKENIZER" \
     --max_new_tokens 64 \
-    --do_sample \
     --text "The company's annual conference, featuring keynote speakers and exclusive product launches, will be held at the Los Angeles Convention Center from October 20th to October 23rd, 2021. Extract the date mentioned in the above sentence." \
     "将以下句子翻译成英语。 我喜欢看电影和读书。" \
     "All books have the same weight, 10 books weigh 5kg, what is the weight of 2 books?"
diff --git a/examples/language/grok-1/run_inference_slow.sh b/examples/language/grok-1/run_inference_slow.sh
index 545f99ad695c..c64dd93b9e62 100755
--- a/examples/language/grok-1/run_inference_slow.sh
+++ b/examples/language/grok-1/run_inference_slow.sh
@@ -6,7 +6,6 @@ TOKENIZER=${2:-"tokenizer.model"}
 python3 inference.py --pretrained "$PRETRAINED" \
     --tokenizer "$TOKENIZER" \
     --max_new_tokens 64 \
-    --do_sample \
     --text "The company's annual conference, featuring keynote speakers and exclusive product launches, will be held at the Los Angeles Convention Center from October 20th to October 23rd, 2021. Extract the date mentioned in the above sentence." \
     "将以下句子翻译成英语。 我喜欢看电影和读书。" \
     "All books have the same weight, 10 books weigh 5kg, what is the weight of 2 books?"
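Because patch 5 factors the tokenizer and generation helpers out into `utils.py`, the slow (auto-device) path can also be driven from a Python session rather than the shell script. A minimal sketch, assuming it runs from the example directory with the tokenizer already downloaded; the prompt and model path are placeholders:

```python
# Hypothetical driver reusing the helpers added in patch 5; mirrors what
# inference.py does, but callable from an interactive session.
import torch
from sentencepiece import SentencePieceProcessor
from transformers import AutoModelForCausalLM
from utils import inference, print_output

torch.set_default_dtype(torch.bfloat16)
model = AutoModelForCausalLM.from_pretrained(
    "hpcaitech/grok-1",  # or a local checkpoint path
    trust_remote_code=True,
    device_map="auto",
    torch_dtype=torch.bfloat16,
)
sp = SentencePieceProcessor(model_file="tokenizer.model")

prompt = "Hi, what's your name?"  # placeholder prompt
output = inference(model, sp, prompt, max_new_tokens=30)
print_output(prompt, sp.decode(output))
```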
From 0caefe12d662bbd591a139a5750cd95d6f837c78 Mon Sep 17 00:00:00 2001
From: ver217
Date: Thu, 21 Mar 2024 17:42:07 +0800
Subject: [PATCH 7/8] [example] add test ci

---
 examples/language/grok-1/test_ci.sh | 1 +
 1 file changed, 1 insertion(+)
 create mode 100755 examples/language/grok-1/test_ci.sh

diff --git a/examples/language/grok-1/test_ci.sh b/examples/language/grok-1/test_ci.sh
new file mode 100755
index 000000000000..f6a0d658462b
--- /dev/null
+++ b/examples/language/grok-1/test_ci.sh
@@ -0,0 +1 @@
+pip install -r requirements.txt

From 135b95fe5af10f3e2752e791de836b86f00de576 Mon Sep 17 00:00:00 2001
From: ver217
Date: Thu, 21 Mar 2024 17:44:41 +0800
Subject: [PATCH 8/8] [example] update readme

---
 examples/language/grok-1/README.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/examples/language/grok-1/README.md b/examples/language/grok-1/README.md
index 31f8c613d021..c523f941262d 100644
--- a/examples/language/grok-1/README.md
+++ b/examples/language/grok-1/README.md
@@ -21,6 +21,8 @@ wget https://github.com/xai-org/grok-1/raw/main/tokenizer.model
 
 ## Inference
 
+You need 8x A100 80GB or equivalent GPUs to run the inference.
+
 We provide two scripts for inference. `run_inference_fast.sh` uses tensor parallelism provided by ColossalAI and is faster, while `run_inference_slow.sh` uses the automatic device mapping provided by transformers and is slower.
 
 Command format:
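The `--do_sample`, `--temperature`, `--top_k`, and `--top_p` flags added in patch 4 are forwarded straight to `model.generate`. For readers unfamiliar with these knobs, here is a self-contained sketch of what each one does to the next-token distribution; the toy logits and the filtering code are illustrative only, not how transformers implements them internally:

```python
# Toy demonstration of temperature, top-k, and top-p (nucleus) filtering.
import torch

logits = torch.tensor([2.0, 1.0, 0.5, -1.0])  # hypothetical next-token logits

# --temperature rescales logits before softmax: values < 1 sharpen the
# distribution, values > 1 flatten it (0.3 is the example's default).
probs = torch.softmax(logits / 0.3, dim=-1)

# --top_k keeps only the k highest-probability tokens.
top_k = 2
kth_logit = torch.topk(logits, top_k).values[-1]
probs_k = torch.where(logits >= kth_logit, probs, torch.zeros_like(probs))
probs_k = probs_k / probs_k.sum()

# --top_p keeps the smallest set of tokens whose cumulative mass reaches p.
top_p = 0.95
sorted_probs, order = probs.sort(descending=True)
keep = sorted_probs.cumsum(0) - sorted_probs < top_p
probs_p = torch.zeros_like(probs).scatter(0, order[keep], sorted_probs[keep])
probs_p = probs_p / probs_p.sum()

# --do_sample draws from the filtered distribution instead of taking argmax.
next_token = torch.multinomial(probs_k, num_samples=1)
print(next_token.item())
```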