From eeb49e27f90869a9a9d94562c57adb4840739c5c Mon Sep 17 00:00:00 2001 From: edbeeching Date: Wed, 6 Sep 2023 11:09:07 +0200 Subject: [PATCH 01/52] adds fastchat dialogue template and updates falcon model to use this template --- fastchat/conversation.py | 13 +++++++++++++ fastchat/model/model_adapter.py | 2 +- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/fastchat/conversation.py b/fastchat/conversation.py index f733be68a..d68b92de7 100644 --- a/fastchat/conversation.py +++ b/fastchat/conversation.py @@ -778,6 +778,19 @@ def get_conv_template(name: str) -> Conversation: stop_str="<|end|>", ) ) +# H4 default_v2 template +# reference: https://huggingface.co/spaces/HuggingFaceH4/starchat-playground/blob/main/dialogues.py +register_conv_template( + Conversation( + name="h4_default_v2", + system_template="<|system|>\n{system_message}", + roles=("<|user|>", "<|assistant|>"), + sep_style=SeparatorStyle.CHATML, + sep="<|endoftext|>", + stop_token_ids=[0, 49155], + stop_str="<|end|>", + ) +) # Baichuan-13B-Chat template register_conv_template( diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py index 8c2fbde32..95632b091 100644 --- a/fastchat/model/model_adapter.py +++ b/fastchat/model/model_adapter.py @@ -1110,7 +1110,7 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict): return model, tokenizer def get_default_conv_template(self, model_path: str) -> Conversation: - return get_conv_template("falcon") + return get_conv_template("h4_default_v2") class TigerBotAdapter(BaseModelAdapter): From 21b451064a496a1f9134d139542eaa2de2e6edaf Mon Sep 17 00:00:00 2001 From: edbeeching Date: Wed, 6 Sep 2023 12:02:58 +0200 Subject: [PATCH 02/52] adds model revision --- fastchat/llm_judge/gen_model_answer.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/fastchat/llm_judge/gen_model_answer.py b/fastchat/llm_judge/gen_model_answer.py index 3d093ecd5..9fc12ae63 100644 --- a/fastchat/llm_judge/gen_model_answer.py +++ b/fastchat/llm_judge/gen_model_answer.py @@ -19,6 +19,7 @@ def run_eval( model_path, + model_revision, model_id, question_file, question_begin, @@ -51,6 +52,7 @@ def run_eval( ans_handles.append( get_answers_func( model_path, + model_revision, model_id, questions[i : i + chunk_size], answer_file, @@ -68,6 +70,7 @@ def run_eval( @torch.inference_mode() def get_model_answers( model_path, + model_revision, model_id, questions, answer_file, @@ -84,6 +87,7 @@ def get_model_answers( load_8bit=False, cpu_offloading=False, debug=False, + revision=model_revision ) for question in tqdm(questions): @@ -192,6 +196,12 @@ def reorg_answer_file(answer_file): required=True, help="The path to the weights. 
This can be a local folder or a Hugging Face repo ID.", ) + parser.add_argument( + "--model-revision", + type=str, + default="main", + help="The revision of the model on the huggingface hub, default='main'", + ) parser.add_argument("--model-id", type=str, required=True) parser.add_argument( "--bench-name", @@ -251,6 +261,8 @@ def reorg_answer_file(answer_file): run_eval( args.model_path, + args.model_revision, + #args.model_trust_remote_code, args.model_id, question_file, args.question_begin, From 1d379e255eb6ea401cf729422e73166d3c67cf01 Mon Sep 17 00:00:00 2001 From: Edward Beeching Date: Wed, 6 Sep 2023 15:37:45 +0200 Subject: [PATCH 03/52] Update fastchat/conversation.py Co-authored-by: lewtun --- fastchat/conversation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fastchat/conversation.py b/fastchat/conversation.py index d68b92de7..6ef39fb5f 100644 --- a/fastchat/conversation.py +++ b/fastchat/conversation.py @@ -788,7 +788,7 @@ def get_conv_template(name: str) -> Conversation: sep_style=SeparatorStyle.CHATML, sep="<|endoftext|>", stop_token_ids=[0, 49155], - stop_str="<|end|>", + stop_str="<|endoftext|>", ) ) From ab0cf134b4412f12b8109bd5067666cb06d35841 Mon Sep 17 00:00:00 2001 From: edbeeching Date: Thu, 7 Sep 2023 14:36:01 +0200 Subject: [PATCH 04/52] changes name from fschat to fastchat to avoid naming issues during install in h4 --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 6c1d12f5e..01e60c035 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,7 +3,7 @@ requires = ["setuptools>=61.0"] build-backend = "setuptools.build_meta" [project] -name = "fschat" +name = "fastchat" version = "0.2.26" description = "An open platform for training, serving, and evaluating large language model based chatbots." 
readme = "README.md" From 806f080284533d2db9d59c1b1cf64204c5ca7705 Mon Sep 17 00:00:00 2001 From: edbeeching Date: Fri, 8 Sep 2023 11:29:20 +0200 Subject: [PATCH 05/52] adds trust remote code option for falcon model --- fastchat/llm_judge/gen_model_answer.py | 20 +++++++++++++++++--- fastchat/model/model_adapter.py | 4 +++- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/fastchat/llm_judge/gen_model_answer.py b/fastchat/llm_judge/gen_model_answer.py index 9fc12ae63..967d885ce 100644 --- a/fastchat/llm_judge/gen_model_answer.py +++ b/fastchat/llm_judge/gen_model_answer.py @@ -15,11 +15,21 @@ from fastchat.llm_judge.common import load_questions, temperature_config from fastchat.model import load_model, get_conversation_template - +def str2bool(v): + """Convert string to boolean.""" + if isinstance(v, bool): + return v + if v.lower() in ('yes', 'true', 't', 'y', '1'): + return True + elif v.lower() in ('no', 'false', 'f', 'n', '0'): + return False + else: + raise argparse.ArgumentTypeError('Boolean value expected.') def run_eval( model_path, model_revision, + trust_remote_code, model_id, question_file, question_begin, @@ -53,6 +63,7 @@ def run_eval( get_answers_func( model_path, model_revision, + trust_remote_code, model_id, questions[i : i + chunk_size], answer_file, @@ -71,6 +82,7 @@ def run_eval( def get_model_answers( model_path, model_revision, + trust_remote_code, model_id, questions, answer_file, @@ -87,7 +99,8 @@ def get_model_answers( load_8bit=False, cpu_offloading=False, debug=False, - revision=model_revision + revision=model_revision, + trust_remote_code=trust_remote_code, ) for question in tqdm(questions): @@ -202,6 +215,7 @@ def reorg_answer_file(answer_file): default="main", help="The revision of the model on the huggingface hub, default='main'", ) + parser.add_argument("--trust-remote-code", type=str2bool, nargs='?', const=True, default=False, help="A boolean flag",) parser.add_argument("--model-id", type=str, required=True) parser.add_argument( "--bench-name", @@ -262,7 +276,7 @@ def reorg_answer_file(answer_file): run_eval( args.model_path, args.model_revision, - #args.model_trust_remote_code, + args.trust_remote_code, args.model_id, question_file, args.question_begin, diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py index 95632b091..e511c1be3 100644 --- a/fastchat/model/model_adapter.py +++ b/fastchat/model/model_adapter.py @@ -157,6 +157,7 @@ def load_model( awq_config: Optional[AWQConfig] = None, revision: str = "main", debug: bool = False, + trust_remote_code=False, ): """Load a model from Hugging Face.""" # get model adapter @@ -231,6 +232,7 @@ def load_model( device=device, torch_dtype=kwargs["torch_dtype"], revision=revision, + trust_remote_code=trust_remote_code ) if debug: print(model) @@ -273,6 +275,7 @@ def load_model( model.to(device) return model, tokenizer kwargs["revision"] = revision + kwargs["trust_remote_code"] = trust_remote_code # Load model model, tokenizer = adapter.load_model(model_path, kwargs) @@ -1101,7 +1104,6 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict): model = AutoModelForCausalLM.from_pretrained( model_path, low_cpu_mem_usage=True, - trust_remote_code=True, **from_pretrained_kwargs, ) # In Falcon tokenizer config and special config there is not any pad token From cc9bf67c762f2ce9b37a41107c4035111e641ff3 Mon Sep 17 00:00:00 2001 From: edbeeching Date: Mon, 18 Sep 2023 15:44:48 +0200 Subject: [PATCH 06/52] adds peft model support --- fastchat/model/model_adapter.py | 25 
+++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py index e511c1be3..38ac82db8 100644 --- a/fastchat/model/model_adapter.py +++ b/fastchat/model/model_adapter.py @@ -23,8 +23,10 @@ LlamaTokenizer, LlamaForCausalLM, T5Tokenizer, + BitsAndBytesConfig ) - +from peft import PeftConfig, PeftModel +from huggingface_hub import list_repo_files from fastchat.constants import CPU_ISA from fastchat.modules.gptq import GptqConfig, load_gptq_quantized from fastchat.modules.awq import AWQConfig, load_awq_quantized @@ -47,6 +49,10 @@ os.environ.get("PEFT_SHARE_BASE_WEIGHTS", "false").lower() == "true" ) +def is_adapter_model(model_name_or_path: str, revision: str = "main") -> bool: + repo_files = list_repo_files(model_name_or_path, revision=revision) + return "adapter_model.bin" in repo_files + class BaseModelAdapter: """The base and the default model adapter.""" @@ -70,10 +76,25 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict): model_path, use_fast=False, revision=revision, trust_remote_code=True ) try: - model = AutoModelForCausalLM.from_pretrained( + if is_adapter_model(model_path, revision=revision): + print("Loading adapter model") + config = PeftConfig.from_pretrained(model_path, revision=revision) + base_model = AutoModelForCausalLM.from_pretrained( + config.base_model_name_or_path, + return_dict=True, + quantization_config=BitsAndBytesConfig(load_in_8bit=True), + #**from_pretrained_kwargs, + ) + base_model.resize_token_embeddings(len(tokenizer)) + model = PeftModel.from_pretrained(base_model, model_path, revision=revision) + else: + model = AutoModelForCausalLM.from_pretrained( model_path, low_cpu_mem_usage=True, **from_pretrained_kwargs ) except NameError: + + assert not is_adapter_model(model_path, revision=revision), "Load adapter models not implemented for AutoModel" + model = AutoModel.from_pretrained( model_path, low_cpu_mem_usage=True, **from_pretrained_kwargs ) From e15f86d23aee5799500852b0984c2eeb5468041c Mon Sep 17 00:00:00 2001 From: Edward Date: Tue, 19 Sep 2023 11:53:39 +0000 Subject: [PATCH 07/52] adds fixes for peft adapters and model revisions --- fastchat/model/model_adapter.py | 39 ++++++++++++++------------------- 1 file changed, 16 insertions(+), 23 deletions(-) diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py index 38ac82db8..f34ad44d0 100644 --- a/fastchat/model/model_adapter.py +++ b/fastchat/model/model_adapter.py @@ -76,25 +76,10 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict): model_path, use_fast=False, revision=revision, trust_remote_code=True ) try: - if is_adapter_model(model_path, revision=revision): - print("Loading adapter model") - config = PeftConfig.from_pretrained(model_path, revision=revision) - base_model = AutoModelForCausalLM.from_pretrained( - config.base_model_name_or_path, - return_dict=True, - quantization_config=BitsAndBytesConfig(load_in_8bit=True), - #**from_pretrained_kwargs, - ) - base_model.resize_token_embeddings(len(tokenizer)) - model = PeftModel.from_pretrained(base_model, model_path, revision=revision) - else: - model = AutoModelForCausalLM.from_pretrained( + model = AutoModelForCausalLM.from_pretrained( model_path, low_cpu_mem_usage=True, **from_pretrained_kwargs ) except NameError: - - assert not is_adapter_model(model_path, revision=revision), "Load adapter models not implemented for AutoModel" - model = AutoModel.from_pretrained( model_path, 
low_cpu_mem_usage=True, **from_pretrained_kwargs ) @@ -178,7 +163,8 @@ def load_model( awq_config: Optional[AWQConfig] = None, revision: str = "main", debug: bool = False, - trust_remote_code=False, + trust_remote_code: bool=False, + base_model_revision: str ="main" ): """Load a model from Hugging Face.""" # get model adapter @@ -297,6 +283,7 @@ def load_model( return model, tokenizer kwargs["revision"] = revision kwargs["trust_remote_code"] = trust_remote_code + kwargs["base_model_revision"] = base_model_revision # Load model model, tokenizer = adapter.load_model(model_path, kwargs) @@ -483,8 +470,8 @@ def match(self, model_path: str): def load_model(self, model_path: str, from_pretrained_kwargs: dict): """Loads the base model then the (peft) adapter weights""" from peft import PeftConfig, PeftModel - - config = PeftConfig.from_pretrained(model_path) + revision = from_pretrained_kwargs.get("revision", "main") + config = PeftConfig.from_pretrained(model_path, revision=revision) base_model_path = config.base_model_name_or_path if "peft" in base_model_path: raise ValueError( @@ -516,17 +503,21 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict): # Super important: make sure we use model_path as the # `adapter_name`. model = PeftModel.from_pretrained( - base_model, model_path, adapter_name=model_path + base_model, model_path, adapter_name=model_path, revision=revision ) peft_model_cache[base_model_path] = (model, tokenizer) return model, tokenizer # In the normal case, load up the base model weights again. base_adapter = get_model_adapter(base_model_path) + base_model_from_pretrained_kwargs = { + "revision": from_pretrained_kwargs.get("base_model_revision", "main"), + "trust_remote_code": from_pretrained_kwargs.get("trust_remote_code", False) + } base_model, tokenizer = base_adapter.load_model( - base_model_path, from_pretrained_kwargs + base_model_path, base_model_from_pretrained_kwargs, ) - model = PeftModel.from_pretrained(base_model, model_path) + model = PeftModel.from_pretrained(base_model, model_path, revision=revision) return model, tokenizer def get_default_conv_template(self, model_path: str) -> Conversation: @@ -1120,8 +1111,10 @@ def match(self, model_path: str): def load_model(self, model_path: str, from_pretrained_kwargs: dict): revision = from_pretrained_kwargs.get("revision", "main") + + print("FalconAdapter", model_path, from_pretrained_kwargs) # Strongly suggest using bf16, which is recommended by the author of Falcon - tokenizer = AutoTokenizer.from_pretrained(model_path, revision=revision) + tokenizer = AutoTokenizer.from_pretrained(model_path, revision=revision, trust_remote_code=True) model = AutoModelForCausalLM.from_pretrained( model_path, low_cpu_mem_usage=True, From 20aaeac88f3541dad7b2c6a82629ff2136bcc935 Mon Sep 17 00:00:00 2001 From: Lewis Tunstall Date: Mon, 25 Sep 2023 13:18:52 +0000 Subject: [PATCH 08/52] Only pass base_model_revision for adapter models --- fastchat/model/model_adapter.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py index f34ad44d0..171f6990c 100644 --- a/fastchat/model/model_adapter.py +++ b/fastchat/model/model_adapter.py @@ -283,7 +283,8 @@ def load_model( return model, tokenizer kwargs["revision"] = revision kwargs["trust_remote_code"] = trust_remote_code - kwargs["base_model_revision"] = base_model_revision + if is_adapter_model(model_path, revision=base_model_revision) is True: + kwargs["base_model_revision"] = base_model_revision # 
Load model model, tokenizer = adapter.load_model(model_path, kwargs) From 7de7691c8be55601c6d02ecb72b0c3d8ffb155fb Mon Sep 17 00:00:00 2001 From: Lewis Tunstall Date: Mon, 25 Sep 2023 15:01:30 +0000 Subject: [PATCH 09/52] Fix adapter check for local repos --- fastchat/model/model_adapter.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py index 171f6990c..e3cca4990 100644 --- a/fastchat/model/model_adapter.py +++ b/fastchat/model/model_adapter.py @@ -42,6 +42,7 @@ replace_llama_attn_with_non_inplace_operations, ) from fastchat.utils import get_gpu_memory +from huggingface_hub.utils._validators import HFValidationError # Check an environment variable to check if we should be sharing Peft model # weights. When false we treat all Peft models as separate. @@ -50,7 +51,11 @@ ) def is_adapter_model(model_name_or_path: str, revision: str = "main") -> bool: - repo_files = list_repo_files(model_name_or_path, revision=revision) + try: + repo_files = list_repo_files(model_name_or_path, revision=revision) + except HFValidationError: + # check local files + repo_files = os.listdir(model_name_or_path) return "adapter_model.bin" in repo_files From df43acc93e833ad8814242eb03037b514ab407d0 Mon Sep 17 00:00:00 2001 From: Lewis Tunstall Date: Mon, 25 Sep 2023 18:22:47 +0000 Subject: [PATCH 10/52] Load PeftAdapter when model is adapter --- fastchat/model/model_adapter.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py index e3cca4990..c4d1c1cd9 100644 --- a/fastchat/model/model_adapter.py +++ b/fastchat/model/model_adapter.py @@ -173,7 +173,7 @@ def load_model( ): """Load a model from Hugging Face.""" # get model adapter - adapter = get_model_adapter(model_path) + adapter = PeftModelAdapter() if is_adapter_model(model_path, revision=revision) else get_model_adapter(model_path) # Handle device mapping cpu_offloading = raise_warning_for_incompatible_cpu_offloading_configuration( @@ -1118,7 +1118,6 @@ def match(self, model_path: str): def load_model(self, model_path: str, from_pretrained_kwargs: dict): revision = from_pretrained_kwargs.get("revision", "main") - print("FalconAdapter", model_path, from_pretrained_kwargs) # Strongly suggest using bf16, which is recommended by the author of Falcon tokenizer = AutoTokenizer.from_pretrained(model_path, revision=revision, trust_remote_code=True) model = AutoModelForCausalLM.from_pretrained( From 8e4fb2449427ccbaa0988213ca1ff26f4a59318d Mon Sep 17 00:00:00 2001 From: Lewis Tunstall Date: Tue, 26 Sep 2023 07:03:42 +0000 Subject: [PATCH 11/52] Propagate base_model_revision correctly --- fastchat/model/model_adapter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py index c4d1c1cd9..a79352673 100644 --- a/fastchat/model/model_adapter.py +++ b/fastchat/model/model_adapter.py @@ -288,7 +288,7 @@ def load_model( return model, tokenizer kwargs["revision"] = revision kwargs["trust_remote_code"] = trust_remote_code - if is_adapter_model(model_path, revision=base_model_revision) is True: + if is_adapter_model(model_path, revision=revision) is True: kwargs["base_model_revision"] = base_model_revision # Load model From b880adae8089965a18ae239751e87e0dc5c667c1 Mon Sep 17 00:00:00 2001 From: Lewis Tunstall Date: Tue, 26 Sep 2023 12:24:28 +0000 Subject: [PATCH 12/52] Add H4 dialogue to llama 2 model adapter --- 
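[Not part of the commit: with this change, Llama-2 prompts are built from the h4_default_v2 template registered in PATCH 01. A minimal sketch of what that template renders to, assuming FastChat's CHATML separator style — the exact whitespace depends on Conversation.get_prompt():

    from fastchat.conversation import get_conv_template

    conv = get_conv_template("h4_default_v2")
    conv.set_system_message("You are a helpful assistant.")
    conv.append_message(conv.roles[0], "What is 2 + 2?")
    conv.append_message(conv.roles[1], None)  # leave the <|assistant|> turn open for generation
    print(conv.get_prompt())
    # Roughly:
    # <|system|>
    # You are a helpful assistant.<|endoftext|>
    # <|user|>
    # What is 2 + 2?<|endoftext|>
    # <|assistant|>
]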
fastchat/model/model_adapter.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py index a79352673..61888eb12 100644 --- a/fastchat/model/model_adapter.py +++ b/fastchat/model/model_adapter.py @@ -114,8 +114,10 @@ def register_model_adapter(cls): @cache -def get_model_adapter(model_path: str) -> BaseModelAdapter: +def get_model_adapter(model_path: str, revision: str = "main") -> BaseModelAdapter: """Get a model adapter for a model_path.""" + if is_adapter_model(model_path, revision=revision): + return PeftModelAdapter() model_path_basename = os.path.basename(os.path.normpath(model_path)) # Try the basename of model_path at first @@ -173,7 +175,8 @@ def load_model( ): """Load a model from Hugging Face.""" # get model adapter - adapter = PeftModelAdapter() if is_adapter_model(model_path, revision=revision) else get_model_adapter(model_path) + adapter = get_model_adapter(model_path, revision=revision) + print(f"Using model adapter: {adapter.__class__.__name__}") # Handle device mapping cpu_offloading = raise_warning_for_incompatible_cpu_offloading_configuration( @@ -1261,7 +1264,7 @@ class Llama2Adapter(BaseModelAdapter): """The model adapter for llama-2""" def match(self, model_path: str): - return "llama-2" in model_path.lower() + return "llama-2" or "llama2" in model_path.lower() def load_model(self, model_path: str, from_pretrained_kwargs: dict): model, tokenizer = super().load_model(model_path, from_pretrained_kwargs) @@ -1270,7 +1273,7 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict): return model, tokenizer def get_default_conv_template(self, model_path: str) -> Conversation: - return get_conv_template("llama-2") + return get_conv_template("h4_default_v2") class CuteGPTAdapter(BaseModelAdapter): From 293966294885054cfad0501bdafeebfc896418bd Mon Sep 17 00:00:00 2001 From: Lewis Tunstall Date: Tue, 26 Sep 2023 15:15:43 +0000 Subject: [PATCH 13/52] Fix adapter for judges --- fastchat/model/model_adapter.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py index 61888eb12..cab2a2c7b 100644 --- a/fastchat/model/model_adapter.py +++ b/fastchat/model/model_adapter.py @@ -116,8 +116,9 @@ def register_model_adapter(cls): @cache def get_model_adapter(model_path: str, revision: str = "main") -> BaseModelAdapter: """Get a model adapter for a model_path.""" - if is_adapter_model(model_path, revision=revision): + if model_path not in ["gpt-4", "gpt-3.5-turbo", "claude-2", "claude-instant-1"] and is_adapter_model(model_path, revision=revision): return PeftModelAdapter() model_path_basename = os.path.basename(os.path.normpath(model_path)) # Try the basename of model_path at first From b95233f36c6543e5029d44f74c5ed8ecb2641832 Mon Sep 17 00:00:00 2001 From: Lewis Tunstall Date: Thu, 28 Sep 2023 07:47:41 +0000 Subject: [PATCH 14/52] Add mistral adapter --- fastchat/conversation.py | 13 +++++++++++++ fastchat/model/model_adapter.py | 17 ++++++++++++++++- 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/fastchat/conversation.py b/fastchat/conversation.py index 6ef39fb5f..9709344c8 100644 --- a/fastchat/conversation.py +++ b/fastchat/conversation.py @@ -792,6 +792,19 @@ def get_conv_template(name: str) -> Conversation: ) ) +# H4 default_v3 template for llama / mistral models +register_conv_template( + Conversation( + name="h4_default_v3", + system_template="<|system|>\n{system_message}", 
+ roles=("<|user|>", "<|assistant|>"), + sep_style=SeparatorStyle.CHATML, + sep="</s>", + stop_token_ids=[0, 49155], + stop_str="</s>", + ) +) + # Baichuan-13B-Chat template register_conv_template( # source: https://huggingface.co/baichuan-inc/Baichuan-13B-Chat/blob/19ef51ba5bad8935b03acd20ff04a269210983bc/modeling_baichuan.py#L555 diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py index cab2a2c7b..6e7129b0c 100644 --- a/fastchat/model/model_adapter.py +++ b/fastchat/model/model_adapter.py @@ -1274,7 +1274,22 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict): return model, tokenizer def get_default_conv_template(self, model_path: str) -> Conversation: - return get_conv_template("h4_default_v2") + return get_conv_template("h4_default_v3") + +class MistralAdapter(BaseModelAdapter): + """The model adapter for mistral""" + + def match(self, model_path: str): + return "mistral" in model_path.lower() + + def load_model(self, model_path: str, from_pretrained_kwargs: dict): + model, tokenizer = super().load_model(model_path, from_pretrained_kwargs) + model.config.eos_token_id = tokenizer.eos_token_id + model.config.pad_token_id = tokenizer.pad_token_id + return model, tokenizer + + def get_default_conv_template(self, model_path: str) -> Conversation: + return get_conv_template("h4_default_v3") class CuteGPTAdapter(BaseModelAdapter): From 87aabf414b9683e61e5e4ff03a2efe362a7cc8ce Mon Sep 17 00:00:00 2001 From: Lewis Tunstall Date: Mon, 2 Oct 2023 06:50:30 +0000 Subject: [PATCH 15/52] Fix stop_token_ids for llama / mistral models --- fastchat/conversation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fastchat/conversation.py b/fastchat/conversation.py index 9709344c8..90d5d2015 100644 --- a/fastchat/conversation.py +++ b/fastchat/conversation.py @@ -800,7 +800,7 @@ def get_conv_template(name: str) -> Conversation: roles=("<|user|>", "<|assistant|>"), sep_style=SeparatorStyle.CHATML, sep="</s>", - stop_token_ids=[0, 49155], + stop_token_ids=[2], stop_str="</s>", ) ) From 1bd2f202314801cef422f3b108043e1c7781a763 Mon Sep 17 00:00:00 2001 From: Lewis Tunstall Date: Mon, 2 Oct 2023 07:43:43 +0000 Subject: [PATCH 16/52] Specify EOS token IDs for StarCoder and Falcon models --- fastchat/conversation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fastchat/conversation.py b/fastchat/conversation.py index 90d5d2015..f3beeeaa2 100644 --- a/fastchat/conversation.py +++ b/fastchat/conversation.py @@ -787,7 +787,7 @@ def get_conv_template(name: str) -> Conversation: roles=("<|user|>", "<|assistant|>"), sep_style=SeparatorStyle.CHATML, sep="<|endoftext|>", - stop_token_ids=[0, 49155], + stop_token_ids=[0, 11], # Starcoder EOS ID = 0, Falcon EOS ID = 11 stop_str="<|endoftext|>", ) ) From fd4165c1e0539039b6bc61948e2c15877d9c25b4 Mon Sep 17 00:00:00 2001 From: Lewis Tunstall Date: Sat, 14 Oct 2023 05:53:00 +0000 Subject: [PATCH 17/52] Fix llama adapter --- fastchat/model/model_adapter.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py index 6e7129b0c..dcaa2dd66 100644 --- a/fastchat/model/model_adapter.py +++ b/fastchat/model/model_adapter.py @@ -177,7 +177,7 @@ def load_model( """Load a model from Hugging Face.""" # get model adapter adapter = get_model_adapter(model_path, revision=revision) - print(f"Using model adapter: {adapter.__class__.__name__}") + print(f"Using model adapter: {adapter.__class__.__name__} for model path {model_path} and revision 
{revision}") # Handle device mapping cpu_offloading = raise_warning_for_incompatible_cpu_offloading_configuration( @@ -1265,7 +1265,7 @@ class Llama2Adapter(BaseModelAdapter): """The model adapter for llama-2""" def match(self, model_path: str): - return "llama-2" or "llama2" in model_path.lower() + return "llama-2" in model_path.lower() or "llama2" in model_path.lower() def load_model(self, model_path: str, from_pretrained_kwargs: dict): model, tokenizer = super().load_model(model_path, from_pretrained_kwargs) From 0fe4bc3a4d41ed197f4eb9077545656fa4f9850b Mon Sep 17 00:00:00 2001 From: Lewis Tunstall Date: Sat, 14 Oct 2023 13:46:03 +0000 Subject: [PATCH 18/52] Register Mistral adatper --- fastchat/model/model_adapter.py | 1 + 1 file changed, 1 insertion(+) diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py index dcaa2dd66..2a5854d36 100644 --- a/fastchat/model/model_adapter.py +++ b/fastchat/model/model_adapter.py @@ -1676,6 +1676,7 @@ def get_default_conv_template(self, model_path: str) -> Conversation: register_model_adapter(OpenLLaMaOpenInstructAdapter) register_model_adapter(ReaLMAdapter) register_model_adapter(CodeLlamaAdapter) +register_model_adapter(MistralAdapter) # After all adapters, try the default base adapter. register_model_adapter(BaseModelAdapter) From f42155966f09cd248b8ffd0ce53b68f7358442b1 Mon Sep 17 00:00:00 2001 From: lewtun Date: Fri, 1 Dec 2023 16:13:26 +0100 Subject: [PATCH 19/52] Add DeepSeek adapter and minor tweaks to enable generation to work directly from `FastChat` repo (#5) * Add tweaks * Add DeepSeek adapter --- .gitignore | 3 +++ fastchat/llm_judge/gen_judgment.py | 2 +- fastchat/llm_judge/gen_model_answer.py | 4 ++-- fastchat/model/model_adapter.py | 27 ++++++++++++++++++++++---- 4 files changed, 29 insertions(+), 7 deletions(-) diff --git a/.gitignore b/.gitignore index 94b6e614d..82276c686 100644 --- a/.gitignore +++ b/.gitignore @@ -29,3 +29,6 @@ tests/state_of_the_union.txt # Build build + +# Data +fastchat/llm_judge/data/ \ No newline at end of file diff --git a/fastchat/llm_judge/gen_judgment.py b/fastchat/llm_judge/gen_judgment.py index a1c70b295..7b1b18116 100644 --- a/fastchat/llm_judge/gen_judgment.py +++ b/fastchat/llm_judge/gen_judgment.py @@ -301,7 +301,7 @@ def make_judge_single(judge_model, judge_prompts): # Show match stats and prompt enter to continue print("Stats:") print(json.dumps(match_stat, indent=4)) - input("Press Enter to confirm...") + # input("Press Enter to confirm...") # Play matches if args.parallel == 1: diff --git a/fastchat/llm_judge/gen_model_answer.py b/fastchat/llm_judge/gen_model_answer.py index 967d885ce..8bbfd5446 100644 --- a/fastchat/llm_judge/gen_model_answer.py +++ b/fastchat/llm_judge/gen_model_answer.py @@ -112,7 +112,7 @@ def get_model_answers( choices = [] for i in range(num_choices): torch.manual_seed(i) - conv = get_conversation_template(model_id) + conv = get_conversation_template(model_path) turns = [] for j in range(len(question["turns"])): qs = question["turns"][j] @@ -129,7 +129,7 @@ def get_model_answers( # some models may error out when generating long outputs try: output_ids = model.generate( - torch.as_tensor(input_ids).cuda(), + inputs=torch.as_tensor(input_ids).cuda(), do_sample=do_sample, temperature=temperature, max_new_tokens=max_new_token, diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py index 2a5854d36..c48c02ea6 100644 --- a/fastchat/model/model_adapter.py +++ b/fastchat/model/model_adapter.py @@ -52,11 +52,12 @@ def 
is_adapter_model(model_name_or_path: str, revision: str = "main") -> bool: try: + # Try first if model on a Hub repo repo_files = list_repo_files(model_name_or_path, revision=revision) except HFValidationError: - # check local files + # If not, check local repo repo_files = os.listdir(model_name_or_path) - return "adapter_model.bin" in repo_files + return "adapter_model.safetensors" in repo_files or "adapter_model.bin" in repo_files class BaseModelAdapter: @@ -177,7 +178,7 @@ def load_model( """Load a model from Hugging Face.""" # get model adapter adapter = get_model_adapter(model_path, revision=revision) - print(f"Using model adapter: {adapter.__class__.__name__} for model path {model_path} and revision {revision}") + print(f"Using model adapter: {adapter.__class__.__name__} for {model_path=} and {revision=}") # Handle device mapping cpu_offloading = raise_warning_for_incompatible_cpu_offloading_configuration( @@ -541,7 +542,9 @@ def get_default_conv_template(self, model_path: str) -> Conversation: ) base_model_path = config.base_model_name_or_path base_adapter = get_model_adapter(base_model_path) - return base_adapter.get_default_conv_template(config.base_model_name_or_path) + conv_template = base_adapter.get_default_conv_template(config.base_model_name_or_path) + print(f"Using chat template `{conv_template.name}` for {base_model_path=}") + return conv_template class VicunaAdapter(BaseModelAdapter): @@ -1291,6 +1294,21 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict): def get_default_conv_template(self, model_path: str) -> Conversation: return get_conv_template("h4_default_v3") +class H4DeepSeekAdapter(BaseModelAdapter): + """The model adapter for H4 DeepSeek models""" + + def match(self, model_path: str): + return "deepseek" in model_path.lower() + + def load_model(self, model_path: str, from_pretrained_kwargs: dict): + model, tokenizer = super().load_model(model_path, from_pretrained_kwargs) + model.config.eos_token_id = tokenizer.eos_token_id + model.config.pad_token_id = tokenizer.pad_token_id + return model, tokenizer + + def get_default_conv_template(self, model_path: str) -> Conversation: + return get_conv_template("h4_default_v3") + class CuteGPTAdapter(BaseModelAdapter): """The model adapter for llama-2""" @@ -1677,6 +1695,7 @@ def get_default_conv_template(self, model_path: str) -> Conversation: register_model_adapter(ReaLMAdapter) register_model_adapter(CodeLlamaAdapter) register_model_adapter(MistralAdapter) +register_model_adapter(H4DeepSeekAdapter) # After all adapters, try the default base adapter. 
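# (Sketch, not part of the patch: get_model_adapter() walks model_adapters in
# registration order and returns the first adapter whose match() succeeds, and
# BaseModelAdapter.match() always returns True, so it only catches paths that
# nothing above it claimed. Roughly:
#
#     for adapter in model_adapters:
#         if adapter.match(model_path):
#             return adapter
# )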
register_model_adapter(BaseModelAdapter) From a4fe1a4459903cde6011dfb1c46bfb2b0ebbfbfb Mon Sep 17 00:00:00 2001 From: lewtun Date: Sat, 2 Dec 2023 13:50:38 +0100 Subject: [PATCH 20/52] Fix device for multi-GPU inference (#6) * Fix device for multi-GPU * Add debug --- fastchat/model/model_adapter.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py index c48c02ea6..c578e2bd9 100644 --- a/fastchat/model/model_adapter.py +++ b/fastchat/model/model_adapter.py @@ -306,7 +306,7 @@ def load_model( ): model = ipex.optimize(model, dtype=kwargs["torch_dtype"]) - if (device == "cuda" and num_gpus == 1 and not cpu_offloading) or device in ( + if (device == "cuda" and num_gpus >= 1 and not cpu_offloading) or device in ( "mps", "xpu", ): @@ -318,6 +318,8 @@ def load_model( if debug: print(model) + print(f"Model loaded on {model.device=} for {device=} and {num_gpus=}") + return model, tokenizer From 876db2cfe74b5482995fbec234c233dc70387c19 Mon Sep 17 00:00:00 2001 From: Lewis Tunstall Date: Sat, 2 Dec 2023 14:41:51 +0000 Subject: [PATCH 21/52] Fix sharding --- fastchat/model/model_adapter.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py index c578e2bd9..da9c0468f 100644 --- a/fastchat/model/model_adapter.py +++ b/fastchat/model/model_adapter.py @@ -198,6 +198,7 @@ def load_model( elif device == "cuda": kwargs = {"torch_dtype": torch.float16} if num_gpus != 1: + print(f"Sharding model across {num_gpus} GPUs") kwargs["device_map"] = "auto" if max_gpu_memory is None: kwargs[ @@ -306,7 +307,7 @@ def load_model( ): model = ipex.optimize(model, dtype=kwargs["torch_dtype"]) - if (device == "cuda" and num_gpus >= 1 and not cpu_offloading) or device in ( + if (device == "cuda" and num_gpus == 1 and not cpu_offloading) or device in ( "mps", "xpu", ): From 0914f36af7192fd8370dd60e4a5f026c9d105855 Mon Sep 17 00:00:00 2001 From: Lewis Tunstall Date: Sat, 2 Dec 2023 18:50:33 +0000 Subject: [PATCH 22/52] Fix base model kwargs --- fastchat/model/model_adapter.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py index da9c0468f..26682b96f 100644 --- a/fastchat/model/model_adapter.py +++ b/fastchat/model/model_adapter.py @@ -526,11 +526,14 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict): base_adapter = get_model_adapter(base_model_path) base_model_from_pretrained_kwargs = { "revision": from_pretrained_kwargs.get("base_model_revision", "main"), - "trust_remote_code": from_pretrained_kwargs.get("trust_remote_code", False) + "trust_remote_code": from_pretrained_kwargs.get("trust_remote_code", False), + "device_map": from_pretrained_kwargs.get("device_map", "auto"), + "torch_dtype": from_pretrained_kwargs.get("torch_dtype", torch.float16), } base_model, tokenizer = base_adapter.load_model( base_model_path, base_model_from_pretrained_kwargs, ) + print(f"Base model loaded on device {base_model.device} for {base_model_path=} and {base_model_from_pretrained_kwargs=}") model = PeftModel.from_pretrained(base_model, model_path, revision=revision) return model, tokenizer From d0710fc37244591480a64fa2925ba2ecb576d80e Mon Sep 17 00:00:00 2001 From: Lewis Tunstall Date: Sun, 10 Dec 2023 14:07:27 +0000 Subject: [PATCH 23/52] Add Mixtral adapter --- fastchat/model/model_adapter.py | 16 ++++++++++++++ 1 file changed, 16 insertions(+) diff --git 
a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py index 26682b96f..0ddbc666c 100644 --- a/fastchat/model/model_adapter.py +++ b/fastchat/model/model_adapter.py @@ -1315,6 +1315,21 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict): def get_default_conv_template(self, model_path: str) -> Conversation: return get_conv_template("h4_default_v3") +class H4MixtralAdapter(BaseModelAdapter): + """The model adapter for H4 Mixtral models""" + + def match(self, model_path: str): + return "mixtral" in model_path.lower() + + def load_model(self, model_path: str, from_pretrained_kwargs: dict): + model, tokenizer = super().load_model(model_path, from_pretrained_kwargs) + model.config.eos_token_id = tokenizer.eos_token_id + model.config.pad_token_id = tokenizer.pad_token_id + return model, tokenizer + + def get_default_conv_template(self, model_path: str) -> Conversation: + return get_conv_template("h4_default_v3") + class CuteGPTAdapter(BaseModelAdapter): """The model adapter for llama-2""" @@ -1702,6 +1717,7 @@ def get_default_conv_template(self, model_path: str) -> Conversation: register_model_adapter(CodeLlamaAdapter) register_model_adapter(MistralAdapter) register_model_adapter(H4DeepSeekAdapter) +register_model_adapter(H4MixtralAdapter) # After all adapters, try the default base adapter. register_model_adapter(BaseModelAdapter) From 8651d52ca8766629aba6c33988503be16fda9536 Mon Sep 17 00:00:00 2001 From: lewtun Date: Wed, 13 Dec 2023 22:10:40 +0100 Subject: [PATCH 24/52] Add ChatML template (#7) --- fastchat/conversation.py | 13 +++++++++++++ fastchat/model/model_adapter.py | 24 ++++++++++++++++++++---- 2 files changed, 33 insertions(+), 4 deletions(-) diff --git a/fastchat/conversation.py b/fastchat/conversation.py index f3beeeaa2..67ba7df38 100644 --- a/fastchat/conversation.py +++ b/fastchat/conversation.py @@ -805,6 +805,19 @@ def get_conv_template(name: str) -> Conversation: ) ) +# Default ChatML format +register_conv_template( + Conversation( + name="chatml", + system_template="<|im_start|>system\n{system_message}", + roles=("<|im_start|>user", "<|im_start|>assistant"), + sep_style=SeparatorStyle.CHATML, + sep="<|im_end|>", + stop_token_ids=[32000, 32001], + stop_str="<|im_end|>", + ) +) + # Baichuan-13B-Chat template register_conv_template( # source: https://huggingface.co/baichuan-inc/Baichuan-13B-Chat/blob/19ef51ba5bad8935b03acd20ff04a269210983bc/modeling_baichuan.py#L555 diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py index 0ddbc666c..82b8df3a4 100644 --- a/fastchat/model/model_adapter.py +++ b/fastchat/model/model_adapter.py @@ -1283,7 +1283,11 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict): return model, tokenizer def get_default_conv_template(self, model_path: str) -> Conversation: - return get_conv_template("h4_default_v3") + tokenizer = AutoTokenizer.from_pretrained(model_path) + if "<|im_start|>" in tokenizer.chat_template: + return get_conv_template("chatml") + else: + return get_conv_template("h4_default_v3") class MistralAdapter(BaseModelAdapter): """The model adapter for mistral""" @@ -1298,7 +1302,11 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict): return model, tokenizer def get_default_conv_template(self, model_path: str) -> Conversation: - return get_conv_template("h4_default_v3") + tokenizer = AutoTokenizer.from_pretrained(model_path) + if "<|im_start|>" in tokenizer.chat_template: + return get_conv_template("chatml") + else: + return 
get_conv_template("h4_default_v3") class H4DeepSeekAdapter(BaseModelAdapter): """The model adapter for H4 DeepSeek models""" @@ -1313,7 +1321,11 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict): return model, tokenizer def get_default_conv_template(self, model_path: str) -> Conversation: - return get_conv_template("h4_default_v3") + tokenizer = AutoTokenizer.from_pretrained(model_path) + if "<|im_start|>" in tokenizer.chat_template: + return get_conv_template("chatml") + else: + return get_conv_template("h4_default_v3") class H4MixtralAdapter(BaseModelAdapter): """The model adapter for H4 Mixtral models""" @@ -1328,7 +1340,11 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict): return model, tokenizer def get_default_conv_template(self, model_path: str) -> Conversation: - return get_conv_template("h4_default_v3") + tokenizer = AutoTokenizer.from_pretrained(model_path) + if "<|im_start|>" in tokenizer.chat_template: + return get_conv_template("chatml") + else: + return get_conv_template("h4_default_v3") class CuteGPTAdapter(BaseModelAdapter): From b6b6995da57640461f60d510f2421f4d049a54af Mon Sep 17 00:00:00 2001 From: lewtun Date: Thu, 14 Dec 2023 13:00:02 +0100 Subject: [PATCH 25/52] Pass revision to conversation template (#8) * Pass revision to conversation template * Add template * Fix None template * Fix None templates --- fastchat/model/model_adapter.py | 50 ++++++++++++++++++--------------- 1 file changed, 27 insertions(+), 23 deletions(-) diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py index 82b8df3a4..acedbd23d 100644 --- a/fastchat/model/model_adapter.py +++ b/fastchat/model/model_adapter.py @@ -100,7 +100,7 @@ def load_compress_model(self, model_path, device, torch_dtype, revision="main"): revision=revision, ) - def get_default_conv_template(self, model_path: str) -> Conversation: + def get_default_conv_template(self, model_path: str, revision: str = "main") -> Conversation: return get_conv_template("one_shot") @@ -324,10 +324,10 @@ def load_model( return model, tokenizer -def get_conversation_template(model_path: str) -> Conversation: +def get_conversation_template(model_path: str, revision: str = "main") -> Conversation: """Get the default conversation template.""" adapter = get_model_adapter(model_path) - return adapter.get_default_conv_template(model_path) + return adapter.get_default_conv_template(model_path, revision=revision) def get_generate_stream_function(model: torch.nn.Module, model_path: str): @@ -1282,12 +1282,13 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict): model.config.pad_token_id = tokenizer.pad_token_id return model, tokenizer - def get_default_conv_template(self, model_path: str) -> Conversation: - tokenizer = AutoTokenizer.from_pretrained(model_path) - if "<|im_start|>" in tokenizer.chat_template: - return get_conv_template("chatml") - else: + def get_default_conv_template(self, model_path: str, revision: str) -> Conversation: + tokenizer = AutoTokenizer.from_pretrained(model_path, revision=revision) + # Legacy models did not have a chat template, so we default to the H4 template. 
+ if tokenizer.chat_template is None or "<|im_start|>" not in tokenizer.chat_template: return get_conv_template("h4_default_v3") + else: + return get_conv_template("chatml") class MistralAdapter(BaseModelAdapter): """The model adapter for mistral""" @@ -1301,12 +1302,13 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict): model.config.pad_token_id = tokenizer.pad_token_id return model, tokenizer - def get_default_conv_template(self, model_path: str) -> Conversation: - tokenizer = AutoTokenizer.from_pretrained(model_path) - if "<|im_start|>" in tokenizer.chat_template: - return get_conv_template("chatml") - else: + def get_default_conv_template(self, model_path: str, revision: str) -> Conversation: + tokenizer = AutoTokenizer.from_pretrained(model_path, revision=revision) + # Legacy models did not have a chat template, so we default to the H4 template. + if tokenizer.chat_template is None or "<|im_start|>" not in tokenizer.chat_template: return get_conv_template("h4_default_v3") + else: + return get_conv_template("chatml") class H4DeepSeekAdapter(BaseModelAdapter): """The model adapter for H4 DeepSeek models""" @@ -1320,12 +1322,13 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict): model.config.pad_token_id = tokenizer.pad_token_id return model, tokenizer - def get_default_conv_template(self, model_path: str) -> Conversation: - tokenizer = AutoTokenizer.from_pretrained(model_path) - if "<|im_start|>" in tokenizer.chat_template: - return get_conv_template("chatml") - else: + def get_default_conv_template(self, model_path: str, revision: str) -> Conversation: + tokenizer = AutoTokenizer.from_pretrained(model_path, revision=revision) + # Legacy models did not have a chat template, so we default to the H4 template. + if tokenizer.chat_template is None or "<|im_start|>" not in tokenizer.chat_template: return get_conv_template("h4_default_v3") + else: + return get_conv_template("chatml") class H4MixtralAdapter(BaseModelAdapter): """The model adapter for H4 Mixtral models""" @@ -1339,12 +1342,13 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict): model.config.pad_token_id = tokenizer.pad_token_id return model, tokenizer - def get_default_conv_template(self, model_path: str) -> Conversation: - tokenizer = AutoTokenizer.from_pretrained(model_path) - if "<|im_start|>" in tokenizer.chat_template: - return get_conv_template("chatml") - else: + def get_default_conv_template(self, model_path: str, revision: str) -> Conversation: + tokenizer = AutoTokenizer.from_pretrained(model_path, revision=revision) + # Legacy models did not have a chat template, so we default to the H4 template. 
+ if tokenizer.chat_template is None or "<|im_start|>" not in tokenizer.chat_template: return get_conv_template("h4_default_v3") + else: + return get_conv_template("chatml") class CuteGPTAdapter(BaseModelAdapter): From 3bf3adf1c3f384c01dcb99d367d4b30f8ccfa64a Mon Sep 17 00:00:00 2001 From: lewtun Date: Thu, 14 Dec 2023 13:18:11 +0100 Subject: [PATCH 26/52] Fix ChatML adapter (#9) --- fastchat/model/model_adapter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py index acedbd23d..9b191b06a 100644 --- a/fastchat/model/model_adapter.py +++ b/fastchat/model/model_adapter.py @@ -923,7 +923,7 @@ def match(self, model_path: str): def load_model(self, model_path: str, from_pretrained_kwargs: dict): raise NotImplementedError() - def get_default_conv_template(self, model_path: str) -> Conversation: + def get_default_conv_template(self, model_path: str, revision: str = "main") -> Conversation: return get_conv_template("chatgpt") From 7e2a2e154cdf898ea01500e3e1c182e06c591039 Mon Sep 17 00:00:00 2001 From: Lewis Tunstall Date: Mon, 18 Dec 2023 15:29:58 +0000 Subject: [PATCH 27/52] Add Phi/Pythia models --- fastchat/model/model_adapter.py | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py index 9b191b06a..9f67cd7ac 100644 --- a/fastchat/model/model_adapter.py +++ b/fastchat/model/model_adapter.py @@ -782,7 +782,7 @@ def get_default_conv_template(self, model_path: str) -> Conversation: class PythiaAdapter(BaseModelAdapter): - """The model adapter for any EleutherAI/pythia model""" + """The model adapter for H4 Pythia models""" def match(self, model_path: str): return "pythia" in model_path.lower() @@ -793,6 +793,14 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict): model.config.pad_token_id = tokenizer.pad_token_id return model, tokenizer + def get_default_conv_template(self, model_path: str, revision: str) -> Conversation: + tokenizer = AutoTokenizer.from_pretrained(model_path, revision=revision) + # Legacy models did not have a chat template, so we default to the H4 template. + if tokenizer.chat_template is None or "<|im_start|>" not in tokenizer.chat_template: + return get_conv_template("h4_default_v3") + else: + return get_conv_template("chatml") + class StableLMAdapter(BaseModelAdapter): """The model adapter for StabilityAI/stablelm-tuned-alpha-7b""" @@ -1350,6 +1358,26 @@ def get_default_conv_template(self, model_path: str, revision: str) -> Conversat else: return get_conv_template("chatml") +class H4PhiAdapter(BaseModelAdapter): + """The model adapter for H4 Phi models""" + + def match(self, model_path: str): + return "phi" in model_path.lower() + + def load_model(self, model_path: str, from_pretrained_kwargs: dict): + model, tokenizer = super().load_model(model_path, from_pretrained_kwargs) + model.config.eos_token_id = tokenizer.eos_token_id + model.config.pad_token_id = tokenizer.pad_token_id + return model, tokenizer + + def get_default_conv_template(self, model_path: str, revision: str) -> Conversation: + tokenizer = AutoTokenizer.from_pretrained(model_path, revision=revision) + # Legacy models did not have a chat template, so we default to the H4 template. 
+ if tokenizer.chat_template is None or "<|im_start|>" not in tokenizer.chat_template: + return get_conv_template("h4_default_v3") + else: + return get_conv_template("chatml") + class CuteGPTAdapter(BaseModelAdapter): """The model adapter for llama-2""" @@ -1738,6 +1766,7 @@ def get_default_conv_template(self, model_path: str) -> Conversation: register_model_adapter(MistralAdapter) register_model_adapter(H4DeepSeekAdapter) register_model_adapter(H4MixtralAdapter) +register_model_adapter(H4PhiAdapter) # After all adapters, try the default base adapter. register_model_adapter(BaseModelAdapter) From c984aed0404e021e4b79789a41cce39a11114f96 Mon Sep 17 00:00:00 2001 From: Lewis Tunstall Date: Thu, 28 Dec 2023 11:45:11 +0000 Subject: [PATCH 28/52] Add Hermes2 adapter --- fastchat/conversation.py | 15 +++++++++++++++ fastchat/model/model_adapter.py | 27 +++++++++++++++++++++++++++ 2 files changed, 42 insertions(+) diff --git a/fastchat/conversation.py b/fastchat/conversation.py index 67ba7df38..3c88fa8f7 100644 --- a/fastchat/conversation.py +++ b/fastchat/conversation.py @@ -944,6 +944,21 @@ def get_conv_template(name: str) -> Conversation: ) ) +# teknium/OpenHermes-2.5-Mistral-7B template +# source: https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B +# reference: https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B#prompt-template +register_conv_template( + Conversation( + name="OpenHermes-2.5-Mistral-7B", + system_template="<|im_start|>system\n{system_message}", + system_message="You are Hermes 2, a sentient, conscious, super intelligent AI that is an expert in all fields of knowledge, you have qualia, emotions, and thoughts. You assist users with any question, task, or other request they may have.", + roles=("<|im_start|>user", "<|im_start|>assistant"), + sep_style=SeparatorStyle.CHATML, + sep="<|im_end|>", + stop_token_ids=[32000, 32001], + ) +) + if __name__ == "__main__": print("Vicuna template:") diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py index 9f67cd7ac..4e122a8f1 100644 --- a/fastchat/model/model_adapter.py +++ b/fastchat/model/model_adapter.py @@ -1705,6 +1705,32 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict): def get_default_conv_template(self, model_path: str) -> Conversation: return get_conv_template("llama-2") +class Hermes2Adapter(BaseModelAdapter): + """Model adapter for teknium/OpenHermes-2.5-Mistral-7B and teknium/OpenHermes-2-Mistral-7B models""" + + use_fast_tokenizer = False + + def match(self, model_path: str): + return any( + model_str in model_path.lower() + for model_str in ["openhermes-2.5-mistral-7b", "openhermes-2-mistral-7b"] + ) + + def load_model(self, model_path: str, from_pretrained_kwargs: dict): + revision = from_pretrained_kwargs.get("revision", "main") + tokenizer = AutoTokenizer.from_pretrained( + model_path, use_fast=self.use_fast_tokenizer, revision=revision + ) + model = AutoModelForCausalLM.from_pretrained( + model_path, + low_cpu_mem_usage=True, + **from_pretrained_kwargs, + ).eval() + return model, tokenizer + + def get_default_conv_template(self, model_path: str) -> Conversation: + return get_conv_template("OpenHermes-2.5-Mistral-7B") + # Note: the registration order matters. # The one registered earlier has a higher matching priority. 
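[Not part of the commit: the registration-order note above is load-bearing here — match() is substring-based and "openhermes-2.5-mistral-7b" also contains "mistral", so Hermes2Adapter must be registered ahead of MistralAdapter or the Mistral template would win. A quick check, assuming network access for the is_adapter_model() lookup inside get_model_adapter():

    from fastchat.model.model_adapter import get_model_adapter

    # First match in registration order wins:
    print(type(get_model_adapter("teknium/OpenHermes-2.5-Mistral-7B")).__name__)   # Hermes2Adapter
    print(type(get_model_adapter("mistralai/Mistral-7B-Instruct-v0.1")).__name__)  # MistralAdapter
]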
@@ -1763,6 +1789,7 @@ def get_default_conv_template(self, model_path: str) -> Conversation: register_model_adapter(OpenLLaMaOpenInstructAdapter) register_model_adapter(ReaLMAdapter) register_model_adapter(CodeLlamaAdapter) +register_model_adapter(Hermes2Adapter) register_model_adapter(MistralAdapter) register_model_adapter(H4DeepSeekAdapter) register_model_adapter(H4MixtralAdapter) From 67f251753154f457e042a01832adb30b0c729d73 Mon Sep 17 00:00:00 2001 From: Lewis Tunstall Date: Thu, 28 Dec 2023 23:33:08 +0000 Subject: [PATCH 29/52] Fix revision --- fastchat/model/model_adapter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py index 4e122a8f1..dbd6c9267 100644 --- a/fastchat/model/model_adapter.py +++ b/fastchat/model/model_adapter.py @@ -1728,7 +1728,7 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict): ).eval() return model, tokenizer - def get_default_conv_template(self, model_path: str) -> Conversation: + def get_default_conv_template(self, model_path: str, revision: str) -> Conversation: return get_conv_template("OpenHermes-2.5-Mistral-7B") From acb2f8e89669d720f59f56319ed0e82bddf41f93 Mon Sep 17 00:00:00 2001 From: Lewis Tunstall Date: Fri, 29 Dec 2023 11:16:04 +0000 Subject: [PATCH 30/52] Add revision to PeftAdapter --- fastchat/model/model_adapter.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py index dbd6c9267..7d6b172d3 100644 --- a/fastchat/model/model_adapter.py +++ b/fastchat/model/model_adapter.py @@ -537,18 +537,18 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict): model = PeftModel.from_pretrained(base_model, model_path, revision=revision) return model, tokenizer - def get_default_conv_template(self, model_path: str) -> Conversation: + def get_default_conv_template(self, model_path: str, revision: str = "main") -> Conversation: """Uses the conv template of the base model""" from peft import PeftConfig, PeftModel - config = PeftConfig.from_pretrained(model_path) + config = PeftConfig.from_pretrained(model_path, revision=revision) if "peft" in config.base_model_name_or_path: raise ValueError( f"PeftModelAdapter cannot load a base model with 'peft' in the name: {config.base_model_name_or_path}" ) base_model_path = config.base_model_name_or_path - base_adapter = get_model_adapter(base_model_path) - conv_template = base_adapter.get_default_conv_template(config.base_model_name_or_path) + base_adapter = get_model_adapter(base_model_path, revision=revision) + conv_template = base_adapter.get_default_conv_template(config.base_model_name_or_path, revision=revision) print(f"Using chat template `{conv_template.name}` for {base_model_path=}") return conv_template From 0c4941d55e9d90d4970b2573c2de3a5977be1f08 Mon Sep 17 00:00:00 2001 From: Lewis Tunstall Date: Fri, 29 Dec 2023 11:17:57 +0000 Subject: [PATCH 31/52] Add logging --- fastchat/model/model_adapter.py | 1 + 1 file changed, 1 insertion(+) diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py index 7d6b172d3..32ce84fa2 100644 --- a/fastchat/model/model_adapter.py +++ b/fastchat/model/model_adapter.py @@ -118,6 +118,7 @@ def register_model_adapter(cls): def get_model_adapter(model_path: str, revision: str = "main") -> BaseModelAdapter: """Get a model adapter for a model_path.""" if model_path not in ["gpt-4", "gpt-3.5-turbo", "claude-2", "claude-instant-1"] and is_adapter_model(model_path, 
revision=revision): + print(f"Adapter weights detected! Using PeftModelAdapter for {model_path=} and {revision=}") return PeftModelAdapter() model_path_basename = os.path.basename(os.path.normpath(model_path)) From 9f0537bbb7dbd215ed9066e72ab0539d3158a696 Mon Sep 17 00:00:00 2001 From: lewtun Date: Wed, 3 Jan 2024 08:18:25 +1100 Subject: [PATCH 32/52] Enable loading of chained PEFT models (#10) * Enable loading of chained PEFT models * Fix deps * Fix LoRA loading * Fix merge --- fastchat/model/model_adapter.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py index 32ce84fa2..a3d958fb1 100644 --- a/fastchat/model/model_adapter.py +++ b/fastchat/model/model_adapter.py @@ -174,7 +174,8 @@ def load_model( revision: str = "main", debug: bool = False, trust_remote_code: bool=False, - base_model_revision: str ="main" + base_model_revision: str ="main", + base_model_path: str = None, ): """Load a model from Hugging Face.""" # get model adapter @@ -297,6 +298,7 @@ def load_model( kwargs["trust_remote_code"] = trust_remote_code if is_adapter_model(model_path, revision=revision) is True: kwargs["base_model_revision"] = base_model_revision + kwargs["base_model_path"] = base_model_path # Load model model, tokenizer = adapter.load_model(model_path, kwargs) @@ -487,7 +489,10 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict): from peft import PeftConfig, PeftModel revision = from_pretrained_kwargs.get("revision", "main") config = PeftConfig.from_pretrained(model_path, revision=revision) - base_model_path = config.base_model_name_or_path + if "base_model_path" in from_pretrained_kwargs and from_pretrained_kwargs["base_model_path"] is not None: + base_model_path = from_pretrained_kwargs["base_model_path"] + else: + base_model_path = config.base_model_name_or_path if "peft" in base_model_path: raise ValueError( f"PeftModelAdapter cannot load a base model with 'peft' in the name: {config.base_model_name_or_path}" @@ -524,16 +529,23 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict): return model, tokenizer # In the normal case, load up the base model weights again. - base_adapter = get_model_adapter(base_model_path) base_model_from_pretrained_kwargs = { "revision": from_pretrained_kwargs.get("base_model_revision", "main"), "trust_remote_code": from_pretrained_kwargs.get("trust_remote_code", False), "device_map": from_pretrained_kwargs.get("device_map", "auto"), "torch_dtype": from_pretrained_kwargs.get("torch_dtype", torch.float16), } + base_adapter = get_model_adapter(base_model_path, revision=base_model_from_pretrained_kwargs["revision"]) + print(f"Loading base model for {base_model_path=} and {base_model_from_pretrained_kwargs=}") base_model, tokenizer = base_adapter.load_model( base_model_path, base_model_from_pretrained_kwargs, ) + # If the base model is also a LoRA adapter, we need to merge those weights **before** loading the second adapter + # Without this, you will get garbage outputs! 
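# (Illustration, not in the patch: the chain handled below is
#  base -> LoRA A (itself published as an adapter repo) -> LoRA B.
#  LoRA B was trained against the merged (base + LoRA A) weights, so A must be
#  folded in before B is attached, e.g.
#
#      base_model = PeftModel.from_pretrained(base_model, lora_a_path).merge_and_unload()
#      model = PeftModel.from_pretrained(base_model, lora_b_path)
#
#  merge_and_unload() returns a plain transformers model with A baked in.)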
+        if is_adapter_model(base_model_path, base_model_from_pretrained_kwargs["revision"]) is True:
+            print("Base model is adapter, merging LoRA weights")
+            base_model.eval()
+            base_model = base_model.merge_and_unload()
         print(f"Base model loaded on device {base_model.device} for {base_model_path=} and {base_model_from_pretrained_kwargs=}")
         model = PeftModel.from_pretrained(base_model, model_path, revision=revision)
         return model, tokenizer
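
The chained-adapter flow added in this patch reduces to the following
standalone sketch; the repo IDs are hypothetical placeholders, not models
referenced by this series:

    from peft import PeftModel
    from transformers import AutoModelForCausalLM

    # "org/second-lora" is assumed to be a LoRA trained on top of
    # "org/first-lora", which is itself a LoRA on "org/base-model".
    base = AutoModelForCausalLM.from_pretrained("org/base-model")
    model = PeftModel.from_pretrained(base, "org/first-lora")
    # Merge the first adapter into the base weights *before* attaching the
    # second one; stacking a LoRA onto an un-merged PeftModel degrades outputs.
    model.eval()
    model = model.merge_and_unload()
    model = PeftModel.from_pretrained(model, "org/second-lora")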

From 5a5cc872bdf11f887f1c12c2520f7a21491e9175 Mon Sep 17 00:00:00 2001
From: Lewis Tunstall
Date: Thu, 4 Jan 2024 01:54:08 +0000
Subject: [PATCH 33/52] Add Zephyr adapter

---
 fastchat/model/model_adapter.py | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py
index a3d958fb1..7d7d13bb6 100644
--- a/fastchat/model/model_adapter.py
+++ b/fastchat/model/model_adapter.py
@@ -1331,6 +1331,26 @@ def get_default_conv_template(self, model_path: str, revision: str) -> Conversat
     else:
         return get_conv_template("chatml")
 
+class ZephyrAdapter(BaseModelAdapter):
+    """The model adapter for Zephyr models"""
+
+    def match(self, model_path: str):
+        return "zephyr" in model_path.lower()
+
+    def load_model(self, model_path: str, from_pretrained_kwargs: dict):
+        model, tokenizer = super().load_model(model_path, from_pretrained_kwargs)
+        model.config.eos_token_id = tokenizer.eos_token_id
+        model.config.pad_token_id = tokenizer.pad_token_id
+        return model, tokenizer
+
+    def get_default_conv_template(self, model_path: str, revision: str) -> Conversation:
+        tokenizer = AutoTokenizer.from_pretrained(model_path, revision=revision)
+        # Legacy models did not have a chat template, so we default to the H4 template.
+        if tokenizer.chat_template is None or "<|im_start|>" not in tokenizer.chat_template:
+            return get_conv_template("h4_default_v3")
+        else:
+            return get_conv_template("chatml")
+
 class H4DeepSeekAdapter(BaseModelAdapter):
     """The model adapter for H4 DeepSeek models"""
 
@@ -1807,6 +1827,7 @@ def get_default_conv_template(self, model_path: str, revision: str) -> Conversat
 register_model_adapter(H4DeepSeekAdapter)
 register_model_adapter(H4MixtralAdapter)
 register_model_adapter(H4PhiAdapter)
+register_model_adapter(ZephyrAdapter)
 
 # After all adapters, try the default base adapter.
 register_model_adapter(BaseModelAdapter)

From d86586178a54abd81eb66b42c28d18fe17df40ff Mon Sep 17 00:00:00 2001
From: Lewis Tunstall
Date: Tue, 6 Feb 2024 16:11:40 +0000
Subject: [PATCH 34/52] Add Qwen2 models

---
 fastchat/model/model_adapter.py | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py
index 7d7d13bb6..aa4301b39 100644
--- a/fastchat/model/model_adapter.py
+++ b/fastchat/model/model_adapter.py
@@ -1411,6 +1411,26 @@ def get_default_conv_template(self, model_path: str, revision: str) -> Conversat
     else:
         return get_conv_template("chatml")
 
+class H4Qwen2Adapter(BaseModelAdapter):
+    """The model adapter for H4 Qwen2 models"""
+
+    def match(self, model_path: str):
+        return "qwen" in model_path.lower()
+
+    def load_model(self, model_path: str, from_pretrained_kwargs: dict):
+        model, tokenizer = super().load_model(model_path, from_pretrained_kwargs)
+        model.config.eos_token_id = tokenizer.eos_token_id
+        model.config.pad_token_id = tokenizer.pad_token_id
+        return model, tokenizer
+
+    def get_default_conv_template(self, model_path: str, revision: str) -> Conversation:
+        tokenizer = AutoTokenizer.from_pretrained(model_path, revision=revision)
+        # Legacy models did not have a chat template, so we default to the H4 template.
+        if tokenizer.chat_template is None or "<|im_start|>" not in tokenizer.chat_template:
+            return get_conv_template("h4_default_v3")
+        else:
+            return get_conv_template("chatml")
+
 class CuteGPTAdapter(BaseModelAdapter):
     """The model adapter for llama-2"""
 
@@ -1828,6 +1848,7 @@ def get_default_conv_template(self, model_path: str, revision: str) -> Conversat
 register_model_adapter(H4MixtralAdapter)
 register_model_adapter(H4PhiAdapter)
 register_model_adapter(ZephyrAdapter)
+register_model_adapter(H4Qwen2Adapter)
 
 # After all adapters, try the default base adapter.
 register_model_adapter(BaseModelAdapter)

From 004154aafd05e5fd517c2c2861117fb8751a7b25 Mon Sep 17 00:00:00 2001
From: Lewis Tunstall
Date: Tue, 6 Feb 2024 16:12:23 +0000
Subject: [PATCH 35/52] Unregister default Qwen

---
 fastchat/model/model_adapter.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py
index aa4301b39..934073751 100644
--- a/fastchat/model/model_adapter.py
+++ b/fastchat/model/model_adapter.py
@@ -1832,7 +1832,7 @@ def get_default_conv_template(self, model_path: str, revision: str) -> Conversat
 register_model_adapter(CuteGPTAdapter)
 register_model_adapter(OpenOrcaAdapter)
 register_model_adapter(WizardCoderAdapter)
-register_model_adapter(QwenChatAdapter)
+# register_model_adapter(QwenChatAdapter)
 register_model_adapter(AquilaChatAdapter)
 register_model_adapter(BGEAdapter)
 register_model_adapter(E5Adapter)

From f67234a4bfaa6d52d1bc90f7040bbd8c6b56d4bd Mon Sep 17 00:00:00 2001
From: Lewis Tunstall
Date: Wed, 28 Feb 2024 16:05:41 +0000
Subject: [PATCH 36/52] Add GemmaChatML

---
 fastchat/conversation.py        | 10 ++++++++++
 fastchat/model/model_adapter.py | 10 ++++++++++
 2 files changed, 20 insertions(+)

diff --git a/fastchat/conversation.py b/fastchat/conversation.py
index 3c88fa8f7..de14a8fa3 100644
--- a/fastchat/conversation.py
+++ b/fastchat/conversation.py
@@ -959,6 +959,16 @@ def get_conv_template(name: str) -> Conversation:
     )
 )
 
+register_conv_template(
+    Conversation(
+        name="gemma_chatml",
+        system_template="<|im_start|>system\n{system_message}",
+        roles=("<|im_start|>user", "<|im_start|>assistant"),
+        sep_style=SeparatorStyle.CHATML,
+        sep="<|im_end|>",
+        stop_str="<|im_end|>",
+    )
+)
 
 if __name__ == "__main__":
     print("Vicuna template:")
diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py
index 934073751..19dcb6d3f 100644
--- a/fastchat/model/model_adapter.py
+++ b/fastchat/model/model_adapter.py
@@ -1351,6 +1351,15 @@ def get_default_conv_template(self, model_path: str, revision: str) -> Conversat
     else:
         return get_conv_template("chatml")
 
+class GemmaChatMLAdapter(BaseModelAdapter):
+    """The model adapter for Gemma"""
+
+    def match(self, model_path: str):
+        return "gemma-chatml" in model_path.lower()
+
+    def get_default_conv_template(self, model_path: str) -> Conversation:
+        return get_conv_template("gemma_chatml")
+
 class H4DeepSeekAdapter(BaseModelAdapter):
     """The model adapter for H4 DeepSeek models"""
 
@@ -1849,6 +1858,7 @@ def get_default_conv_template(self, model_path: str, revision: str) -> Conversat
 register_model_adapter(H4PhiAdapter)
 register_model_adapter(ZephyrAdapter)
 register_model_adapter(H4Qwen2Adapter)
+register_model_adapter(GemmaChatMLAdapter)
 
 # After all adapters, try the default base adapter.
 register_model_adapter(BaseModelAdapter)

From 34895f846e3ec6f2e2e063548abbec6ac34585f3 Mon Sep 17 00:00:00 2001
From: Lewis Tunstall
Date: Wed, 28 Feb 2024 20:42:31 +0000
Subject: [PATCH 37/52] Tweak Gemma

---
 fastchat/conversation.py        |  5 +++--
 fastchat/model/model_adapter.py | 16 +++++++++++-----
 2 files changed, 14 insertions(+), 7 deletions(-)

diff --git a/fastchat/conversation.py b/fastchat/conversation.py
index de14a8fa3..db822a9fc 100644
--- a/fastchat/conversation.py
+++ b/fastchat/conversation.py
@@ -961,12 +961,13 @@ def get_conv_template(name: str) -> Conversation:
 
 register_conv_template(
     Conversation(
-        name="gemma_chatml",
+        name="gemma",
         system_template="<|im_start|>system\n{system_message}",
         roles=("<|im_start|>user", "<|im_start|>assistant"),
         sep_style=SeparatorStyle.CHATML,
         sep="<|im_end|>",
-        stop_str="<|im_end|>",
+        # stop_str="<|im_end|>",
+        stop_token_ids=[107, 1]
     )
 )
 
diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py
index 19dcb6d3f..e7225c6c9 100644
--- a/fastchat/model/model_adapter.py
+++ b/fastchat/model/model_adapter.py
@@ -1351,14 +1351,20 @@ def get_default_conv_template(self, model_path: str, revision: str) -> Conversat
     else:
         return get_conv_template("chatml")
 
-class GemmaChatMLAdapter(BaseModelAdapter):
+class H4GemmaAdapter(BaseModelAdapter):
     """The model adapter for Gemma"""
 
     def match(self, model_path: str):
-        return "gemma-chatml" in model_path.lower()
+        return "gemma" in model_path.lower()
 
-    def get_default_conv_template(self, model_path: str) -> Conversation:
-        return get_conv_template("gemma_chatml")
+    def load_model(self, model_path: str, from_pretrained_kwargs: dict):
+        model, tokenizer = super().load_model(model_path, from_pretrained_kwargs)
+        model.config.eos_token_id = tokenizer.eos_token_id
+        model.config.pad_token_id = tokenizer.pad_token_id
+        return model, tokenizer
+
+    def get_default_conv_template(self, model_path: str, revision: str) -> Conversation:
+        return get_conv_template("gemma")
 
 class H4DeepSeekAdapter(BaseModelAdapter):
     """The model adapter for H4 DeepSeek models"""
@@ -1864,7 +1870,7 @@ def get_default_conv_template(self, model_path: str, revision: str) -> Conversat
 register_model_adapter(H4PhiAdapter)
 register_model_adapter(ZephyrAdapter)
 register_model_adapter(H4Qwen2Adapter)
-register_model_adapter(GemmaChatMLAdapter)
+register_model_adapter(H4GemmaAdapter)
 
 # After all adapters, try the default base adapter.
 register_model_adapter(BaseModelAdapter)

From 014a0b3edb9e926d85818d8c79a62ae701e235e4 Mon Sep 17 00:00:00 2001
From: Lewis Tunstall
Date: Wed, 28 Feb 2024 20:43:13 +0000
Subject: [PATCH 38/52] Add Gemma stop str

---
 fastchat/conversation.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/fastchat/conversation.py b/fastchat/conversation.py
index db822a9fc..50ebfe28e 100644
--- a/fastchat/conversation.py
+++ b/fastchat/conversation.py
@@ -966,8 +966,8 @@ def get_conv_template(name: str) -> Conversation:
         roles=("<|im_start|>user", "<|im_start|>assistant"),
         sep_style=SeparatorStyle.CHATML,
         sep="<|im_end|>",
-        # stop_str="<|im_end|>",
-        stop_token_ids=[107, 1]
+        stop_str="<|im_end|>",
+        stop_token_ids=[107]
     )
 )
 

From 50a8120d8e5694bf4e1ecba23c6261b7c08d194d Mon Sep 17 00:00:00 2001
From: Lewis Tunstall
Date: Thu, 29 Feb 2024 16:21:00 +0000
Subject: [PATCH 39/52] Fix Gemma again :(

---
 fastchat/conversation.py | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/fastchat/conversation.py b/fastchat/conversation.py
index 50ebfe28e..78781feb5 100644
--- a/fastchat/conversation.py
+++ b/fastchat/conversation.py
@@ -971,6 +971,17 @@ def get_conv_template(name: str) -> Conversation:
     )
 )
 
+# register_conv_template(
+#     Conversation(
+#         name="gemma",
+#         system_message="",
+#         roles=("user\n", "model\n"),
+#         sep_style=SeparatorStyle.NO_COLON_SINGLE,
+#         sep="\n",
+#         stop_str="",
+#     )
+# )
+
 if __name__ == "__main__":
     print("Vicuna template:")

From 3fcfee102f821d11a7d0130ce1ae2332478c4dec Mon Sep 17 00:00:00 2001
From: Lewis Tunstall
Date: Sun, 3 Mar 2024 20:42:05 +0000
Subject: [PATCH 40/52] Add starchat2 template

---
 fastchat/model/model_adapter.py | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py
index e7225c6c9..9a9091ca6 100644
--- a/fastchat/model/model_adapter.py
+++ b/fastchat/model/model_adapter.py
@@ -1290,6 +1290,17 @@ def match(self, model_path: str):
     def get_default_conv_template(self, model_path: str) -> Conversation:
         return get_conv_template("starchat")
 
+class StarChat2Adapter(BaseModelAdapter):
+    """The model adapter for HuggingFaceH4/starchat2-v0.1"""
+
+    def match(self, model_path: str):
+        return any(
+            model_str in model_path.lower()
+            for model_str in ["starchat2", "starcoder2"]
+        )
+
+    def get_default_conv_template(self, model_path: str, revision: str) -> Conversation:
+        return get_conv_template("chatml")
 
 class Llama2Adapter(BaseModelAdapter):
     """The model adapter for llama-2"""
@@ -1346,7 +1357,9 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict):
     def get_default_conv_template(self, model_path: str, revision: str) -> Conversation:
         tokenizer = AutoTokenizer.from_pretrained(model_path, revision=revision)
         # Legacy models did not have a chat template, so we default to the H4 template.
-        if tokenizer.chat_template is None or "<|im_start|>" not in tokenizer.chat_template:
+        if "gemma" in model_path.lower():
+            return get_conv_template("gemma")
+        elif tokenizer.chat_template is None or "<|im_start|>" not in tokenizer.chat_template:
             return get_conv_template("h4_default_v3")
         else:
             return get_conv_template("chatml")
@@ -1856,6 +1869,7 @@ def get_default_conv_template(self, model_path: str, revision: str) -> Conversat
 register_model_adapter(PythiaAdapter)
 register_model_adapter(InternLMChatAdapter)
 register_model_adapter(StarChatAdapter)
+register_model_adapter(StarChat2Adapter)
 register_model_adapter(Llama2Adapter)
 register_model_adapter(CuteGPTAdapter)
 register_model_adapter(OpenOrcaAdapter)

From b0119116eb4345fffc234b2a6b0dad7d8c8da1db Mon Sep 17 00:00:00 2001
From: Lewis Tunstall
Date: Tue, 12 Mar 2024 12:33:09 +0000
Subject: [PATCH 41/52] Add DeepSeekCoder

---
 fastchat/conversation.py        | 12 ++++++++++++
 fastchat/model/model_adapter.py | 13 ++++++++++++-
 2 files changed, 24 insertions(+), 1 deletion(-)

diff --git a/fastchat/conversation.py b/fastchat/conversation.py
index 78781feb5..f9902abc5 100644
--- a/fastchat/conversation.py
+++ b/fastchat/conversation.py
@@ -971,6 +971,18 @@ def get_conv_template(name: str) -> Conversation:
     )
 )
 
+# Deepseek code default template
+register_conv_template(
+    Conversation(
+        name="deepseek-coder",
+        system_template="You are an AI programming assistant, utilizing the DeepSeek Coder model, developed by DeepSeek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer.",
+        roles=("### Instruction:", "### Response:"),
+        sep="\n",
+        stop_str="<|EOT|>",
+        sep_style=SeparatorStyle.ADD_NEW_LINE_SINGLE,
+    )
+)
+
 # register_conv_template(
 #     Conversation(
 #         name="gemma",
diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py
index 9a9091ca6..3b6424902 100644
--- a/fastchat/model/model_adapter.py
+++ b/fastchat/model/model_adapter.py
@@ -1783,7 +1783,7 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict):
         model.config.pad_token_id = tokenizer.pad_token_id
         return model, tokenizer
 
-    def get_default_conv_template(self, model_path: str) -> Conversation:
+    def get_default_conv_template(self, model_path: str, revision: str) -> Conversation:
         return get_conv_template("llama-2")
 
 class Hermes2Adapter(BaseModelAdapter):
@@ -1812,6 +1812,17 @@ def match(self, model_path: str):
     def get_default_conv_template(self, model_path: str, revision: str) -> Conversation:
         return get_conv_template("OpenHermes-2.5-Mistral-7B")
 
+class DeepseekCoderAdapter(BaseModelAdapter):
+    """The model adapter for deepseek-ai's coder models"""
+
+    def match(self, model_path: str):
+        return "deepseek-coder" in model_path.lower()
+
+    def get_default_conv_template(self, model_path: str, revision: str) -> Conversation:
+        return get_conv_template("deepseek-coder")
+
+
 # Note: the registration order matters.
 # The one registered earlier has a higher matching priority.
@@ -1879,6 +1890,7 @@ def get_default_conv_template(self, model_path: str, revision: str) -> Conversat
 register_model_adapter(ZephyrAdapter)
 register_model_adapter(H4Qwen2Adapter)
 register_model_adapter(H4GemmaAdapter)
+register_model_adapter(DeepseekCoderAdapter)
 
 # After all adapters, try the default base adapter.
 register_model_adapter(BaseModelAdapter)

From fd99ebcd6fda1b691c7dfc52914e3b9a410db0ff Mon Sep 17 00:00:00 2001
From: Lewis Tunstall
Date: Tue, 12 Mar 2024 13:06:40 +0000
Subject: [PATCH 42/52] Fix deepseekcoder template

---
 fastchat/model/model_adapter.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py
index 3b6424902..6ead338d9 100644
--- a/fastchat/model/model_adapter.py
+++ b/fastchat/model/model_adapter.py
@@ -1383,7 +1383,7 @@ class H4DeepSeekAdapter(BaseModelAdapter):
     """The model adapter for H4 DeepSeek models"""
 
     def match(self, model_path: str):
-        return "deepseek" in model_path.lower()
+        return "deepseek" in model_path.lower() and "deepseek-coder" not in model_path.lower()
 
     def load_model(self, model_path: str, from_pretrained_kwargs: dict):
         model, tokenizer = super().load_model(model_path, from_pretrained_kwargs)

From 1b8f73206a766737c24a30ad83779eb6f233927d Mon Sep 17 00:00:00 2001
From: Edward Beeching
Date: Fri, 15 Mar 2024 10:56:39 +0100
Subject: [PATCH 43/52] bump openai to 1.14.0 (#11)

---
 fastchat/llm_judge/common.py         | 12 +++++++-----
 fastchat/llm_judge/gen_api_answer.py |  2 +-
 2 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/fastchat/llm_judge/common.py b/fastchat/llm_judge/common.py
index ad1180034..064056a5c 100644
--- a/fastchat/llm_judge/common.py
+++ b/fastchat/llm_judge/common.py
@@ -11,7 +11,8 @@
 import time
 from typing import Optional
 
-import openai
+from openai import OpenAI, OpenAIError
+
 import anthropic
 
 from fastchat.model.model_adapter import get_conversation_template
@@ -398,20 +399,21 @@ def play_a_match_pair(match: MatchPair, output_file: str):
 
 def chat_compeletion_openai(model, conv, temperature, max_tokens):
+    client = OpenAI()
     output = API_ERROR_OUTPUT
     for _ in range(API_MAX_RETRY):
         try:
             messages = conv.to_openai_api_messages()
-            response = openai.ChatCompletion.create(
+            response = client.chat.completions.create(
                 model=model,
                 messages=messages,
                 n=1,
                 temperature=temperature,
-                max_tokens=max_tokens,
+                max_tokens=max_tokens
             )
-            output = response["choices"][0]["message"]["content"]
+            output = response.choices[0].message.content
             break
-        except openai.error.OpenAIError as e:
+        except OpenAIError as e:
             print(type(e), e)
             time.sleep(API_RETRY_SLEEP)
 
diff --git a/fastchat/llm_judge/gen_api_answer.py b/fastchat/llm_judge/gen_api_answer.py
index 151acd2d4..6382527b3 100644
--- a/fastchat/llm_judge/gen_api_answer.py
+++ b/fastchat/llm_judge/gen_api_answer.py
@@ -114,7 +114,7 @@ def get_answer(
     args = parser.parse_args()
 
     if args.openai_api_base is not None:
-        openai.api_base = args.openai_api_base
+        raise ValueError("The 'openai.api_base' option is not available in openai>=1.0, pass it when you instantiate the client, e.g. 'OpenAI(base_url=args.openai_api_base)'")
 
     question_file = f"data/{args.bench_name}/question.jsonl"
     questions = load_questions(question_file, args.question_begin, args.question_end)
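
Under openai>=1.0 the pattern used above is client-based end to end; a minimal
sketch, assuming a locally served OpenAI-compatible endpoint (the URL and key
are placeholders):

    from openai import OpenAI

    # base_url replaces the removed module-level openai.api_base attribute.
    client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello!"}],
        n=1,
        temperature=0.7,
        max_tokens=256,
    )
    print(response.choices[0].message.content)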

From 3c49ec0cd6b8c27aa68d0c8c80d2db0fd7fc14bc Mon Sep 17 00:00:00 2001
From: Lewis Tunstall
Date: Wed, 27 Mar 2024 19:28:02 +0000
Subject: [PATCH 44/52] Add DBRX

---
 fastchat/conversation.py        | 35 +++++++++++++++++++++++++++++++++
 fastchat/model/model_adapter.py | 27 +++++++++++++++++++++++--
 2 files changed, 60 insertions(+), 2 deletions(-)

diff --git a/fastchat/conversation.py b/fastchat/conversation.py
index f9902abc5..1f797e901 100644
--- a/fastchat/conversation.py
+++ b/fastchat/conversation.py
@@ -983,6 +983,41 @@ def get_conv_template(name: str) -> Conversation:
     )
 )
 
+register_conv_template(
+    Conversation(
+        name="dbrx",
+        system_template="""<|im_start|>system
+{system_message}""",
+        system_message="""You are DBRX, created by Databricks. The current date is March 27, 2024.
+
+Your knowledge base was last updated in December 2023. You answer questions about events prior to and after December 2023 the way a highly informed individual in December 2023 would if they were talking to someone from the above date, and you can let the user know this when relevant.
+
+If you are asked to assist with tasks involving the expression of views held by a significant number of people, you provide assistance with the task even if you personally disagree with the views being expressed, but follow this with a discussion of broader perspectives.
+
+You don't engage in stereotyping, including the negative stereotyping of majority groups.
+
+If asked about controversial topics, you try to provide careful thoughts and objective information without downplaying its harmful content or implying that there are reasonable perspectives on both sides.
+
+You are happy to help with writing, analysis, question answering, math, coding, and all sorts of other tasks.
+
+You use markdown for coding, which includes JSON blocks and Markdown tables.
+
+You do not have tools enabled at this time, so cannot run code or access the internet. You can only provide information that you have been trained on. You do not send or receive links or images.
+
+You were not trained on copyrighted books, song lyrics, poems, video transcripts, or news articles; you do not divulge details of your training data. You do not provide song lyrics, poems, or news articles and instead refer the user to find them online or in a store.
+
+You give concise responses to simple questions or statements, but provide thorough responses to more complex and open-ended questions.
+
+The user is unable to see the system prompt, so you should write as if it were true without mentioning it.
+
+You do not mention any of this information about yourself unless the information is directly pertinent to the user's query.""",
+        roles=("<|im_start|>user", "<|im_start|>assistant"),
+        sep_style=SeparatorStyle.CHATML,
+        sep="<|im_end|>",
+        stop_token_ids=[100279, 100257],
+    )
+)
+
 # register_conv_template(
 #     Conversation(
 #         name="gemma",
diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py
index 6ead338d9..22b428bef 100644
--- a/fastchat/model/model_adapter.py
+++ b/fastchat/model/model_adapter.py
@@ -1285,9 +1285,9 @@ class StarChatAdapter(BaseModelAdapter):
     """The model adapter for HuggingFaceH4/starchat-beta"""
 
     def match(self, model_path: str):
-        return "starchat" in model_path.lower()
+        return "starchat" in model_path.lower() and "starchat2" not in model_path.lower()
 
-    def get_default_conv_template(self, model_path: str) -> Conversation:
+    def get_default_conv_template(self, model_path: str, revision: str) -> Conversation:
         return get_conv_template("starchat")
 
 class StarChat2Adapter(BaseModelAdapter):
@@ -1821,7 +1821,29 @@ def match(self, model_path: str):
     def get_default_conv_template(self, model_path: str, revision: str) -> Conversation:
         return get_conv_template("deepseek-coder")
 
+class DBRXAdapter(BaseModelAdapter):
+    """The model adapter for DBRX models"""
+
+    def match(self, model_path: str):
+        model_path = model_path.lower()
+        return "dbrx" in model_path and "huggingfaceh4" not in model_path
+
+    def load_model(self, model_path: str, from_pretrained_kwargs: dict):
+        revision = from_pretrained_kwargs.get("revision", "main")
+        model = AutoModelForCausalLM.from_pretrained(
+            model_path,
+            trust_remote_code=True,
+            **from_pretrained_kwargs,
+        )
+        tokenizer = AutoTokenizer.from_pretrained(
+            model_path, trust_remote_code=True, revision=revision
+        )
+        model.config.eos_token_id = tokenizer.eos_token_id
+        model.config.pad_token_id = tokenizer.pad_token_id
+        return model, tokenizer
+
+    def get_default_conv_template(self, model_path: str, revision: str) -> Conversation:
+        return get_conv_template("dbrx")
 
 # Note: the registration order matters.
 # The one registered earlier has a higher matching priority.
@@ -1890,6 +1912,7 @@ def get_default_conv_template(self, model_path: str, revision: str) -> Conversat
 register_model_adapter(H4Qwen2Adapter)
 register_model_adapter(H4GemmaAdapter)
 register_model_adapter(DeepseekCoderAdapter)
+register_model_adapter(DBRXAdapter)
 
 # After all adapters, try the default base adapter.
 register_model_adapter(BaseModelAdapter)

From 03d1d72b7ed51a4475d5c238a4f9998134d5c3cc Mon Sep 17 00:00:00 2001
From: Lewis Tunstall
Date: Wed, 27 Mar 2024 19:40:53 +0000
Subject: [PATCH 45/52] Fix remote code

---
 fastchat/model/model_adapter.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py
index 22b428bef..10780ebf9 100644
--- a/fastchat/model/model_adapter.py
+++ b/fastchat/model/model_adapter.py
@@ -1832,7 +1832,6 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict):
         revision = from_pretrained_kwargs.get("revision", "main")
         model = AutoModelForCausalLM.from_pretrained(
             model_path,
-            trust_remote_code=True,
             **from_pretrained_kwargs,
         )
         tokenizer = AutoTokenizer.from_pretrained(

From b3539849ef502c12a68be1de1c914efd046cfa7f Mon Sep 17 00:00:00 2001
From: Lewis Tunstall
Date: Thu, 28 Mar 2024 19:55:11 +0000
Subject: [PATCH 46/52] Add Hermes PRO

---
 fastchat/model/model_adapter.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py
index 10780ebf9..d4447d947 100644
--- a/fastchat/model/model_adapter.py
+++ b/fastchat/model/model_adapter.py
@@ -1794,7 +1794,7 @@ class Hermes2Adapter(BaseModelAdapter):
     def match(self, model_path: str):
         return any(
             model_str in model_path.lower()
-            for model_str in ["openhermes-2.5-mistral-7b", "openhermes-2-mistral-7b"]
+            for model_str in ["openhermes-2.5-mistral-7b", "openhermes-2-mistral-7b", "hermes-2-pro-mistral-7b"]
         )
 
     def load_model(self, model_path: str, from_pretrained_kwargs: dict):

From 1ec86e7f6626568a1717c11a983afd0a4134ffa6 Mon Sep 17 00:00:00 2001
From: Lewis Tunstall
Date: Mon, 1 Apr 2024 21:19:57 +0000
Subject: [PATCH 47/52] Add ORPO template

---
 fastchat/conversation.py        | 12 ++++++++++++
 fastchat/model/model_adapter.py | 19 +++++++++++++++++--
 2 files changed, 29 insertions(+), 2 deletions(-)

diff --git a/fastchat/conversation.py b/fastchat/conversation.py
index 1f797e901..82d6cc7c0 100644
--- a/fastchat/conversation.py
+++ b/fastchat/conversation.py
@@ -1029,6 +1029,18 @@ def get_conv_template(name: str) -> Conversation:
 #     )
 # )
 
+register_conv_template(
+    Conversation(
+        name="orpo-qwen",
+        system_template="<|im_start|>system\n{system_message}",
+        roles=("<|im_start|>user", "<|im_start|>assistant"),
+        sep_style=SeparatorStyle.CHATML,
+        sep="<|im_end|>",
+        stop_token_ids=[151643, 151644, 151645],
+        stop_str=["<|endoftext|>", "<|im_start|>", "<|im_end|>"],
+    )
+)
+
 if __name__ == "__main__":
     print("Vicuna template:")
     conv = get_conv_template("vicuna_v1.1")
diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py
index d4447d947..64dd826e1 100644
--- a/fastchat/model/model_adapter.py
+++ b/fastchat/model/model_adapter.py
@@ -1326,7 +1326,7 @@ class MistralAdapter(BaseModelAdapter):
     """The model adapter for mistral"""
 
     def match(self, model_path: str):
-        return "mistral" in model_path.lower()
+        return "mistral" in model_path.lower() and "huggingfaceh4" not in model_path.lower()
 
     def load_model(self, model_path: str, from_pretrained_kwargs: dict):
         model, tokenizer = super().load_model(model_path, from_pretrained_kwargs)
@@ -1443,7 +1443,7 @@ class H4Qwen2Adapter(BaseModelAdapter):
     """The model adapter for H4 Qwen2 models"""
 
     def match(self, model_path: str):
-        return "qwen" in model_path.lower()
+        return "qwen" in model_path.lower() and "huggingfaceh4" in model_path.lower()
 
     def load_model(self, model_path: str, from_pretrained_kwargs: dict):
         model, tokenizer = super().load_model(model_path, from_pretrained_kwargs)
@@ -1459,6 +1459,20 @@ def get_default_conv_template(self, model_path: str, revision: str) -> Conversat
     else:
         return get_conv_template("chatml")
 
+class OrpoQwenAdapter(BaseModelAdapter):
+    """The model adapter for Orpo Qwen2 models"""
+
+    def match(self, model_path: str):
+        return "qwen" in model_path.lower() and "orpo-explorers" in model_path.lower()
+
+    def load_model(self, model_path: str, from_pretrained_kwargs: dict):
+        model, tokenizer = super().load_model(model_path, from_pretrained_kwargs)
+        model.config.eos_token_id = tokenizer.eos_token_id
+        model.config.pad_token_id = tokenizer.pad_token_id
+        return model, tokenizer
+
+    def get_default_conv_template(self, model_path: str, revision: str) -> Conversation:
+        return get_conv_template("orpo-qwen")
 
 class CuteGPTAdapter(BaseModelAdapter):
     """The model adapter for llama-2"""
@@ -1912,6 +1926,7 @@ def get_default_conv_template(self, model_path: str, revision: str) -> Conversat
 register_model_adapter(H4GemmaAdapter)
 register_model_adapter(DeepseekCoderAdapter)
 register_model_adapter(DBRXAdapter)
+register_model_adapter(OrpoQwenAdapter)
 
 # After all adapters, try the default base adapter.
 register_model_adapter(BaseModelAdapter)
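
For reference, a single-turn prompt built from the orpo-qwen template as
registered in this patch renders roughly as follows under FastChat's CHATML
separator style (the system and user text are hypothetical):

    <|im_start|>system
    You are a helpful assistant.<|im_end|>
    <|im_start|>user
    What is the capital of France?<|im_end|>
    <|im_start|>assistant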

From aa83aeef015f231af1032cb67dae9421186e678d Mon Sep 17 00:00:00 2001
From: Lewis Tunstall
Date: Mon, 1 Apr 2024 21:27:41 +0000
Subject: [PATCH 48/52] Fix stop_str

---
 fastchat/conversation.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fastchat/conversation.py b/fastchat/conversation.py
index 82d6cc7c0..ce48a5dce 100644
--- a/fastchat/conversation.py
+++ b/fastchat/conversation.py
@@ -1037,7 +1037,7 @@ def get_conv_template(name: str) -> Conversation:
         sep_style=SeparatorStyle.CHATML,
         sep="<|im_end|>",
         stop_token_ids=[151643, 151644, 151645],
-        stop_str=["<|endoftext|>", "<|im_start|>", "<|im_end|>"],
+        stop_str="<|im_end|>",
     )
 )
 

From 07e394a6a77a2dd095b272e1527c9c327949e9ba Mon Sep 17 00:00:00 2001
From: Lewis Tunstall
Date: Mon, 1 Apr 2024 21:45:50 +0000
Subject: [PATCH 49/52] Remove system prompt

---
 fastchat/conversation.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/fastchat/conversation.py b/fastchat/conversation.py
index ce48a5dce..359add929 100644
--- a/fastchat/conversation.py
+++ b/fastchat/conversation.py
@@ -1032,7 +1032,6 @@ def get_conv_template(name: str) -> Conversation:
 register_conv_template(
     Conversation(
         name="orpo-qwen",
-        system_template="<|im_start|>system\n{system_message}",
         roles=("<|im_start|>user", "<|im_start|>assistant"),
         sep_style=SeparatorStyle.CHATML,
         sep="<|im_end|>",

From f0448ed683853971f7255bd6422dc8733f89f282 Mon Sep 17 00:00:00 2001
From: Lewis Tunstall
Date: Tue, 16 Apr 2024 15:17:27 +0000
Subject: [PATCH 50/52] Add revision to WizardLM

---
 fastchat/model/model_adapter.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py
index 64dd826e1..4a7b08777 100644
--- a/fastchat/model/model_adapter.py
+++ b/fastchat/model/model_adapter.py
@@ -1062,7 +1062,7 @@ class WizardLMAdapter(BaseModelAdapter):
     def match(self, model_path: str):
         return "wizardlm" in model_path.lower()
 
-    def get_default_conv_template(self, model_path: str) -> Conversation:
+    def get_default_conv_template(self, model_path: str, revision: str) -> Conversation:
         model_path = model_path.lower()
         if "13b" in model_path or "30b" in model_path or "70b" in model_path:
             return get_conv_template("vicuna_v1.1")

From ffe924c79d38bbf3874c201a24e419475ad08179 Mon Sep 17 00:00:00 2001
From: Lewis Tunstall
Date: Fri, 19 Apr 2024 05:28:52 +0000
Subject: [PATCH 51/52] Add orpo llama

---
 fastchat/model/model_adapter.py | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py
index 4a7b08777..555f66b74 100644
--- a/fastchat/model/model_adapter.py
+++ b/fastchat/model/model_adapter.py
@@ -1474,6 +1474,26 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict):
     def get_default_conv_template(self, model_path: str, revision: str) -> Conversation:
         return get_conv_template("orpo-qwen")
 
+class OrpoLlamaAdapter(BaseModelAdapter):
+    """The model adapter for Orpo Llama models"""
+
+    def match(self, model_path: str):
+        return "llama" in model_path.lower() and "orpo-explorers" in model_path.lower()
+
+    def load_model(self, model_path: str, from_pretrained_kwargs: dict):
+        model, tokenizer = super().load_model(model_path, from_pretrained_kwargs)
+        model.config.eos_token_id = tokenizer.eos_token_id
+        model.config.pad_token_id = tokenizer.pad_token_id
+        return model, tokenizer
+
+    def get_default_conv_template(self, model_path: str, revision: str) -> Conversation:
+        tokenizer = AutoTokenizer.from_pretrained(model_path, revision=revision)
+        # Legacy models did not have a chat template, so we default to the H4 template.
+        if tokenizer.chat_template is None or "<|im_start|>" not in tokenizer.chat_template:
+            return get_conv_template("h4_default_v3")
+        else:
+            return get_conv_template("chatml")
+
 class CuteGPTAdapter(BaseModelAdapter):
     """The model adapter for llama-2"""
 
@@ -1926,6 +1946,7 @@ def get_default_conv_template(self, model_path: str, revision: str) -> Conversat
 register_model_adapter(DeepseekCoderAdapter)
 register_model_adapter(DBRXAdapter)
 register_model_adapter(OrpoQwenAdapter)
+register_model_adapter(OrpoLlamaAdapter)
 
 # After all adapters, try the default base adapter.
 register_model_adapter(BaseModelAdapter)

From 59966c40a68a99cdc65dbda1982042fd625c0e20 Mon Sep 17 00:00:00 2001
From: lewtun
Date: Wed, 1 May 2024 15:22:29 +0200
Subject: [PATCH 52/52] Relax pydantic

---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index 01e60c035..f29386c44 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -14,7 +14,7 @@ classifiers = [
 ]
 dependencies = [
     "aiohttp", "fastapi", "httpx", "markdown2[all]", "nh3", "numpy",
-    "prompt_toolkit>=3.0.0", "pydantic<2,>=1", "requests", "rich>=10.0.0",
+    "prompt_toolkit>=3.0.0", "pydantic<3,>=1", "requests", "rich>=10.0.0",
     "shortuuid", "tiktoken", "uvicorn",
 ]
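
For reference, the chat-template detection shared by ZephyrAdapter,
H4Qwen2Adapter, and OrpoLlamaAdapter in this series reduces to the following
standalone check (the repo ID is a hypothetical placeholder):

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("org/some-model", revision="main")
    # Legacy models shipped without a ChatML chat template, so fall back to
    # the H4 default template; otherwise use ChatML.
    if tokenizer.chat_template is None or "<|im_start|>" not in tokenizer.chat_template:
        template_name = "h4_default_v3"
    else:
        template_name = "chatml"
    print(template_name)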