diff --git a/applications/ColossalQA/colossalqa/memory.py b/applications/ColossalQA/colossalqa/memory.py
index 255df68a367e..7a5512281035 100644
--- a/applications/ColossalQA/colossalqa/memory.py
+++ b/applications/ColossalQA/colossalqa/memory.py
@@ -154,7 +154,7 @@ def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
         remain = self.max_tokens - prompt_length
         while self.get_conversation_length() > remain:
             if len(self.buffered_history.messages) <= 2:
-                raise RuntimeError("Exeeed max_tokens, trunck size of retrieved documents is too large")
+                raise RuntimeError("Exceeded max_tokens, chunk size of retrieved documents is too large")
             temp = self.buffered_history.messages.pop(0)
             self.summarized_history_temp.messages.append(temp)
             temp = self.buffered_history.messages.pop(0)
diff --git a/applications/ColossalQA/examples/webui_demo/server.py b/applications/ColossalQA/examples/webui_demo/server.py
index 050994567570..3b0f82845c87 100644
--- a/applications/ColossalQA/examples/webui_demo/server.py
+++ b/applications/ColossalQA/examples/webui_demo/server.py
@@ -77,12 +77,16 @@ def generate(data: GenerationTaskReq, request: Request):
         colossal_api = ColossalAPI(model_name, all_config["model"]["model_path"])
         llm = ColossalLLM(n=1, api=colossal_api)
     elif all_config["model"]["mode"] == "api":
-        all_config["chain"]["mem_llm_kwargs"] = None
-        all_config["chain"]["disambig_llm_kwargs"] = None
-        all_config["chain"]["gen_llm_kwargs"] = None
         if model_name == "pangu_api":
             from colossalqa.local.pangu_llm import Pangu
-            llm = Pangu(id=1)
+
+            gen_config = {
+                "user": "User",
+                "max_tokens": all_config["chain"]["disambig_llm_kwargs"]["max_new_tokens"],
+                "temperature": all_config["chain"]["disambig_llm_kwargs"]["temperature"],
+                "n": 1  # the number of responses generated
+            }
+            llm = Pangu(gen_config=gen_config)
             llm.set_auth_config()  # verify user's auth info here
         elif model_name == "chatgpt_api":
             from langchain.llms import OpenAI