11/21 #7

base: main
Changes from all commits
```diff
@@ -265,7 +265,10 @@ def increase_usage(final_llm_usage_dict: dict[str, LLMUsage | None], usage: LLMU
                 }

             tool_responses.append(tool_response)
-            if tool_response["tool_response"] is not None:
+            # check direct return flag
+            direct_flag = (tool_invoke_meta.extra or {}).get("return_direct", False)
+
+            if tool_response["tool_response"] is not None and not direct_flag:
                 self._current_thoughts.append(
                     ToolPromptMessage(
                         content=str(tool_response["tool_response"]),
```
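For context on where `return_direct` comes from: the runner reads it out of `tool_invoke_meta.extra`. Below is a minimal, self-contained sketch of that contract, assuming `extra` is an optional dict populated by the tool layer; the `ToolInvokeMeta` class here is a hypothetical stand-in, not the real class from this codebase.

```python
from dataclasses import dataclass, field
from typing import Any

@dataclass
class ToolInvokeMeta:
    """Hypothetical stand-in for the real invoke-meta object in this codebase."""
    tool_config: dict[str, Any] = field(default_factory=dict)
    extra: dict[str, Any] | None = None

    def to_dict(self) -> dict[str, Any]:
        return {"tool_config": self.tool_config, "extra": self.extra or {}}

# A tool that wants its raw output returned to the user without another
# LLM round-trip would mark itself as direct-return:
meta = ToolInvokeMeta(extra={"return_direct": True})

# The runner then reads the flag exactly as the diff above does:
direct_flag = (meta.extra or {}).get("return_direct", False)
assert direct_flag is True
```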
```diff
@@ -274,6 +277,28 @@ def increase_usage(final_llm_usage_dict: dict[str, LLMUsage | None], usage: LLMU
                     )
                 )

+            if direct_flag:
+                # save agent thought for this tool call
+                self.save_agent_thought(
+                    agent_thought_id=agent_thought_id,
+                    tool_name=tool_call_name,
+                    tool_input=tool_call_args,
+                    thought=llm_result.message.content or "",
+                    tool_invoke_meta={tool_call_name: tool_invoke_meta.to_dict()},
+                    observation={tool_call_name: tool_invoke_response},
+                    answer=str(tool_invoke_response or ""),
+                    messages_ids=message_file_ids,
+                )
+                self.queue_manager.publish(
+                    QueueAgentThoughtEvent(agent_thought_id=agent_thought_id), PublishFrom.APPLICATION_MANAGER
+                )
+
+                # publish end event immediately and return
+                final_answer = str(tool_invoke_response or "")
+                llm_final_usage = llm_usage.get("usage") or LLMUsage.empty_usage()
+                yield from self._yield_final_answer(prompt_messages, final_answer, llm_final_usage)
+                return
+
             if len(tool_responses) > 0:
                 # save agent thought
                 self.save_agent_thought(
```
Comment on lines +280 to +300:

This block introduces a large amount of duplicated logic for finalizing the agent run. The logic for publishing the `QueueMessageEndEvent` is nearly identical to the existing code at the end of the method. Currently, the original code path at the end of the method (lines 352-363) seems to be missing a `yield` of the final answer chunk.

To improve maintainability and fix the bug in the original path, I recommend extracting this finalization logic into a private helper method. This helper could then be called here, and also at the end of the method. A refactoring would look something like this:
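(Sketch of the suggested helper, reconstructed to match the `_yield_final_answer` implementation that this PR ultimately adds; see the final hunk below. Names such as `LLMResult`, `LLMResultChunk`, `QueueMessageEndEvent`, and `PublishFrom` come from the module's existing imports.)

```python
def _yield_final_answer(
    self,
    prompt_messages: list,
    final_answer: str,
    usage: LLMUsage,
) -> Generator[LLMResultChunk, None, None]:
    # Publish the end event so the application manager sees the run finish.
    self.queue_manager.publish(
        QueueMessageEndEvent(
            llm_result=LLMResult(
                model=self.model_instance.model,
                prompt_messages=prompt_messages,
                message=AssistantPromptMessage(content=final_answer),
                usage=usage,
                system_fingerprint="",
            )
        ),
        PublishFrom.APPLICATION_MANAGER,
    )

    # Yield one final chunk so streaming consumers also receive the answer.
    yield LLMResultChunk(
        model=self.model_instance.model,
        prompt_messages=prompt_messages,
        system_fingerprint="",
        delta=LLMResultChunkDelta(
            index=0,
            message=AssistantPromptMessage(content=final_answer),
            usage=usage,
        ),
    )
```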
This would fix the bug and remove the code duplication.
```diff
@@ -301,18 +326,10 @@ def increase_usage(final_llm_usage_dict: dict[str, LLMUsage | None], usage: LLMU
             iteration_step += 1

-        # publish end event
-        self.queue_manager.publish(
-            QueueMessageEndEvent(
-                llm_result=LLMResult(
-                    model=model_instance.model,
-                    prompt_messages=prompt_messages,
-                    message=AssistantPromptMessage(content=final_answer),
-                    usage=llm_usage["usage"] or LLMUsage.empty_usage(),
-                    system_fingerprint="",
-                )
-            ),
-            PublishFrom.APPLICATION_MANAGER,
+        yield from self._yield_final_answer(
+            prompt_messages,
+            final_answer,
+            llm_usage["usage"] or LLMUsage.empty_usage(),
         )

     def check_tool_calls(self, llm_result_chunk: LLMResultChunk) -> bool:
```
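Worth noting in this hunk: `_yield_final_answer` is a generator, so it must be driven with `yield from`. Calling it like a plain method would only create the generator object; its body, including the event publish, would never run. A minimal, self-contained illustration of that Python behavior:

```python
from collections.abc import Generator

def finalize() -> Generator[str, None, None]:
    # The print stands in for queue_manager.publish(...).
    print("end event published")
    yield "final chunk"

def run_wrong() -> None:
    finalize()  # generator created but never iterated: no print, no chunk

def run_right() -> Generator[str, None, None]:
    yield from finalize()  # body executes as the caller consumes the chunks

run_wrong()               # prints nothing
print(list(run_right()))  # prints "end event published", then ['final chunk']
```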
```diff
@@ -377,6 +394,36 @@ def extract_blocking_tool_calls(self, llm_result: LLMResult) -> list[tuple[str,
         return tool_calls

+    def _yield_final_answer(
+        self,
+        prompt_messages: list,
+        final_answer: str,
+        usage: LLMUsage,
+    ) -> Generator[LLMResultChunk, None, None]:
+        self.queue_manager.publish(
+            QueueMessageEndEvent(
+                llm_result=LLMResult(
+                    model=self.model_instance.model,
+                    prompt_messages=prompt_messages,
+                    message=AssistantPromptMessage(content=final_answer),
+                    usage=usage,
+                    system_fingerprint="",
+                )
+            ),
+            PublishFrom.APPLICATION_MANAGER,
+        )
+
+        yield LLMResultChunk(
+            model=self.model_instance.model,
+            prompt_messages=prompt_messages,
+            system_fingerprint="",
+            delta=LLMResultChunkDelta(
+                index=0,
+                message=AssistantPromptMessage(content=final_answer),
+                usage=usage,
+            ),
+        )
+
     def _init_system_message(self, prompt_template: str, prompt_messages: list[PromptMessage]) -> list[PromptMessage]:
         """
         Initialize system message
```

Cursx marked a review conversation on this helper's signature as resolved.