75 changes: 30 additions & 45 deletions flo_ai/flo_ai/agent/agent.py
@@ -126,6 +126,28 @@ async def run(
             # Otherwise, run as tool agent
             return await self._run_with_tools(retry_count, variables)
 
+    async def _handle_response_with_parser(
+        self, assistant_message: Optional[str], role: str, response: Dict[str, Any]
+    ) -> None:
+        if assistant_message:
+            self.add_to_history(AssistantMessage(role=role, content=assistant_message))
+        else:
+            possible_tool_message = await self.llm.get_function_call(response)
+            if possible_tool_message:
+                self.add_to_history(
+                    AssistantMessage(
+                        role=role, content=str(possible_tool_message['arguments'])
+                    )
+                )
+            else:
+                logger.debug('Warning: No message content found in response')
+                self.add_to_history(
+                    AssistantMessage(
+                        role=role,
+                        content='No message content found in response',
+                    )
+                )
+
     async def _run_conversational(
         self, retry_count: int, variables: Optional[Dict[str, Any]] = None
     ) -> List[BaseMessage]:
@@ -151,26 +173,9 @@ async def _run_conversational(
             # Ensure act_as is not None (default to 'assistant' if missing)
             role = self.act_as if self.act_as is not None else MessageType.ASSISTANT
 
-            if assistant_message:
-                self.add_to_history(
-                    AssistantMessage(role=role, content=assistant_message)
-                )
-            else:
-                possible_tool_message = await self.llm.get_function_call(response)
-                if possible_tool_message:
-                    self.add_to_history(
-                        AssistantMessage(
-                            role=role, content=possible_tool_message['arguments']
-                        )
-                    )
-                else:
-                    logger.debug('Warning: No message content found in response')
-                    self.add_to_history(
-                        AssistantMessage(
-                            role=role,
-                            content='No message content found in response',
-                        )
-                    )
+            await self._handle_response_with_parser(
+                assistant_message, role, response
+            )
 
         return self.conversation_history

@@ -400,32 +405,12 @@ async def _run_with_tools(
                 )
 
                 assistant_message = self.llm.get_message_content(final_response)
-                if assistant_message:
-                    # Ensure act_as is not None (default to 'assistant' if missing)
-                    role = (
-                        self.act_as
-                        if self.act_as is not None
-                        else MessageType.ASSISTANT
-                    )
-                    self.add_to_history(
-                        AssistantMessage(role=role, content=assistant_message)
-                    )
-                    return self.conversation_history
+                role = self.act_as if self.act_as is not None else MessageType.ASSISTANT
+                await self._handle_response_with_parser(
+                    assistant_message, role, final_response
+                )
 
-                # Fallback: return function message only if we have valid tool execution data
-                if function_response is not None and function_name is not None:
-                    return [
-                        FunctionMessage(
-                            content=str(
-                                'The final result based on the tool executions is: \n'
-                                + str(function_response)
-                            ),
-                            name=function_name,
-                        )
-                    ]
-                else:
-                    # No tools were executed and no assistant message, return safe fallback
-                    return self.conversation_history
+                return self.conversation_history
 
             except Exception as e:
                 retry_count += 1
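Reviewer note: the net effect of the agent.py changes is that three near-identical history-update blocks collapse into the single _handle_response_with_parser helper, and _run_with_tools now always returns self.conversation_history instead of short-circuiting with a FunctionMessage fallback. Below is a minimal runnable sketch of the consolidated control flow; the history list, tuple messages, and FakeLLM are illustration-only stand-ins, not flo_ai's real AssistantMessage/BaseMessage types.

import asyncio
from typing import Any, Dict, Optional

history: list = []  # stand-in for self.conversation_history


class FakeLLM:
    # Illustration-only stand-in for the provider-specific LLM wrappers.
    async def get_function_call(self, response: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        return response.get('tool_call')


async def handle_response_with_parser(
    llm: FakeLLM, assistant_message: Optional[str], role: str, response: Dict[str, Any]
) -> None:
    if assistant_message:
        # Branch 1: the model returned plain text.
        history.append((role, assistant_message))
    else:
        tool_message = await llm.get_function_call(response)
        if tool_message:
            # Branch 2: no text but a tool call; str() keeps dict arguments
            # history-safe (the old conversational path skipped this coercion).
            history.append((role, str(tool_message['arguments'])))
        else:
            # Branch 3: nothing usable; record an explicit placeholder.
            history.append((role, 'No message content found in response'))


asyncio.run(handle_response_with_parser(
    FakeLLM(), '', 'assistant', {'tool_call': {'arguments': {'city': 'Kochi'}}}
))
print(history)  # [('assistant', "{'city': 'Kochi'}")]

The empty-string sentinel only works because the helper treats '' as falsy, which is exactly what the get_message_content changes below guarantee.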
2 changes: 1 addition & 1 deletion flo_ai/flo_ai/llm/azure_openai_llm.py
@@ -157,7 +157,7 @@ def get_message_content(self, response: Dict[str, Any]) -> str:
             return response
         if hasattr(response, 'content') and response.content is not None:
             return str(response.content)
-        return str(response)
+        return ''
 
     def format_tool_for_llm(self, tool: 'Tool') -> Dict[str, Any]:
         """Format a single tool for Azure OpenAI's API (OpenAI-compatible)."""
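This one-line change (mirrored in openai_llm.py below) matters because the agent branches on the truthiness of the extracted content: the old str(response) fallback produced a truthy object repr, so the tool-call branch of _handle_response_with_parser could never be reached for responses that carry no text. A quick sketch of the contract; FakeResponse is a hypothetical stand-in for an SDK object with a tool call but no .content:

from typing import Any


class FakeResponse:
    """Hypothetical SDK response: has tool calls, but no .content attribute."""


def get_message_content(response: Any) -> str:
    # Mirrors the patched fallback chain in azure_openai_llm/openai_llm.
    if isinstance(response, str):
        return response
    if hasattr(response, 'content') and response.content is not None:
        return str(response.content)
    return ''  # was str(response): truthy noise like '<FakeResponse object at 0x...>'


message = get_message_content(FakeResponse())
if message:
    print('stored as assistant text')
else:
    print('falsy, so the agent checks for a function call instead')  # this prints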
2 changes: 1 addition & 1 deletion flo_ai/flo_ai/llm/gemini_llm.py
@@ -205,7 +205,7 @@ def get_next_chunk():
             if hasattr(chunk, 'text') and chunk.text:
                 yield {'content': chunk.text}
 
-    def get_message_content(self, response: Any) -> str:
+    def get_message_content(self, response: Any) -> Optional[str]:
         """Extract message content from response"""
         if isinstance(response, dict):
             return response.get('content', '')
2 changes: 1 addition & 1 deletion flo_ai/flo_ai/llm/ollama_llm.py
@@ -126,7 +126,7 @@ async def stream(
                 if data.get('done') is True:
                     break
 
-    def get_message_content(self, response: Any) -> str:
+    def get_message_content(self, response: Any) -> Optional[str]:
         """Extract message content from response"""
        if isinstance(response, dict):
             return response.get('content', '')
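The Gemini and Ollama wrappers only widen their annotations here; the bodies are unchanged, and the new Optional[str] presumably reflects that both can already hand back None when a response has no extractable text. One thing to verify before merge (an assumption from this diff alone, since the import blocks sit outside the hunks) is that both modules already import Optional; otherwise the annotation raises a NameError at import time. A minimal sketch of the widened contract:

from typing import Any, Optional


def get_message_content(response: Any) -> Optional[str]:
    # Sketch only: None and '' both mean 'no text content' to the caller.
    if isinstance(response, dict):
        return response.get('content', '')
    if hasattr(response, 'content') and response.content is not None:
        return str(response.content)
    return None  # falsy, so the agent's fallback branches behave the same as with ''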
2 changes: 1 addition & 1 deletion flo_ai/flo_ai/llm/openai_llm.py
@@ -153,7 +153,7 @@ def get_message_content(self, response: Dict[str, Any]) -> str:
             return response
         if hasattr(response, 'content') and response.content is not None:
             return str(response.content)
-        return str(response)
+        return ''
 
     def format_tool_for_llm(self, tool: 'Tool') -> Dict[str, Any]:
         """Format a single tool for OpenAI's API"""
2 changes: 1 addition & 1 deletion flo_ai/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "flo_ai"
-version = "1.1.2"
+version = "1.1.3"
 description = "A easy way to create structured AI agents"
 authors = [{ name = "rootflo", email = "engineering.tools@rootflo.ai" }]
 requires-python = ">=3.10,<4.0"
2 changes: 1 addition & 1 deletion flo_ai/setup.py
@@ -5,7 +5,7 @@
 
 setuptools.setup(
     name='flo-ai',
-    version='1.1.2',
+    version='1.1.3',
     author='Rootflo',
     description='Create composable AI agents',
     long_description=long_description,
2 changes: 1 addition & 1 deletion flo_ai/tests/unit-tests/test_openai_llm.py
@@ -208,7 +208,7 @@ def test_openai_get_message_content(self):
         mock_obj = Mock()
         del mock_obj.content
         result = llm.get_message_content(mock_obj)
-        assert result == str(mock_obj)
+        assert result == ''
 
     def test_openai_format_tool_for_llm(self):
         """Test format_tool_for_llm method."""
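The updated assertion leans on a Mock subtlety worth spelling out: del mock_obj.content makes later attribute access raise AttributeError, so hasattr(mock_obj, 'content') is False and the patched method falls through to '' (previously it leaked str(mock_obj), something like '<Mock id=...>', into history). A self-contained pytest-style version of the check, with the fallback chain inlined as a sketch:

from unittest.mock import Mock


def get_message_content(response) -> str:
    # Inlined sketch of the patched OpenAI fallback chain, illustration only.
    if isinstance(response, str):
        return response
    if hasattr(response, 'content') and response.content is not None:
        return str(response.content)
    return ''


def test_mock_without_content_hits_fallback():
    mock_obj = Mock()
    del mock_obj.content  # deletion makes hasattr(mock_obj, 'content') False
    assert get_message_content(mock_obj) == ''  # was str(mock_obj) before the patch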
6 changes: 3 additions & 3 deletions flo_ai/tests/unit-tests/test_openai_vllm.py
@@ -294,9 +294,9 @@ def test_openai_vllm_get_message_content(self):
         )
 
         # Test with dict response (should return str representation)
-        response = {'content': 'Hello, world!'}
+        response = str({'content': 'Hello, world!'})
         result = llm.get_message_content(response)
-        assert result == "{'content': 'Hello, world!'}"
+        assert result == response
 
         # Test with string response
         result = llm.get_message_content('Direct string')  # type: ignore[arg-type]
@@ -305,7 +305,7 @@ def test_openai_vllm_get_message_content(self):
         # Test with empty content
         response = {'content': ''}
         result = llm.get_message_content(response)
-        assert result == "{'content': ''}"
+        assert result == ''
 
         # Test with message object that has content attribute
         mock_message = Mock()
2 changes: 1 addition & 1 deletion flo_ai/uv.lock

(Generated file; diff not rendered.)

2 changes: 1 addition & 1 deletion wavefront/server/modules/agents_module/pyproject.toml
@@ -13,7 +13,7 @@ dependencies = [
     "flo-utils",
     "tools-module",
     "api-services-module",
-    "flo-ai==1.1.1",
+    "flo-ai==1.1.3",
 ]
 
 [tool.uv.sources]
2 changes: 1 addition & 1 deletion in a second pyproject.toml (file path not captured in this view)
@@ -17,7 +17,7 @@ dependencies = [
     "pandas~=2.2.3",
     "ollama~=0.4.8",
     "textract~=1.6.5",
-    "flo-ai==1.1.1",
+    "flo-ai==1.1.3",
     "google-cloud-pubsub~=2.30.0",
     "boto3<=1.38.40",
     "pyyaml>=6.0.3,<7",
2 changes: 1 addition & 1 deletion wavefront/server/modules/tools_module/pyproject.toml
@@ -3,7 +3,7 @@ name = "tools_module"
 version = "0.1.0"
 description = "Tools module for Flo AI agent system"
 dependencies = [
-    "flo-ai==1.1.1",
+    "flo-ai==1.1.3",
     "flo_cloud",
 
     "datasource",