Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
51 changes: 51 additions & 0 deletions flo_ai/examples/tool_using_agent.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
import asyncio
from typing import Any
from flo_ai.builder.agent_builder import AgentBuilder
from flo_ai.tool.base_tool import Tool
from flo_ai.models.base_agent import ReasoningPattern
from flo_ai.models.agent import Agent
from flo_ai.llm import Anthropic


async def calculate(operation: str, x: float, y: float) -> float:
    """Apply a basic arithmetic operation to two numbers.

    Args:
        operation: Either 'add' or 'multiply'.
        x: First operand.
        y: Second operand.

    Returns:
        The result of applying the operation.

    Raises:
        ValueError: If the operation is not recognized.
    """
    handlers = {
        'add': lambda a, b: a + b,
        'multiply': lambda a, b: a * b,
    }
    handler = handlers.get(operation)
    if handler is None:
        raise ValueError(f'Unknown operation: {operation}')
    return handler(x, y)


# Define a calculator tool
# Wraps the async `calculate` function so the agent can invoke it; the
# `parameters` dict describes each argument of calculate()'s signature
# (JSON-Schema-like property entries — presumably consumed by the LLM's
# tool-call formatting; confirm against Tool's expected format).
calculator_tool: Tool = Tool(
    name='calculate',
    description='Perform basic calculations',
    function=calculate,
    parameters={
        'operation': {
            'type': 'string',
            'description': 'The operation to perform (add or multiply)',
        },
        'x': {'type': 'number', 'description': 'First number'},
        'y': {'type': 'number', 'description': 'Second number'},
    },
)

# Create a tool-using agent with Claude
agent: Agent = (
AgentBuilder()
.with_name('Calculator Assistant')
.with_prompt('You are a math assistant that can perform calculations.')
.with_llm(Anthropic(model='claude-sonnet-4-5-20250929'))
.with_tools([calculator_tool])
.with_reasoning(ReasoningPattern.REACT)
.with_retries(2)
.build()
)


async def main() -> None:
    """Ask the agent one arithmetic question and print its reply."""
    response: Any = await agent.run('Calculate 5 plus 3')
    print(f'Response: {response}')


if __name__ == '__main__':
    # Guard the entry point so importing this example module does not
    # immediately trigger a live Anthropic API call.
    asyncio.run(main())
4 changes: 3 additions & 1 deletion flo_ai/flo_ai/builder/agent_builder.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,9 @@ def __init__(self):
self._reasoning_pattern = ReasoningPattern.DIRECT
self._output_schema: Optional[Dict[str, Any]] = None
self._role: Optional[str] = None
self._act_as: Optional[str] = None
self._act_as: Optional[str] = (
'assistant' # Default to 'assistant' instead of None
)

def with_name(self, name: str) -> 'AgentBuilder':
"""Set the agent's name"""
Expand Down
80 changes: 72 additions & 8 deletions flo_ai/flo_ai/llm/anthropic_llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,12 +53,32 @@ async def generate(
conversation = []
for msg in messages:
if msg['role'] != 'system':
conversation.append(
{
'role': 'assistant' if msg['role'] == 'assistant' else 'user',
'content': msg['content'],
}
)
# Handle function/tool result messages specially for Claude
if msg['role'] == 'function':
# Claude expects tool results in a specific format
# If this is a tool result, format it as a user message with tool_result content
tool_use_id = msg.get('tool_use_id', 'unknown')
conversation.append(
{
'role': 'user',
'content': [
{
'type': 'tool_result',
'tool_use_id': tool_use_id,
'content': msg['content'],
}
],
}
)
else:
conversation.append(
{
'role': 'assistant'
if msg['role'] == 'assistant'
else 'user',
'content': msg['content'],
}
)

try:
anthropic_kwargs = {
Expand Down Expand Up @@ -103,19 +123,28 @@ async def generate(
},
)

# Extract text content from TextBlock objects
text_content = ''
for content_block in response.content:
if content_block.type == 'text':
text_content = content_block.text
break

# Check if there's a tool use in the response
for content_block in response.content:
if content_block.type == 'tool_use':
return {
'content': response.content[0].text if response.content else '',
'content': text_content,
'raw_content': response.content, # Store raw content for Claude's tool flow
'function_call': {
'name': content_block.name,
'arguments': json.dumps(content_block.input),
'id': content_block.id, # Include the tool_use_id for Claude
},
}

# Handle regular text response
return {'content': response.content[0].text if response.content else ''}
return {'content': text_content}

except Exception as e:
raise Exception(f'Error in Claude API call: {str(e)}')
Expand Down Expand Up @@ -209,3 +238,38 @@ def format_tools_for_llm(self, tools: List['Tool']) -> List[Dict[str, Any]]:
def format_image_in_message(self, image: ImageMessage) -> str:
    """Format an image for inclusion in a message.

    Raises:
        NotImplementedError: image input is not supported for the
            Anthropic LLM yet.
    """
    raise NotImplementedError('Not implemented image for LLM Anthropic')

def get_assistant_message_for_tool_call(
    self, response: Dict[str, Any]
) -> Optional[Any]:
    """Return the assistant message content to record for a tool call.

    For Claude this is the raw content blocks (which include tool_use
    entries) stored under 'raw_content'; None means the caller should
    fall back to plain text content extraction.
    """
    if not isinstance(response, dict):
        return None
    return response.get('raw_content')

def get_tool_use_id(self, function_call: Dict[str, Any]) -> Optional[str]:
    """Return Claude's tool_use id recorded on the function call.

    None when the call carries no id (e.g. non-Claude responses).
    """
    try:
        return function_call['id']
    except KeyError:
        return None

def format_function_result_message(
    self, function_name: str, content: str, tool_use_id: Optional[str] = None
) -> Dict[str, Any]:
    """Build the message dict that records a tool's result.

    For Claude the tool_use_id is attached so the result can be matched
    back to the originating tool_use block.
    """
    base: Dict[str, Any] = {
        'role': 'function',
        'name': function_name,
        'content': content,
    }
    if not tool_use_id:
        return base
    return {**base, 'tool_use_id': tool_use_id}
47 changes: 45 additions & 2 deletions flo_ai/flo_ai/llm/base_llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,18 +46,61 @@ async def stream(
async def get_function_call(
    self, response: Dict[str, Any]
) -> Optional[Dict[str, Any]]:
    """Extract function call information from LLM response.

    Supports both object-style responses (with a .function_call
    attribute) and dict-style responses (with a 'function_call' key).

    Returns:
        A dict with 'name' and 'arguments', plus 'id' when the LLM
        supplies one (e.g. Claude's tool_use id), or None when the
        response contains no function call.
    """
    if hasattr(response, 'function_call') and response.function_call:
        result = {
            'name': response.function_call.name,
            'arguments': response.function_call.arguments,
        }
        # Include ID if available (LLM-specific)
        if hasattr(response.function_call, 'id'):
            result['id'] = response.function_call.id
        return result
    elif isinstance(response, dict) and 'function_call' in response:
        result = {
            'name': response['function_call']['name'],
            'arguments': response['function_call']['arguments'],
        }
        # Include ID if available (LLM-specific)
        if 'id' in response['function_call']:
            result['id'] = response['function_call']['id']
        return result
    return None

def get_assistant_message_for_tool_call(
    self, response: Dict[str, Any]
) -> Optional[Any]:
    """
    Get the assistant message content for tool calls.
    Override in LLM-specific implementations if special handling is needed
    (e.g. the Anthropic subclass returns raw content blocks that include
    tool_use entries).
    Returns None to use default text content extraction.
    """
    return None

def get_tool_use_id(self, function_call: Dict[str, Any]) -> Optional[str]:
    """Return the tool_use id attached to a function call, if any.

    Subclasses may override when their provider tracks tool-call ids;
    the default simply reads the optional 'id' key.
    """
    if 'id' in function_call:
        return function_call['id']
    return None

def format_function_result_message(
    self, function_name: str, content: str, tool_use_id: Optional[str] = None
) -> Dict[str, Any]:
    """Build the default message dict for a function/tool result.

    Subclasses may override for provider-specific formatting; the base
    form is a 'function'-role message, tagged with tool_use_id when one
    is provided.
    """
    result: Dict[str, Any] = {
        'role': 'function',
        'name': function_name,
        'content': content,
    }
    return {**result, 'tool_use_id': tool_use_id} if tool_use_id else result

@abstractmethod
def get_message_content(self, response: Dict[str, Any]) -> str:
"""Extract message content from response"""
Expand Down
46 changes: 31 additions & 15 deletions flo_ai/flo_ai/models/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ def __init__(
system_prompt: str,
llm: BaseLLM,
tools: Optional[List[Tool]] = None,
max_retries: int = 3,
max_retries: int = 0,
max_tool_calls: int = 5,
reasoning_pattern: ReasoningPattern = ReasoningPattern.DIRECT,
output_schema: Optional[Dict[str, Any]] = None,
Expand Down Expand Up @@ -268,9 +268,35 @@ async def _run_with_tools(
continue
break

# If there's a function call, add the assistant's response
# LLM-specific implementations handle special formatting (e.g., Claude's raw_content)
assistant_message_content = (
self.llm.get_assistant_message_for_tool_call(response)
)
if assistant_message_content:
# LLM returned special formatting (e.g., Claude's raw_content)
messages.append(
{
'role': self.act_as,
'content': assistant_message_content,
}
)
else:
# Use default text content extraction
assistant_text = self.llm.get_message_content(response)
if assistant_text:
messages.append(
{
'role': self.act_as,
'content': assistant_text,
}
)

# Execute the tool
try:
function_name = function_call['name']
# Get tool_use_id if available (LLM-specific, e.g., Claude)
tool_use_id = self.llm.get_tool_use_id(function_call)
if isinstance(function_call['arguments'], str):
function_args = json.loads(function_call['arguments'])
else:
Expand Down Expand Up @@ -314,21 +340,11 @@ async def _run_with_tools(
)

# Add the function response to messages for context
messages.append(
{
'role': MessageType.FUNCTION,
'name': function_name,
'content': f'Here is the result of the tool call: \n {str(function_response)}',
}
)

# Add a prompt to continue the reasoning
messages.append(
{
'role': MessageType.USER,
'content': 'Continue with your reasoning based on this result. What should be done next?',
}
# LLM-specific implementations format the message appropriately
function_result_msg = self.llm.format_function_result_message(
function_name, str(function_response), tool_use_id
)
messages.append(function_result_msg)

except (json.JSONDecodeError, KeyError, ToolExecutionError) as e:
# Record tool call failure
Expand Down