Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -17,3 +17,7 @@ examples/local/*
.logs
scratch_pad.py
.*
*.png
*.html
usecases/
compare_gemini_outputs_v1.py
113 changes: 113 additions & 0 deletions flo_ai/examples/arium_linear_usage.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,113 @@
import asyncio
from flo_ai.arium.builder import AriumBuilder
from flo_ai.models.agent import Agent
from flo_ai.llm.openai_llm import OpenAI


async def simple_example():
    """Run a minimal two-agent workflow: greeter -> responder.

    Builds two agents that share one LLM instance, wires them into a
    straight line with AriumBuilder, executes the workflow once with a
    single user message, prints the output, and returns it.
    """
    shared_llm = OpenAI(model='gpt-4o-mini', temperature=0.7)

    # First hop in the chain: opens the conversation.
    greeter = Agent(
        name='greeter',
        system_prompt='You are a friendly greeter. Say hello and introduce the topic to the next agent.',
        llm=shared_llm,
    )

    # Second hop: replies to whatever the greeter produced.
    responder = Agent(
        name='responder',
        system_prompt='You are a helpful responder. Provide a thoughtful response to what the greeter shared.',
        llm=shared_llm,
    )

    # Assemble the pipeline first, then execute it.
    pipeline = (
        AriumBuilder()
        .add_agent(greeter)
        .add_agent(responder)
        .start_with(greeter)
        .connect(greeter, responder)  # greeter feeds directly into responder
        .end_with(responder)
    )
    output = await pipeline.build_and_run(
        ["Hello, I'd like to learn about Python programming!"]
    )

    print('Simple Example Result:')
    print(output)
    return output


async def main():
    """Run a two-agent pipeline: content analyst feeding a summary generator.

    Same shape as `simple_example` but with richer system prompts and
    explicit agent roles. Prints the final workflow result under a banner.
    """
    workflow_llm = OpenAI(model='gpt-4o-mini', temperature=0.7)

    # Stage 1 -- analyse the raw input text.
    content_analyst = Agent(
        name='content_analyst',
        system_prompt="""You are a content analyst. When you receive input, analyze it and provide:
1. A brief summary of the content
2. The main topics covered
3. Any insights or observations

Pass your analysis to the next agent for final processing.""",
        llm=workflow_llm,
        role='Content Analyst',
    )

    # Stage 2 -- condense the analyst's output into a final summary.
    summary_generator = Agent(
        name='summary_generator',
        system_prompt="""You are a summary generator. You receive analysis from the content analyst.
Your job is to create a concise, well-structured final summary that includes:
1. Key takeaways
2. Actionable insights
3. A clear conclusion

Make your response clear and professional.""",
        llm=workflow_llm,
        role='Summary Generator',
    )

    print('Building Arium workflow...')

    # Sample document fed into the head of the pipeline.
    source_text = (
        'Machine learning is revolutionizing various industries. '
        'From healthcare to finance, AI systems are being deployed '
        'to automate processes, improve decision-making, and enhance '
        'customer experiences. However, challenges remain around '
        'data privacy, algorithmic bias, and the need for skilled '
        'professionals to manage these systems effectively.'
    )

    pipeline = (
        AriumBuilder()
        .add_agents([content_analyst, summary_generator])
        .start_with(content_analyst)
        .connect(content_analyst, summary_generator)  # analyst feeds generator
        .end_with(summary_generator)
    )
    result = await pipeline.build_and_run([source_text])

    print('\n' + '=' * 50)
    print('ARIUM WORKFLOW RESULT:')
    print('=' * 50)
    print(result)


if __name__ == '__main__':
    # Run both demos back to back, separated by a wide banner.
    print('Running simple example first...')
    asyncio.run(simple_example())

    banner = '=' * 80
    print(f'\n{banner}')
    print('Now running detailed example...')
    asyncio.run(main())
159 changes: 159 additions & 0 deletions flo_ai/examples/example_graph_visualization.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,159 @@
#!/usr/bin/env python3
"""
Example script demonstrating the graph visualization feature in BaseArium.
This script creates a simple workflow with agents and tools, then generates a PNG visualization.
"""

from flo_ai.arium.base import BaseArium
from flo_ai.models.agent import Agent
from flo_ai.tool.flo_tool import flo_tool
from flo_ai.llm.openai_llm import OpenAI
from typing import Literal


# Create a simple validation tool using the @flo_tool decorator
@flo_tool(
    name='validation_tool',
    description='Validates input data and returns validation status',
    parameter_descriptions={
        'data': 'The data to validate',
        'strict': 'Whether to use strict validation rules',
    },
)
async def validate_data(data: str, strict: bool = False) -> str:
    """Validate *data* and return a 'valid: ...' or 'invalid: ...' verdict.

    Empty input is always invalid; strict mode additionally rejects
    anything shorter than 10 characters.
    """
    verdict = 'valid: data passed validation'
    if not data:
        verdict = 'invalid: empty data'
    elif strict and len(data) < 10:
        verdict = 'invalid: data too short for strict validation'
    return verdict


# Create mock agents for demonstration
def create_sample_agents():
    """Build the four demo agents, falling back to a stub LLM if needed.

    Returns a tuple: (input_processor, analyzer, decision_maker,
    output_formatter). A real OpenAI client is used when available;
    otherwise a minimal stand-in object is substituted so the example
    still constructs.
    """
    try:
        llm = OpenAI(model='gpt-4')
    except Exception:
        # OpenAI unavailable (e.g. no API key) -- substitute a bare stub.
        class MockLLM:
            def __init__(self, model):
                self.model = model

        llm = MockLLM('gpt-4')

    # (name, system prompt, role) for each agent, in return order.
    agent_specs = [
        ('input_processor', 'Process incoming requests', 'Input Processor'),
        ('analyzer', 'Analyze processed data', 'Data Analyzer'),
        ('decision_maker', 'Make decisions based on analysis', 'Decision Maker'),
        ('output_formatter', 'Format final output', 'Output Formatter'),
    ]
    agents = [
        Agent(name=name, system_prompt=prompt, llm=llm, role=role)
        for name, prompt, role in agent_specs
    ]
    return tuple(agents)


# Router function for demonstration
def analysis_router(
    analysis_result: str,
) -> Literal['decision_maker', 'output_formatter']:
    """Pick the next node: results mentioning 'complex' need the decision maker."""
    needs_decision = 'complex' in analysis_result.lower()
    return 'decision_maker' if needs_decision else 'output_formatter'


def main():
    """Assemble the demo workflow, validate it, and render it to PNG files."""
    arium = BaseArium()

    # Demo agents plus the tool object attached by the @flo_tool decorator.
    (
        input_processor,
        analyzer,
        decision_maker,
        output_formatter,
    ) = create_sample_agents()
    validation_tool = validate_data.tool

    arium.add_nodes(
        [input_processor, validation_tool, analyzer, decision_maker, output_formatter]
    )

    # Linear spine: input -> validation -> analysis.
    arium.start_at(input_processor)
    for source, target in (
        ('input_processor', 'validation_tool'),
        ('validation_tool', 'analyzer'),
    ):
        arium.add_edge(source, [target])

    # Branch point: the router chooses which node follows the analyzer.
    arium.add_edge(
        'analyzer', ['decision_maker', 'output_formatter'], router=analysis_router
    )

    # The decision maker always hands off to the formatter, which terminates.
    arium.add_edge('decision_maker', ['output_formatter'])
    arium.add_end_to(output_formatter)

    # Bail out early if the graph is malformed.
    try:
        arium.validate_graph()
    except ValueError as e:
        print(f'❌ Graph validation failed: {e}')
        return
    print('✅ Graph validation successful!')

    print('🎨 Generating graph visualization...')

    # Render once with defaults and once with custom sizing/resolution.
    arium.visualize_graph('workflow_graph.png')
    arium.visualize_graph(
        output_path='custom_workflow_graph.png',
        figsize=(14, 10),
        node_size=4000,
        font_size=12,
        dpi=400,
    )

    print('✅ Graph visualization completed!')
    print('📁 Check the following files:')
    print(' - workflow_graph.png (default settings)')
    print(' - custom_workflow_graph.png (custom settings)')


# Script entry point: build the demo workflow and write the PNG files.
if __name__ == '__main__':
    main()
1 change: 0 additions & 1 deletion flo_ai/examples/yaml_agent_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@
# Example YAML configuration
yaml_config = """
apiVersion: flo/alpha-v1
kind: FloAgent
metadata:
name: email-summary-flo
version: 1.0.0
Expand Down
Loading