From 34ecdebdd9a885506048b51518753062e0e77c68 Mon Sep 17 00:00:00 2001 From: vishnu r kumar Date: Sun, 14 Dec 2025 20:14:10 +0530 Subject: [PATCH] chore: update flo-ai documentation --- .github/workflows/build-project.yml | 2 +- documentation/docs.json | 30 +- documentation/essentials/agents.mdx | 301 ------ documentation/essentials/arium.mdx | 377 ------- documentation/essentials/yaml-agents.mdx | 413 -------- documentation/essentials/yaml-workflows.mdx | 562 ---------- .../advanced}/images.mdx | 0 .../advanced}/llm-providers.mdx | 0 .../advanced}/markdown.mdx | 0 .../advanced}/navigation.mdx | 0 .../advanced}/reusable-snippets.mdx | 0 .../advanced}/routing.mdx | 0 .../advanced}/settings.mdx | 0 .../advanced}/telemetry.mdx | 0 .../{essentials => flo-ai/advanced}/tools.mdx | 0 documentation/flo-ai/core-features/agents.mdx | 411 ++++++++ documentation/flo-ai/core-features/arium.mdx | 620 +++++++++++ .../flo-ai/core-features/messages.mdx | 609 +++++++++++ .../flo-ai/core-features/yaml-agents.mdx | 784 ++++++++++++++ .../flo-ai/core-features/yaml-workflows.mdx | 997 ++++++++++++++++++ documentation/{ => flo-ai}/development.mdx | 114 +- .../{essentials => flo-ai/examples}/code.mdx | 0 .../{flo-ai.mdx => flo-ai/index.mdx} | 2 +- documentation/{ => flo-ai}/quickstart.mdx | 4 +- flo_ai/README.md | 46 - flo_ai/flo_ai/agent/agent.py | 2 +- flo_ai/flo_ai/arium/nodes.py | 4 +- flo_ai/pyproject.toml | 5 +- wavefront/README.md | 22 +- .../server/docker/call_processing.Dockerfile | 2 +- wavefront/server/docker/floconsole.Dockerfile | 2 +- wavefront/server/docker/floware.Dockerfile | 2 +- .../server/docker/inference_app.Dockerfile | 2 +- .../modules/agents_module/pyproject.toml | 2 +- .../knowledge_base_module/pyproject.toml | 2 +- .../modules/tools_module/pyproject.toml | 2 +- wavefront/server/uv.lock | 10 +- 37 files changed, 3505 insertions(+), 1824 deletions(-) delete mode 100644 documentation/essentials/agents.mdx delete mode 100644 documentation/essentials/arium.mdx delete mode 100644 documentation/essentials/yaml-agents.mdx delete mode 100644 documentation/essentials/yaml-workflows.mdx rename documentation/{essentials => flo-ai/advanced}/images.mdx (100%) rename documentation/{essentials => flo-ai/advanced}/llm-providers.mdx (100%) rename documentation/{essentials => flo-ai/advanced}/markdown.mdx (100%) rename documentation/{essentials => flo-ai/advanced}/navigation.mdx (100%) rename documentation/{essentials => flo-ai/advanced}/reusable-snippets.mdx (100%) rename documentation/{essentials => flo-ai/advanced}/routing.mdx (100%) rename documentation/{essentials => flo-ai/advanced}/settings.mdx (100%) rename documentation/{essentials => flo-ai/advanced}/telemetry.mdx (100%) rename documentation/{essentials => flo-ai/advanced}/tools.mdx (100%) create mode 100644 documentation/flo-ai/core-features/agents.mdx create mode 100644 documentation/flo-ai/core-features/arium.mdx create mode 100644 documentation/flo-ai/core-features/messages.mdx create mode 100644 documentation/flo-ai/core-features/yaml-agents.mdx create mode 100644 documentation/flo-ai/core-features/yaml-workflows.mdx rename documentation/{ => flo-ai}/development.mdx (55%) rename documentation/{essentials => flo-ai/examples}/code.mdx (100%) rename documentation/{flo-ai.mdx => flo-ai/index.mdx} (98%) rename documentation/{ => flo-ai}/quickstart.mdx (97%) diff --git a/.github/workflows/build-project.yml b/.github/workflows/build-project.yml index 369a1c1c..ee749618 100644 --- a/.github/workflows/build-project.yml +++ 
b/.github/workflows/build-project.yml @@ -22,7 +22,7 @@ jobs: - name: Install uv uses: astral-sh/setup-uv@v5 with: - version: "0.7.3" + version: "0.8.6" - name: Install dependencies run: cd flo_ai && uv sync diff --git a/documentation/docs.json b/documentation/docs.json index 42591c26..7e882a68 100644 --- a/documentation/docs.json +++ b/documentation/docs.json @@ -27,28 +27,34 @@ { "group": "Getting started", "pages": [ - "flo-ai", - "quickstart", - "development" + "flo-ai/index", + "flo-ai/quickstart", + "flo-ai/development" ] }, { "group": "Core Features", "pages": [ - "essentials/agents", - "essentials/arium", - "essentials/yaml-agents", - "essentials/code" + "flo-ai/core-features/messages", + "flo-ai/core-features/agents", + "flo-ai/core-features/arium", + "flo-ai/core-features/yaml-agents", + "flo-ai/core-features/yaml-workflows" ] }, { "group": "Advanced", "pages": [ - "essentials/llm-providers", - "essentials/tools", - "essentials/yaml-workflows", - "essentials/routing", - "essentials/telemetry" + "flo-ai/advanced/llm-providers", + "flo-ai/advanced/tools", + "flo-ai/advanced/routing", + "flo-ai/advanced/telemetry" + ] + }, + { + "group": "Examples", + "pages": [ + "flo-ai/examples/code" ] } ] diff --git a/documentation/essentials/agents.mdx b/documentation/essentials/agents.mdx deleted file mode 100644 index 74441499..00000000 --- a/documentation/essentials/agents.mdx +++ /dev/null @@ -1,301 +0,0 @@ ---- -title: "Agents" -description: "Learn how to create and configure AI agents with Flo AI" -icon: "robot" ---- - -## Creating Agents - -Agents are the core building blocks of Flo AI. They represent AI-powered entities that can process inputs, use tools, and generate responses. - -### Basic Agent Creation - -Create a simple conversational agent: - -```python -from flo_ai.agent import AgentBuilder -from flo_ai.llm import OpenAI - -agent = ( - AgentBuilder() - .with_name('Customer Support') - .with_prompt('You are a helpful customer support agent.') - .with_llm(OpenAI(model='gpt-4o-mini')) - .build() -) - -response = await agent.run('How can I reset my password?') -``` - -### Agent Configuration - -Configure agents with various options: - -```python -agent = ( - AgentBuilder() - .with_name('Data Analyst') - .with_prompt('You are an expert data analyst.') - .with_llm(OpenAI(model='gpt-4o', temperature=0.3)) - .with_retries(3) # Retry on failure - .with_max_tokens(1000) - .build() -) -``` - -## Agent Types - -### Conversational Agents - -Basic agents for chat and Q&A: - -```python -conversational_agent = ( - AgentBuilder() - .with_name('Chat Assistant') - .with_prompt('You are a friendly conversational assistant.') - .with_llm(OpenAI(model='gpt-4o-mini')) - .build() -) -``` - -### Tool-Using Agents - -Agents that can use external tools: - -```python -from flo_ai.tool import flo_tool - -@flo_tool(description="Get weather information") -async def get_weather(city: str) -> str: - return f"Weather in {city}: sunny, 25°C" - -tool_agent = ( - AgentBuilder() - .with_name('Weather Assistant') - .with_prompt('You help users get weather information.') - .with_llm(OpenAI(model='gpt-4o-mini')) - .with_tools([get_weather.tool]) - .build() -) -``` - -### Structured Output Agents - -Agents that return structured data: - -```python -from pydantic import BaseModel, Field - -class AnalysisResult(BaseModel): - summary: str = Field(description="Executive summary") - key_findings: list = Field(description="List of key findings") - recommendations: list = Field(description="Actionable recommendations") - 
-structured_agent = ( - AgentBuilder() - .with_name('Business Analyst') - .with_prompt('Analyze business data and provide insights.') - .with_llm(OpenAI(model='gpt-4o')) - .with_output_schema(AnalysisResult) - .build() -) -``` - -## Agent Capabilities - -### Variable Resolution - -Use dynamic variables in agent prompts: - -```python -agent = ( - AgentBuilder() - .with_name('Personalized Assistant') - .with_prompt('Hello ! You are at .') - .with_llm(OpenAI(model='gpt-4o-mini')) - .build() -) - -# Use variables at runtime -variables = { - 'user_name': 'John', - 'user_role': 'Data Scientist', - 'company': 'TechCorp' -} - -response = await agent.run( - 'What should I focus on today?', - variables=variables -) -``` - -### Document Processing - -Process PDF and text documents: - -```python -from flo_ai.models.document import DocumentMessage, DocumentType - -# Create document message -document = DocumentMessage( - document_type=DocumentType.PDF, - document_file_path='report.pdf' -) - -# Process with agent -response = await agent.run([document]) -``` - -### Error Handling - -Built-in retry mechanisms and error recovery: - -```python -robust_agent = ( - AgentBuilder() - .with_name('Reliable Agent') - .with_prompt('You are a reliable assistant.') - .with_llm(OpenAI(model='gpt-4o')) - .with_retries(3) # Retry up to 3 times - .with_timeout(30) # 30 second timeout - .build() -) -``` - -## Best Practices - -### Prompt Engineering - -- **Be specific**: Clearly define the agent's role and capabilities -- **Use examples**: Provide examples of expected inputs and outputs -- **Set boundaries**: Define what the agent should and shouldn't do - -```python -well_prompted_agent = ( - AgentBuilder() - .with_name('Code Reviewer') - .with_prompt(''' - You are an expert code reviewer. Your role is to: - 1. Review code for bugs, security issues, and best practices - 2. Suggest improvements and optimizations - 3. Provide constructive feedback - - Always be specific about issues and provide actionable suggestions. - Focus on code quality, performance, and maintainability. 
- ''') - .with_llm(OpenAI(model='gpt-4o')) - .build() -) -``` - -### Model Selection - -Choose the right model for your use case: - -- **GPT-4o**: Best for complex reasoning and analysis -- **GPT-4o-mini**: Good balance of performance and cost -- **Claude-3.5-Sonnet**: Excellent for creative tasks -- **Gemini**: Good for multilingual applications - -### Performance Optimization - -```python -# Use streaming for long responses -streaming_agent = ( - AgentBuilder() - .with_name('Content Generator') - .with_prompt('Generate detailed content.') - .with_llm(OpenAI(model='gpt-4o', stream=True)) - .build() -) - -# Use caching for repeated queries -cached_agent = ( - AgentBuilder() - .with_name('Cached Agent') - .with_prompt('You provide consistent responses.') - .with_llm(OpenAI(model='gpt-4o-mini')) - .with_cache(ttl=3600) # Cache for 1 hour - .build() -) -``` - -## Agent Lifecycle - -### Initialization - -```python -# Create agent -agent = AgentBuilder().with_name('My Agent').build() - -# Initialize with configuration -await agent.initialize() -``` - -### Execution - -```python -# Simple execution -response = await agent.run('Hello!') - -# With context -response = await agent.run('Hello!', context={'user_id': '123'}) - -# With variables -response = await agent.run('Hello!', variables={'name': 'John'}) -``` - -### Cleanup - -```python -# Clean up resources -await agent.cleanup() -``` - -## Advanced Features - -### Custom Memory - -```python -from flo_ai.arium.memory import BaseMemory - -class CustomMemory(BaseMemory): - def __init__(self): - self.messages = [] - - def add(self, message): - self.messages.append(message) - - def get(self): - return self.messages - -agent = ( - AgentBuilder() - .with_name('Memory Agent') - .with_prompt('You remember previous conversations.') - .with_llm(OpenAI(model='gpt-4o')) - .with_memory(CustomMemory()) - .build() -) -``` - -### Custom Event Handlers - -```python -async def on_agent_start(agent, input_data): - print(f"Agent {agent.name} started processing") - -async def on_agent_complete(agent, result): - print(f"Agent {agent.name} completed with result: {result}") - -agent = ( - AgentBuilder() - .with_name('Event Agent') - .with_prompt('You are an event-driven agent.') - .with_llm(OpenAI(model='gpt-4o')) - .with_event_handler('start', on_agent_start) - .with_event_handler('complete', on_agent_complete) - .build() -) -``` diff --git a/documentation/essentials/arium.mdx b/documentation/essentials/arium.mdx deleted file mode 100644 index 0bd95b76..00000000 --- a/documentation/essentials/arium.mdx +++ /dev/null @@ -1,377 +0,0 @@ ---- -title: "Arium Workflows" -description: "Create complex multi-agent workflows with Arium orchestration" -icon: "sitemap" ---- - -## What is Arium? - -Arium is Flo AI's powerful workflow orchestration engine for creating complex multi-agent workflows. It allows you to chain agents together, implement conditional routing, and build sophisticated AI systems. 
- -## Basic Workflow Creation - -### Simple Agent Chain - -Create a linear workflow with multiple agents: - -```python -from flo_ai.arium import AriumBuilder -from flo_ai.agent import Agent -from flo_ai.llm import OpenAI - -async def simple_chain(): - llm = OpenAI(model='gpt-4o-mini') - - # Create agents - analyst = Agent( - name='content_analyst', - system_prompt='Analyze the input and extract key insights.', - llm=llm - ) - - summarizer = Agent( - name='summarizer', - system_prompt='Create a concise summary based on the analysis.', - llm=llm - ) - - # Build and run workflow - result = await ( - AriumBuilder() - .add_agents([analyst, summarizer]) - .start_with(analyst) - .connect(analyst, summarizer) - .end_with(summarizer) - .build_and_run(["Analyze this complex business report..."]) - ) - - return result -``` - -### Conditional Routing - -Route to different agents based on conditions: - -```python -from flo_ai.arium.memory import BaseMemory - -def route_by_type(memory: BaseMemory) -> str: - """Route based on classification result""" - messages = memory.get() - last_message = str(messages[-1]) if messages else "" - - if "technical" in last_message.lower(): - return "tech_specialist" - else: - return "business_specialist" - -# Build workflow with conditional routing -result = await ( - AriumBuilder() - .add_agents([classifier, tech_specialist, business_specialist, final_agent]) - .start_with(classifier) - .add_edge(classifier, [tech_specialist, business_specialist], route_by_type) - .connect(tech_specialist, final_agent) - .connect(business_specialist, final_agent) - .end_with(final_agent) - .build_and_run(["How can we optimize our database performance?"]) -) -``` - -## YAML-Based Workflows - -Define entire workflows in YAML for easy management: - -```yaml -metadata: - name: "content-analysis-workflow" - version: "1.0.0" - description: "Multi-agent content analysis pipeline" - -arium: - agents: - - name: "analyzer" - role: "Content Analyst" - job: "Analyze the input content and extract key insights." - model: - provider: "openai" - name: "gpt-4o-mini" - - - name: "summarizer" - role: "Content Summarizer" - job: "Create a concise summary based on the analysis." 
- model: - provider: "anthropic" - name: "claude-3-5-sonnet-20240620" - - workflow: - start: "analyzer" - edges: - - from: "analyzer" - to: ["summarizer"] - end: ["summarizer"] -``` - -```python -# Run YAML workflow -result = await ( - AriumBuilder() - .from_yaml(yaml_file='workflow.yaml') - .build_and_run(["Analyze this quarterly business report..."]) -) -``` - -## Advanced Routing - -### LLM-Powered Routers - -Use LLMs for intelligent routing decisions: - -```yaml -routers: - - name: "content_type_router" - type: "smart" # Uses LLM for intelligent routing - routing_options: - technical_writer: "Technical content, documentation, tutorials" - creative_writer: "Creative writing, storytelling, fiction" - marketing_writer: "Marketing copy, sales content, campaigns" - model: - provider: "openai" - name: "gpt-4o-mini" -``` - -### ReflectionRouter - -For A→B→A→C feedback patterns: - -```yaml -routers: - - name: "reflection_router" - type: "reflection" - flow_pattern: [writer, critic, writer] # A → B → A pattern - model: - provider: "openai" - name: "gpt-4o-mini" -``` - -### PlanExecuteRouter - -For Cursor-style plan-and-execute workflows: - -```yaml -routers: - - name: "plan_router" - type: "plan_execute" - agents: - planner: "Creates detailed execution plans" - developer: "Implements features according to plan" - tester: "Tests implementations and validates functionality" - reviewer: "Reviews and approves completed work" - settings: - planner_agent: planner - executor_agent: developer - reviewer_agent: reviewer -``` - -## Workflow Patterns - -### Sequential Processing - -```python -# A → B → C -workflow = ( - AriumBuilder() - .add_agents([agent_a, agent_b, agent_c]) - .start_with(agent_a) - .connect(agent_a, agent_b) - .connect(agent_b, agent_c) - .end_with(agent_c) -) -``` - -### Parallel Processing - -```python -# A → [B, C] → D -workflow = ( - AriumBuilder() - .add_agents([agent_a, agent_b, agent_c, agent_d]) - .start_with(agent_a) - .connect(agent_a, [agent_b, agent_c]) - .connect(agent_b, agent_d) - .connect(agent_c, agent_d) - .end_with(agent_d) -) -``` - -### Fan-out/Fan-in - -```python -# A → [B, C, D] → E -workflow = ( - AriumBuilder() - .add_agents([agent_a, agent_b, agent_c, agent_d, agent_e]) - .start_with(agent_a) - .connect(agent_a, [agent_b, agent_c, agent_d]) - .connect(agent_b, agent_e) - .connect(agent_c, agent_e) - .connect(agent_d, agent_e) - .end_with(agent_e) -) -``` - -## Memory Management - -### Shared Memory - -```python -from flo_ai.arium.memory import MessageMemory - -# Create shared memory -shared_memory = MessageMemory() - -workflow = ( - AriumBuilder() - .add_agents([agent_a, agent_b]) - .with_memory(shared_memory) - .start_with(agent_a) - .connect(agent_a, agent_b) - .end_with(agent_b) -) -``` - -### Custom Memory - -```python -from flo_ai.arium.memory import BaseMemory - -class CustomMemory(BaseMemory): - def __init__(self): - self.data = {} - - def add(self, key, value): - self.data[key] = value - - def get(self, key): - return self.data.get(key) - -custom_memory = CustomMemory() -``` - -## Event Handling - -### Workflow Events - -```python -async def on_workflow_start(workflow, input_data): - print(f"Workflow started with input: {input_data}") - -async def on_workflow_complete(workflow, result): - print(f"Workflow completed with result: {result}") - -async def on_agent_start(agent, input_data): - print(f"Agent {agent.name} started") - -workflow = ( - AriumBuilder() - .add_agents([agent_a, agent_b]) - .with_event_handler('workflow_start', on_workflow_start) - 
.with_event_handler('workflow_complete', on_workflow_complete) - .with_event_handler('agent_start', on_agent_start) - .start_with(agent_a) - .connect(agent_a, agent_b) - .end_with(agent_b) -) -``` - -## Error Handling - -### Retry Logic - -```python -workflow = ( - AriumBuilder() - .add_agents([agent_a, agent_b]) - .with_retries(3) # Retry failed agents up to 3 times - .with_timeout(60) # 60 second timeout - .start_with(agent_a) - .connect(agent_a, agent_b) - .end_with(agent_b) -) -``` - -### Error Recovery - -```python -async def error_handler(agent, error): - print(f"Agent {agent.name} failed: {error}") - # Implement custom error recovery logic - return "fallback_response" - -workflow = ( - AriumBuilder() - .add_agents([agent_a, agent_b]) - .with_error_handler(error_handler) - .start_with(agent_a) - .connect(agent_a, agent_b) - .end_with(agent_b) -) -``` - -## Performance Optimization - -### Parallel Execution - -```python -# Execute multiple agents in parallel -workflow = ( - AriumBuilder() - .add_agents([agent_a, agent_b, agent_c]) - .start_with(agent_a) - .connect_parallel(agent_a, [agent_b, agent_c]) - .end_with([agent_b, agent_c]) -) -``` - -### Caching - -```python -workflow = ( - AriumBuilder() - .add_agents([agent_a, agent_b]) - .with_cache(ttl=3600) # Cache results for 1 hour - .start_with(agent_a) - .connect(agent_a, agent_b) - .end_with(agent_b) -) -``` - -## Best Practices - -### Workflow Design - -1. **Keep it simple**: Start with linear workflows before adding complexity -2. **Use meaningful names**: Name agents and workflows descriptively -3. **Handle errors**: Always implement error handling and recovery -4. **Test thoroughly**: Test workflows with various inputs - -### Performance Tips - -1. **Use appropriate models**: Choose models based on task complexity -2. **Implement caching**: Cache expensive operations -3. **Optimize routing**: Use efficient routing logic -4. **Monitor performance**: Use telemetry to track workflow performance - -### Debugging - -```python -# Enable debug mode -workflow = ( - AriumBuilder() - .add_agents([agent_a, agent_b]) - .with_debug(True) # Enable debug logging - .start_with(agent_a) - .connect(agent_a, agent_b) - .end_with(agent_b) -) -``` diff --git a/documentation/essentials/yaml-agents.mdx b/documentation/essentials/yaml-agents.mdx deleted file mode 100644 index a50b40c2..00000000 --- a/documentation/essentials/yaml-agents.mdx +++ /dev/null @@ -1,413 +0,0 @@ ---- -title: "YAML-Based Agents" -description: "Create and configure agents using YAML configuration files" -icon: "file-code" ---- - -## YAML Agent Configuration - -Flo AI supports creating agents entirely through YAML configuration files, making it easy to version control, share, and manage agent configurations. - -## Basic YAML Agent - -Create a simple agent using YAML: - -```yaml agent.yaml -metadata: - name: "customer-support-agent" - version: "1.0.0" - description: "Customer support agent for handling inquiries" - -agent: - name: "Customer Support" - prompt: "You are a helpful customer support agent. Provide friendly and accurate assistance." 
- model: - provider: "openai" - name: "gpt-4o-mini" - temperature: 0.7 - max_tokens: 1000 - retries: 3 -``` - -```python Load YAML Agent -from flo_ai.agent import AgentBuilder - -# Load agent from YAML -agent = AgentBuilder.from_yaml('agent.yaml') -response = await agent.run('How can I reset my password?') -``` - -## Advanced YAML Configuration - -### Agent with Tools - -```yaml tool-agent.yaml -metadata: - name: "calculator-agent" - version: "1.0.0" - -agent: - name: "Calculator Assistant" - prompt: "You are a math assistant that can perform calculations." - model: - provider: "anthropic" - name: "claude-3-5-sonnet-20240620" - temperature: 0.3 - - tools: - - name: "calculate" - description: "Perform mathematical calculations" - function: "calculate" - parameters: - operation: - type: "string" - description: "Mathematical operation (add, subtract, multiply, divide)" - x: - type: "number" - description: "First number" - y: - type: "number" - description: "Second number" -``` - -### Agent with Structured Output - -```yaml structured-agent.yaml -metadata: - name: "analysis-agent" - version: "1.0.0" - -agent: - name: "Business Analyst" - prompt: "Analyze business data and provide structured insights." - model: - provider: "openai" - name: "gpt-4o" - temperature: 0.2 - - output_schema: - type: "pydantic" - model: "AnalysisResult" - fields: - summary: - type: "string" - description: "Executive summary" - key_findings: - type: "array" - description: "List of key findings" - recommendations: - type: "array" - description: "Actionable recommendations" -``` - -### Agent with Variables - -```yaml variable-agent.yaml -metadata: - name: "personalized-agent" - version: "1.0.0" - -agent: - name: "Personalized Assistant" - prompt: | - Hello ! You are a at . - Provide personalized assistance based on the user's role and company context. 
- model: - provider: "openai" - name: "gpt-4o-mini" - - variables: - - name: "user_name" - description: "User's name" - required: true - - name: "user_role" - description: "User's role" - required: true - - name: "company" - description: "Company name" - required: false - default: "TechCorp" -``` - -## YAML Schema Reference - -### Metadata Section - -```yaml -metadata: - name: "agent-name" # Required: Unique agent identifier - version: "1.0.0" # Required: Semantic version - description: "Agent description" # Optional: Human-readable description - author: "Your Name" # Optional: Agent author - tags: ["tag1", "tag2"] # Optional: Categorization tags -``` - -### Agent Configuration - -```yaml -agent: - name: "Agent Display Name" # Required: Human-readable name - prompt: "System prompt" # Required: Agent's system prompt - model: # Required: LLM configuration - provider: "openai" # Required: openai, anthropic, google, ollama - name: "gpt-4o-mini" # Required: Model name - temperature: 0.7 # Optional: 0.0 to 2.0 - max_tokens: 1000 # Optional: Maximum response length - timeout: 30 # Optional: Request timeout in seconds - - # Optional configurations - retries: 3 # Number of retry attempts - cache_ttl: 3600 # Cache time-to-live in seconds - memory_type: "message" # Memory type: message, custom - tools: [] # List of tools (see tools section) - variables: [] # List of variables (see variables section) - output_schema: {} # Structured output configuration -``` - -### Tools Configuration - -```yaml -tools: - - name: "tool_name" # Required: Tool identifier - description: "Tool description" # Required: Tool description - function: "function_name" # Required: Python function name - parameters: # Required: Function parameters - param1: - type: "string" # Parameter type - description: "Parameter description" - required: true # Whether parameter is required - default: "value" # Default value -``` - -### Variables Configuration - -```yaml -variables: - - name: "variable_name" # Required: Variable name - description: "Description" # Required: Variable description - type: "string" # Optional: string, number, boolean - required: true # Optional: Whether variable is required - default: "default_value" # Optional: Default value -``` - -## Loading and Using YAML Agents - -### Basic Loading - -```python -from flo_ai.agent import AgentBuilder - -# Load from file -agent = AgentBuilder.from_yaml('agent.yaml') - -# Load from string -yaml_content = """ -agent: - name: "Test Agent" - prompt: "You are a test agent." - model: - provider: "openai" - name: "gpt-4o-mini" -""" - -agent = AgentBuilder.from_yaml_string(yaml_content) -``` - -### Using Variables - -```python -# Load agent with variables -agent = AgentBuilder.from_yaml('variable-agent.yaml') - -# Provide variables at runtime -variables = { - 'user_name': 'John', - 'user_role': 'Data Scientist', - 'company': 'TechCorp' -} - -response = await agent.run( - 'What should I focus on today?', - variables=variables -) -``` - -### Tool Integration - -```python -# Define tool function -async def calculate(operation: str, x: float, y: float) -> float: - operations = { - 'add': lambda: x + y, - 'subtract': lambda: x - y, - 'multiply': lambda: x * y, - 'divide': lambda: x / y if y != 0 else 0, - } - return operations.get(operation, lambda: 0)() - -# Load agent with tools -agent = AgentBuilder.from_yaml('tool-agent.yaml') -agent.add_tool(calculate) - -response = await agent.run('Calculate 5 plus 3') -``` - -## Best Practices - -### YAML Structure - -1. 
**Use meaningful names**: Choose descriptive agent and variable names -2. **Version your configurations**: Always include version numbers -3. **Document thoroughly**: Add descriptions for all components -4. **Validate schemas**: Use YAML schema validation tools - -### Performance Optimization - -```yaml -# Optimize for performance -agent: - name: "Optimized Agent" - prompt: "Concise and effective prompt" - model: - provider: "openai" - name: "gpt-4o-mini" # Use faster model for simple tasks - temperature: 0.3 # Lower temperature for consistency - max_tokens: 500 # Limit response length - cache_ttl: 3600 # Cache responses for 1 hour - retries: 2 # Limit retries to avoid costs -``` - -### Security Considerations - -```yaml -# Secure configuration -agent: - name: "Secure Agent" - prompt: | - You are a secure assistant. Never: - - Share sensitive information - - Execute dangerous commands - - Access unauthorized resources - model: - provider: "openai" - name: "gpt-4o" - temperature: 0.1 # Lower temperature for consistency - max_tokens: 200 # Limit response length - timeout: 10 # Short timeout for security -``` - -## Validation and Testing - -### Schema Validation - -```python -from flo_ai.agent import AgentBuilder -import yaml - -# Validate YAML structure -def validate_agent_yaml(file_path): - try: - with open(file_path, 'r') as f: - config = yaml.safe_load(f) - - # Check required fields - assert 'agent' in config - assert 'name' in config['agent'] - assert 'prompt' in config['agent'] - assert 'model' in config['agent'] - - print("✅ YAML configuration is valid") - return True - except Exception as e: - print(f"❌ YAML validation failed: {e}") - return False - -validate_agent_yaml('agent.yaml') -``` - -### Testing YAML Agents - -```python -import asyncio - -async def test_yaml_agent(): - agent = AgentBuilder.from_yaml('agent.yaml') - - # Test basic functionality - response = await agent.run('Hello!') - assert response is not None - print(f"✅ Agent responds: {response}") - - # Test with variables - if 'variables' in agent.config: - variables = {'test_var': 'test_value'} - response = await agent.run('Test message', variables=variables) - print(f"✅ Agent with variables: {response}") - -asyncio.run(test_yaml_agent()) -``` - -## Examples - -### Customer Support Agent - -```yaml customer-support.yaml -metadata: - name: "customer-support" - version: "1.0.0" - description: "Handles customer inquiries and support requests" - -agent: - name: "Customer Support Agent" - prompt: | - You are a professional customer support agent. Your role is to: - 1. Listen to customer concerns with empathy - 2. Provide accurate and helpful information - 3. Escalate complex issues when necessary - 4. Maintain a friendly and professional tone - - Always be patient, understanding, and solution-oriented. - model: - provider: "openai" - name: "gpt-4o-mini" - temperature: 0.3 - max_tokens: 1000 - retries: 2 -``` - -### Data Analysis Agent - -```yaml data-analyst.yaml -metadata: - name: "data-analyst" - version: "1.0.0" - description: "Analyzes data and provides insights" - -agent: - name: "Data Analyst" - prompt: | - You are an expert data analyst. Analyze the provided data and: - 1. Identify key patterns and trends - 2. Provide statistical insights - 3. Suggest actionable recommendations - 4. 
Highlight any anomalies or concerns - model: - provider: "openai" - name: "gpt-4o" - temperature: 0.2 - output_schema: - type: "pydantic" - model: "AnalysisResult" - fields: - summary: - type: "string" - description: "Executive summary" - insights: - type: "array" - description: "Key insights" - recommendations: - type: "array" - description: "Actionable recommendations" -``` - -This YAML-based approach makes agent configuration declarative, versionable, and easily shareable across teams! diff --git a/documentation/essentials/yaml-workflows.mdx b/documentation/essentials/yaml-workflows.mdx deleted file mode 100644 index 718685ec..00000000 --- a/documentation/essentials/yaml-workflows.mdx +++ /dev/null @@ -1,562 +0,0 @@ ---- -title: 'YAML Workflows' -description: 'Define complex multi-agent workflows using YAML configuration' -icon: 'file-code' ---- - -## YAML Workflow Configuration - -Flo AI supports defining entire multi-agent workflows in YAML, making it easy to version control, share, and manage complex AI systems. - -## Basic Workflow Structure - -```yaml basic-workflow.yaml -metadata: - name: "content-analysis-workflow" - version: "1.0.0" - description: "Multi-agent content analysis pipeline" - -arium: - agents: - - name: "analyzer" - role: "Content Analyst" - job: "Analyze the input content and extract key insights." - model: - provider: "openai" - name: "gpt-4o-mini" - temperature: 0.3 - - - name: "summarizer" - role: "Content Summarizer" - job: "Create a concise summary based on the analysis." - model: - provider: "anthropic" - name: "claude-3-5-sonnet-20240620" - temperature: 0.2 - - workflow: - start: "analyzer" - edges: - - from: "analyzer" - to: ["summarizer"] - end: ["summarizer"] -``` - -## Advanced Workflow Patterns - -### Conditional Routing - -```yaml conditional-workflow.yaml -metadata: - name: "support-routing-workflow" - version: "1.0.0" - -arium: - agents: - - name: "classifier" - role: "Request Classifier" - job: "Classify incoming requests by type and urgency." - model: - provider: "openai" - name: "gpt-4o-mini" - - - name: "technical_support" - role: "Technical Support" - job: "Handle technical issues and troubleshooting." - model: - provider: "openai" - name: "gpt-4o" - - - name: "billing_support" - role: "Billing Support" - job: "Handle billing and account questions." - model: - provider: "anthropic" - name: "claude-3-5-sonnet-20240620" - - - name: "general_support" - role: "General Support" - job: "Handle general inquiries and questions." - model: - provider: "openai" - name: "gpt-4o-mini" - - routers: - - name: "support_router" - type: "conditional" - routing_logic: | - def route_request(memory): - last_message = str(memory.get()[-1]) if memory.get() else "" - if "technical" in last_message.lower(): - return "technical_support" - elif "billing" in last_message.lower(): - return "billing_support" - else: - return "general_support" - - workflow: - start: "classifier" - edges: - - from: "classifier" - to: ["technical_support", "billing_support", "general_support"] - router: "support_router" - end: ["technical_support", "billing_support", "general_support"] -``` - -### LLM-Powered Routing - -```yaml smart-routing-workflow.yaml -metadata: - name: "smart-content-workflow" - version: "1.0.0" - -arium: - agents: - - name: "content_analyzer" - role: "Content Analyzer" - job: "Analyze content and determine the best processing approach." 
- model: - provider: "openai" - name: "gpt-4o-mini" - - - name: "technical_writer" - role: "Technical Writer" - job: "Create technical documentation and guides." - model: - provider: "openai" - name: "gpt-4o" - - - name: "creative_writer" - role: "Creative Writer" - job: "Create engaging creative content." - model: - provider: "anthropic" - name: "claude-3-5-sonnet-20240620" - - - name: "marketing_writer" - role: "Marketing Writer" - job: "Create marketing copy and promotional content." - model: - provider: "openai" - name: "gpt-4o" - - routers: - - name: "content_router" - type: "smart" - routing_options: - technical_writer: "Technical content, documentation, tutorials, code examples" - creative_writer: "Creative writing, storytelling, fiction, poetry" - marketing_writer: "Marketing copy, sales content, campaigns, advertisements" - model: - provider: "openai" - name: "gpt-4o-mini" - temperature: 0.1 - - workflow: - start: "content_analyzer" - edges: - - from: "content_analyzer" - to: ["technical_writer", "creative_writer", "marketing_writer"] - router: "content_router" - end: ["technical_writer", "creative_writer", "marketing_writer"] -``` - -## Reflection Patterns - -### A→B→A→C Pattern - -```yaml reflection-workflow.yaml -metadata: - name: "reflection-writing-workflow" - version: "1.0.0" - -arium: - agents: - - name: "writer" - role: "Content Writer" - job: "Write initial content based on requirements." - model: - provider: "openai" - name: "gpt-4o" - - - name: "critic" - role: "Content Critic" - job: "Review and critique the written content." - model: - provider: "anthropic" - name: "claude-3-5-sonnet-20240620" - - - name: "finalizer" - role: "Content Finalizer" - job: "Create the final polished version." - model: - provider: "openai" - name: "gpt-4o" - - routers: - - name: "reflection_router" - type: "reflection" - flow_pattern: ["writer", "critic", "writer"] - model: - provider: "openai" - name: "gpt-4o-mini" - - workflow: - start: "writer" - edges: - - from: "writer" - to: ["critic"] - - from: "critic" - to: ["writer"] - router: "reflection_router" - - from: "writer" - to: ["finalizer"] - end: ["finalizer"] -``` - -## Plan-Execute Workflows - -### Cursor-Style Development - -```yaml plan-execute-workflow.yaml -metadata: - name: "development-workflow" - version: "1.0.0" - -arium: - agents: - - name: "planner" - role: "Development Planner" - job: "Create detailed execution plans for development tasks." - model: - provider: "openai" - name: "gpt-4o" - - - name: "developer" - role: "Code Developer" - job: "Implement features according to the plan." - model: - provider: "openai" - name: "gpt-4o" - - - name: "tester" - role: "Code Tester" - job: "Test implementations and validate functionality." - model: - provider: "anthropic" - name: "claude-3-5-sonnet-20240620" - - - name: "reviewer" - role: "Code Reviewer" - job: "Review and approve completed work." 
- model: - provider: "openai" - name: "gpt-4o" - - routers: - - name: "plan_execute_router" - type: "plan_execute" - settings: - planner_agent: "planner" - executor_agent: "developer" - reviewer_agent: "reviewer" - max_iterations: 3 - model: - provider: "openai" - name: "gpt-4o-mini" - - workflow: - start: "planner" - edges: - - from: "planner" - to: ["developer"] - - from: "developer" - to: ["tester"] - - from: "tester" - to: ["reviewer"] - router: "plan_execute_router" - end: ["reviewer"] -``` - -## Parallel Processing - -### Fan-out/Fan-in Pattern - -```yaml parallel-workflow.yaml -metadata: - name: "parallel-analysis-workflow" - version: "1.0.0" - -arium: - agents: - - name: "coordinator" - role: "Analysis Coordinator" - job: "Coordinate parallel analysis tasks." - model: - provider: "openai" - name: "gpt-4o-mini" - - - name: "sentiment_analyzer" - role: "Sentiment Analyzer" - job: "Analyze sentiment and emotional tone." - model: - provider: "openai" - name: "gpt-4o-mini" - - - name: "topic_extractor" - role: "Topic Extractor" - job: "Extract main topics and themes." - model: - provider: "anthropic" - name: "claude-3-5-sonnet-20240620" - - - name: "keyword_analyzer" - role: "Keyword Analyzer" - job: "Extract and analyze keywords." - model: - provider: "openai" - name: "gpt-4o-mini" - - - name: "synthesizer" - role: "Analysis Synthesizer" - job: "Combine all analysis results into a comprehensive report." - model: - provider: "openai" - name: "gpt-4o" - - workflow: - start: "coordinator" - edges: - - from: "coordinator" - to: ["sentiment_analyzer", "topic_extractor", "keyword_analyzer"] - - from: ["sentiment_analyzer", "topic_extractor", "keyword_analyzer"] - to: ["synthesizer"] - end: ["synthesizer"] -``` - -## Complex Multi-Step Workflows - -### Research and Analysis Pipeline - -```yaml research-workflow.yaml -metadata: - name: "research-pipeline" - version: "1.0.0" - -arium: - agents: - - name: "researcher" - role: "Research Agent" - job: "Conduct research on the given topic." - model: - provider: "openai" - name: "gpt-4o" - tools: - - name: "web_search" - description: "Search the web for information" - - name: "database_query" - description: "Query internal databases" - - - name: "analyzer" - role: "Data Analyst" - job: "Analyze research data and identify patterns." - model: - provider: "openai" - name: "gpt-4o" - - - name: "synthesizer" - role: "Content Synthesizer" - job: "Synthesize findings into coherent insights." - model: - provider: "anthropic" - name: "claude-3-5-sonnet-20240620" - - - name: "validator" - role: "Content Validator" - job: "Validate accuracy and completeness of findings." - model: - provider: "openai" - name: "gpt-4o" - - - name: "presenter" - role: "Presentation Creator" - job: "Create final presentation of findings." - model: - provider: "openai" - name: "gpt-4o" - - workflow: - start: "researcher" - edges: - - from: "researcher" - to: ["analyzer"] - - from: "analyzer" - to: ["synthesizer"] - - from: "synthesizer" - to: ["validator"] - - from: "validator" - to: ["presenter"] - end: ["presenter"] -``` - -## Workflow Configuration Options - -### Memory Configuration - -```yaml memory-workflow.yaml -metadata: - name: "memory-workflow" - version: "1.0.0" - -arium: - memory: - type: "message" - max_messages: 10 - include_metadata: true - - agents: - - name: "conversational_agent" - role: "Conversational Agent" - job: "Maintain context across multiple interactions." 
- model: - provider: "openai" - name: "gpt-4o" - memory: - enabled: true - max_context: 5 - - workflow: - start: "conversational_agent" - end: ["conversational_agent"] -``` - -### Error Handling - -```yaml error-handling-workflow.yaml -metadata: - name: "robust-workflow" - version: "1.0.0" - -arium: - error_handling: - max_retries: 3 - retry_delay: 1.0 - fallback_agent: "fallback_handler" - timeout: 30 - - agents: - - name: "primary_agent" - role: "Primary Processor" - job: "Main processing agent." - model: - provider: "openai" - name: "gpt-4o" - retries: 2 - timeout: 20 - - - name: "fallback_handler" - role: "Fallback Handler" - job: "Handle errors and provide fallback responses." - model: - provider: "openai" - name: "gpt-4o-mini" - - workflow: - start: "primary_agent" - edges: - - from: "primary_agent" - to: ["fallback_handler"] - on_error: true - end: ["primary_agent", "fallback_handler"] -``` - -## Loading and Executing YAML Workflows - -### Basic Loading - -```python -from flo_ai.arium import AriumBuilder - -# Load workflow from file -workflow = AriumBuilder.from_yaml('workflow.yaml') - -# Execute workflow -result = await workflow.build_and_run(["Input data here"]) -``` - -### Advanced Execution - -```python -# Load with custom configuration -workflow = AriumBuilder.from_yaml( - 'workflow.yaml', - config_overrides={ - 'agents.analyzer.model.temperature': 0.1, - 'agents.summarizer.model.temperature': 0.5 - } -) - -# Execute with variables -result = await workflow.build_and_run( - ["Input data"], - variables={ - 'user_id': '123', - 'priority': 'high' - } -) -``` - -### Workflow Validation - -```python -# Validate workflow before execution -try: - workflow = AriumBuilder.from_yaml('workflow.yaml') - workflow.validate() - print("✅ Workflow is valid") -except Exception as e: - print(f"❌ Workflow validation failed: {e}") -``` - -## Best Practices - -### YAML Structure - -1. **Use meaningful names**: Choose descriptive agent and workflow names -2. **Version your workflows**: Always include version numbers -3. **Document thoroughly**: Add descriptions for all components -4. **Validate schemas**: Use YAML schema validation tools - -### Performance Optimization - -```yaml -# Optimize for performance -arium: - agents: - - name: "fast_agent" - model: - provider: "openai" - name: "gpt-4o-mini" # Use faster model - temperature: 0.1 # Lower temperature - max_tokens: 500 # Limit response length - cache_ttl: 3600 # Cache for 1 hour - timeout: 10 # Short timeout -``` - -### Security Considerations - -```yaml -# Secure workflow configuration -arium: - agents: - - name: "secure_agent" - model: - provider: "openai" - name: "gpt-4o" - temperature: 0.1 # Lower temperature for consistency - max_tokens: 200 # Limit response length - timeout: 5 # Short timeout - retries: 1 # Limit retries -``` - -This YAML-based approach makes complex multi-agent workflows declarative, versionable, and easily shareable across teams! 
diff --git a/documentation/essentials/images.mdx b/documentation/flo-ai/advanced/images.mdx similarity index 100% rename from documentation/essentials/images.mdx rename to documentation/flo-ai/advanced/images.mdx diff --git a/documentation/essentials/llm-providers.mdx b/documentation/flo-ai/advanced/llm-providers.mdx similarity index 100% rename from documentation/essentials/llm-providers.mdx rename to documentation/flo-ai/advanced/llm-providers.mdx diff --git a/documentation/essentials/markdown.mdx b/documentation/flo-ai/advanced/markdown.mdx similarity index 100% rename from documentation/essentials/markdown.mdx rename to documentation/flo-ai/advanced/markdown.mdx diff --git a/documentation/essentials/navigation.mdx b/documentation/flo-ai/advanced/navigation.mdx similarity index 100% rename from documentation/essentials/navigation.mdx rename to documentation/flo-ai/advanced/navigation.mdx diff --git a/documentation/essentials/reusable-snippets.mdx b/documentation/flo-ai/advanced/reusable-snippets.mdx similarity index 100% rename from documentation/essentials/reusable-snippets.mdx rename to documentation/flo-ai/advanced/reusable-snippets.mdx diff --git a/documentation/essentials/routing.mdx b/documentation/flo-ai/advanced/routing.mdx similarity index 100% rename from documentation/essentials/routing.mdx rename to documentation/flo-ai/advanced/routing.mdx diff --git a/documentation/essentials/settings.mdx b/documentation/flo-ai/advanced/settings.mdx similarity index 100% rename from documentation/essentials/settings.mdx rename to documentation/flo-ai/advanced/settings.mdx diff --git a/documentation/essentials/telemetry.mdx b/documentation/flo-ai/advanced/telemetry.mdx similarity index 100% rename from documentation/essentials/telemetry.mdx rename to documentation/flo-ai/advanced/telemetry.mdx diff --git a/documentation/essentials/tools.mdx b/documentation/flo-ai/advanced/tools.mdx similarity index 100% rename from documentation/essentials/tools.mdx rename to documentation/flo-ai/advanced/tools.mdx diff --git a/documentation/flo-ai/core-features/agents.mdx b/documentation/flo-ai/core-features/agents.mdx new file mode 100644 index 00000000..c82af978 --- /dev/null +++ b/documentation/flo-ai/core-features/agents.mdx @@ -0,0 +1,411 @@ +--- +title: "Agents" +description: "Learn how to create and configure AI agents with Flo AI" +icon: "robot" +--- + +## Creating Agents + +Agents are the core building blocks of Flo AI. They represent AI-powered entities that can process inputs, use tools, and generate responses. + +### AgentBuilder Methods + +The `AgentBuilder` class provides a fluent interface for configuring agents. All methods return `self` for method chaining. Here's a complete reference: + +| Method | Description | Parameters | +|--------|-------------|------------| +| `with_name(name: str)` | Set the agent's name | `name`: Display name for the agent | +| `with_prompt(system_prompt: str \| AssistantMessage)` | Set the system prompt | `system_prompt`: Instructions defining agent behavior | +| `with_llm(llm: BaseLLM)` | Configure the LLM provider | `llm`: Instance of OpenAI, Anthropic, Gemini, etc. 
|
+| `with_tools(tools: List[Tool])` | Add tools to the agent | `tools`: List of Tool objects, ToolConfig, or tool dicts |
+| `add_tool(tool: Tool, **prefilled_params)` | Add a single tool with optional pre-filled parameters | `tool`: Tool object, `**prefilled_params`: Parameters to pre-fill |
+| `with_reasoning(pattern: ReasoningPattern)` | Set reasoning pattern | `pattern`: `REACT`, `COT`, or `DIRECT` |
+| `with_retries(max_retries: int)` | Set maximum retry attempts | `max_retries`: Number of retries on failure (default: 3) |
+| `with_output_schema(schema: Dict \| Type[BaseModel])` | Set structured output schema | `schema`: Pydantic model class or JSON schema dict |
+| `with_role(role: str)` | Set the agent's role | `role`: Internal role description |
+| `with_actas(act_as: str)` | Set how the agent presents itself | `act_as`: Message role (e.g., 'assistant', 'user') |
+| `build()` | Create and return the configured Agent | Returns: `Agent` instance |
+
+**Note:** `with_llm()` is required before calling `build()`. All other methods are optional.
+
+### Basic Agent Creation
+
+Create a simple conversational agent:
+
+```python
+from flo_ai.agent import AgentBuilder
+from flo_ai.llm import OpenAI
+
+agent = (
+    AgentBuilder()
+    .with_name('Customer Support')
+    .with_prompt('You are a helpful customer support agent.')
+    .with_llm(OpenAI(model='gpt-4o-mini'))
+    .build()
+)
+
+response = await agent.run('How can I reset my password?')
+```
+
+### Agent Configuration
+
+Configure agents with various options:
+
+```python
+agent = (
+    AgentBuilder()
+    .with_name('Data Analyst')
+    .with_prompt('You are an expert data analyst.')
+    .with_llm(OpenAI(model='gpt-4o', temperature=0.3))
+    .with_retries(3)  # Retry on failure
+    .build()
+)
+```
+
+## Agent Types
+
+### Conversational Agents
+
+Basic agents for chat and Q&A:
+
+```python
+conversational_agent = (
+    AgentBuilder()
+    .with_name('Chat Assistant')
+    .with_prompt('You are a friendly conversational assistant.')
+    .with_llm(OpenAI(model='gpt-4o-mini'))
+    .build()
+)
+```
+
+### Tool-Using Agents
+
+Agents that can use external tools:
+
+```python
+from flo_ai.tool import flo_tool
+
+@flo_tool(description="Get weather information")
+async def get_weather(city: str) -> str:
+    return f"Weather in {city}: sunny, 25°C"
+
+tool_agent = (
+    AgentBuilder()
+    .with_name('Weather Assistant')
+    .with_prompt('You help users get weather information.')
+    .with_llm(OpenAI(model='gpt-4o-mini'))
+    .with_tools([get_weather.tool])
+    .build()
+)
+```
+
+### Structured Output Agents
+
+Agents that return structured data:
+
+```python
+from pydantic import BaseModel, Field
+
+class AnalysisResult(BaseModel):
+    summary: str = Field(description="Executive summary")
+    key_findings: list = Field(description="List of key findings")
+    recommendations: list = Field(description="Actionable recommendations")
+
+structured_agent = (
+    AgentBuilder()
+    .with_name('Business Analyst')
+    .with_prompt('Analyze business data and provide insights.')
+    .with_llm(OpenAI(model='gpt-4o'))
+    .with_output_schema(AnalysisResult)
+    .build()
+)
+```
+
+## Agent Capabilities
+
+### Variable Resolution
+
+Use dynamic variables in agent prompts:
+
+```python
+agent = (
+    AgentBuilder()
+    .with_name('Personalized Assistant')
+    .with_prompt('Hello <user_name>! You are a <user_role> at <company>.')
+    .with_llm(OpenAI(model='gpt-4o-mini'))
+    .build()
+)
+
+# Use variables at runtime
+variables = {
+    'user_name': 'John',
+    'user_role': 'Data Scientist',
+    'company': 'TechCorp'
+}
+
+response = await agent.run(
+    'What should I focus on today?',
+    variables=variables
+)
+```
+
+### Document Processing
+
+Process PDF and text documents:
+
+```python
+from flo_ai.models import DocumentMessageContent, UserMessage
+from flo_ai.models.document import DocumentType
+import base64
+
+# Read file and encode as base64
+with open('report.pdf', 'rb') as f:
+    pdf_bytes = f.read()
+    pdf_base64 = base64.b64encode(pdf_bytes).decode('utf-8')
+
+# Create document message
+document = UserMessage(
+    content=DocumentMessageContent(
+        mime_type=DocumentType.PDF.value,
+        base64=pdf_base64
+    )
+)
+
+# Process with agent
+response = await agent.run([document, "Analyze the document"])
+```
+
+### Error Handling
+
+Built-in retry mechanisms and error recovery:
+
+```python
+robust_agent = (
+    AgentBuilder()
+    .with_name('Reliable Agent')
+    .with_prompt('You are a reliable assistant.')
+    .with_llm(OpenAI(model='gpt-4o'))
+    .with_retries(3)  # Retry up to 3 times
+    .build()
+)
+```
+
+### Conversation History
+
+Agents automatically maintain conversation history across multiple interactions. The `run()` method returns the complete conversation history as a list of messages.
+
+```python
+agent = (
+    AgentBuilder()
+    .with_name('Chat Assistant')
+    .with_prompt('You are a helpful assistant.')
+    .with_llm(OpenAI(model='gpt-4o-mini'))
+    .build()
+)
+
+# First interaction
+response1 = await agent.run('Hello, my name is Alice.')
+print(f"Response: {response1[-1].content}")  # Get last message
+
+# Second interaction - agent remembers the conversation
+response2 = await agent.run('What is my name?')
+print(f"Response: {response2[-1].content}")  # Agent knows the name is Alice
+
+# Access full conversation history
+for message in agent.conversation_history:
+    print(f"{message.role}: {message.content}")
+```
+
+#### Accessing Conversation History
+
+The conversation history is stored in the `conversation_history` attribute:
+
+```python
+# Get all messages
+all_messages = agent.conversation_history
+
+# Get the last message
+last_message = agent.conversation_history[-1]
+
+# Filter messages by role
+from flo_ai.models import UserMessage, AssistantMessage
+
+user_messages = [
+    msg for msg in agent.conversation_history
+    if isinstance(msg, UserMessage)
+]
+
+assistant_messages = [
+    msg for msg in agent.conversation_history
+    if isinstance(msg, AssistantMessage)
+]
+```
+
+#### Clearing History
+
+Clear the conversation history to start a new conversation:
+
+```python
+# Clear all conversation history
+agent.clear_history()
+
+# Now the agent starts fresh
+response = await agent.run('Hello!')
+```
+
+#### Manual History Management
+
+You can manually add messages to the conversation history:
+
+```python
+from flo_ai.models import UserMessage, AssistantMessage
+
+# Add a user message
+agent.add_to_history(UserMessage('Previous context'))
+
+# Add multiple messages at once
+agent.add_to_history([
+    UserMessage('Message 1'),
+    AssistantMessage('Response 1'),
+    UserMessage('Message 2')
+])
+```
+
+## Best Practices
+
+### Prompt Engineering
+
+- **Be specific**: Clearly define the agent's role and capabilities
+- **Use examples**: Provide examples of expected inputs and outputs
+- **Set boundaries**: Define what the agent should and shouldn't do
+
+```python
+well_prompted_agent = (
+    AgentBuilder()
+    
.with_name('Code Reviewer') + .with_prompt(''' + You are an expert code reviewer. Your role is to: + 1. Review code for bugs, security issues, and best practices + 2. Suggest improvements and optimizations + 3. Provide constructive feedback + + Always be specific about issues and provide actionable suggestions. + Focus on code quality, performance, and maintainability. + ''') + .with_llm(OpenAI(model='gpt-4o')) + .build() +) +``` + +### Model Selection + +Choose the right model for your use case: + +- **GPT-4o**: Best for complex reasoning and analysis +- **GPT-4o-mini**: Good balance of performance and cost +- **Claude-3.5-Sonnet**: Excellent for creative tasks +- **Gemini**: Good for multilingual applications + +### Performance Optimization + +```python +# Configure LLM with appropriate settings for performance +optimized_agent = ( + AgentBuilder() + .with_name('Content Generator') + .with_prompt('Generate detailed content.') + .with_llm(OpenAI(model='gpt-4o-mini', temperature=0.7)) + .with_retries(2) # Reduce retries for faster failure + .build() +) +``` + +## Agent Lifecycle + +### Creation + +```python +# Create agent +agent = ( + AgentBuilder() + .with_name('My Agent') + .with_prompt('You are a helpful assistant.') + .with_llm(OpenAI(model='gpt-4o-mini')) + .build() +) +``` + +### Execution + +```python +# Simple execution +response = await agent.run('Hello!') + +# With variables +response = await agent.run('Hello!', variables={'name': 'John'}) + +# With multiple messages +from flo_ai.models import UserMessage +response = await agent.run([ + UserMessage('First message'), + UserMessage('Second message') +]) +``` + +## Advanced Features + +### Reasoning Patterns + +Configure agents to use different reasoning patterns: + +```python +from flo_ai.agent import ReasoningPattern + +# ReACT pattern - for tool-using agents that need structured reasoning +react_agent = ( + AgentBuilder() + .with_name('ReACT Agent') + .with_prompt('You solve problems step by step.') + .with_llm(OpenAI(model='gpt-4o')) + .with_tools([get_weather.tool]) + .with_reasoning(ReasoningPattern.REACT) + .build() +) + +# Chain of Thought pattern - for complex reasoning tasks +cot_agent = ( + AgentBuilder() + .with_name('CoT Agent') + .with_prompt('You think through problems carefully.') + .with_llm(OpenAI(model='gpt-4o')) + .with_tools([get_weather.tool]) + .with_reasoning(ReasoningPattern.COT) + .build() +) + +# Direct pattern (default) - for straightforward tasks +direct_agent = ( + AgentBuilder() + .with_name('Direct Agent') + .with_prompt('You provide direct answers.') + .with_llm(OpenAI(model='gpt-4o')) + .with_reasoning(ReasoningPattern.DIRECT) + .build() +) +``` + +### Role and Act-As Configuration + +Configure agent roles and how they present themselves: + +```python +agent = ( + AgentBuilder() + .with_name('Customer Support') + .with_prompt('You help customers with their questions.') + .with_role('Senior Support Specialist') # Internal role description + .with_actas('assistant') # How the agent presents itself in messages + .with_llm(OpenAI(model='gpt-4o-mini')) + .build() +) +``` diff --git a/documentation/flo-ai/core-features/arium.mdx b/documentation/flo-ai/core-features/arium.mdx new file mode 100644 index 00000000..ca662f72 --- /dev/null +++ b/documentation/flo-ai/core-features/arium.mdx @@ -0,0 +1,620 @@ +--- +title: "Arium Workflows" +description: "Create complex multi-agent workflows with Arium orchestration" +icon: "sitemap" +--- + +## What is Arium? 
+ +Arium is Flo AI's powerful workflow orchestration engine for creating complex multi-agent workflows. It allows you to chain agents together, implement conditional routing, and build sophisticated AI systems. + +### AriumBuilder Methods + +The `AriumBuilder` class provides a fluent interface for configuring workflows. All methods return `self` for method chaining. Here's a complete reference: + +| Method | Description | Parameters | +|--------|-------------|------------| +| `with_memory(memory: MessageMemory)` | Set shared memory for the workflow | `memory`: MessageMemory instance | +| `add_agent(agent: Agent)` | Add a single agent to the workflow | `agent`: Agent instance | +| `add_agents(agents: List[Agent])` | Add multiple agents to the workflow | `agents`: List of Agent instances | +| `add_function_node(node: FunctionNode)` | Add a function node to the workflow | `node`: FunctionNode instance | +| `add_function_nodes(nodes: List[FunctionNode])` | Add multiple function nodes | `nodes`: List of FunctionNode instances | +| `add_arium(arium: Arium, name: str, inherit_variables: bool)` | Add a nested Arium workflow as a node | `arium`: Arium instance, `name`: Optional name, `inherit_variables`: Whether to inherit variables | +| `add_foreach(name: str, execute_node: AriumNodeType)` | Add a ForEach node for batch processing | `name`: Node name, `execute_node`: Node to execute on each item | +| `start_with(node: AriumNodeType \| str)` | Set the starting node | `node`: Agent, FunctionNode, AriumNode, or node name string | +| `end_with(node: AriumNodeType)` | Add an ending node | `node`: Agent, FunctionNode, or AriumNode | +| `connect(from_node: AriumNodeType, to_node: AriumNodeType)` | Connect two nodes directly | `from_node`: Source node, `to_node`: Target node | +| `add_edge(from_node: AriumNodeType, to_nodes: List[AriumNodeType], router: Callable)` | Add edge with optional router function | `from_node`: Source node, `to_nodes`: List of target nodes, `router`: Optional routing function | +| `from_yaml(yaml_str: str, yaml_file: str, ...)` | Create builder from YAML configuration | `yaml_str`: YAML string, `yaml_file`: Path to YAML file, plus optional registries | +| `build()` | Build and return the Arium instance | Returns: `Arium` instance | +| `build_and_run(inputs, variables: Dict)` | Build and run the workflow | `inputs`: List of messages or string, `variables`: Optional runtime variables | +| `visualize(output_path: str, title: str)` | Generate workflow visualization | `output_path`: Path for graph image, `title`: Graph title | +| `reset()` | Reset builder to start fresh | Returns: `AriumBuilder` instance | + +**Note:** `start_with()` and `end_with()` are required before calling `build()`. All other methods are optional. + +## Node Types + +Arium workflows support several types of nodes, each serving different purposes: + +### Agent Nodes + +Agents are the primary executable nodes in Arium workflows. They use LLMs to process inputs and generate responses. + +```python +from flo_ai.agent import AgentBuilder +from flo_ai.llm import OpenAI + +agent = ( + AgentBuilder() + .with_name('analyzer') + .with_prompt('Analyze the input content.') + .with_llm(OpenAI(model='gpt-4o-mini')) + .build() +) + +# Use agent in workflow +workflow = ( + AriumBuilder() + .add_agent(agent) + .start_with(agent) + .end_with(agent) +) +``` + +### Function Nodes + +Function nodes allow you to execute custom Python functions within workflows. They can be synchronous or asynchronous. 
+ +```python +from flo_ai.arium.nodes import FunctionNode + +# Synchronous function +def process_data(inputs, variables=None, **kwargs): + # Process inputs + result = f"Processed: {inputs}" + return result + +# Asynchronous function +async def async_process(inputs, variables=None, **kwargs): + # Async processing + await asyncio.sleep(0.1) + return f"Async processed: {inputs}" + +# Create function nodes +sync_node = FunctionNode( + name='data_processor', + description='Processes input data', + function=process_data +) + +async_node = FunctionNode( + name='async_processor', + description='Asynchronously processes data', + function=async_process +) + +# Use in workflow +workflow = ( + AriumBuilder() + .add_function_node(sync_node) + .add_function_node(async_node) + .start_with(sync_node) + .connect(sync_node, async_node) + .end_with(async_node) +) +``` + +### Arium Nodes (Nested Workflows) + +Arium nodes allow you to embed one workflow inside another, creating hierarchical workflows with isolated memory. + +```python +# Create a sub-workflow +sub_workflow = ( + AriumBuilder() + .add_agents([agent_a, agent_b]) + .start_with(agent_a) + .connect(agent_a, agent_b) + .end_with(agent_b) + .build() +) + +# Embed as a node in parent workflow +from flo_ai.arium.nodes import AriumNode + +nested_node = AriumNode( + name='sub_workflow', + arium=sub_workflow, + inherit_variables=True # Pass parent variables to sub-workflow +) + +# Use in parent workflow +parent_workflow = ( + AriumBuilder() + .add_agent(main_agent) + .add_arium(sub_workflow, name='sub_workflow', inherit_variables=True) + .start_with(main_agent) + .connect(main_agent, nested_node) + .end_with(nested_node) +) +``` + +### ForEach Nodes + +ForEach nodes execute a node on each item in a collection, useful for batch processing. 
+ +```python +from flo_ai.arium.nodes import ForEachNode + +# Create a ForEach node that processes each item with an agent +foreach_node = ForEachNode( + name='batch_processor', + execute_node=agent # Agent to execute on each item +) + +# Use in workflow +workflow = ( + AriumBuilder() + .add_agent(agent) + .add_foreach('batch_processor', agent) + .start_with(foreach_node) + .end_with(foreach_node) +) + +# When run with multiple inputs, each input is processed sequentially +result = await workflow.run([ + "Process item 1", + "Process item 2", + "Process item 3" +]) +``` + +### Combining Node Types + +You can combine different node types in a single workflow: + +```python +# Create different node types +agent = AgentBuilder().with_name('agent').with_prompt('...').with_llm(llm).build() +function_node = FunctionNode(name='processor', description='...', function=process_func) +sub_arium = AriumBuilder().add_agents([...]).build() +nested_node = AriumNode(name='nested', arium=sub_arium) +foreach_node = ForEachNode(name='batch', execute_node=agent) + +# Combine in workflow +workflow = ( + AriumBuilder() + .add_agent(agent) + .add_function_node(function_node) + .add_arium(sub_arium, name='nested') + .add_foreach('batch', agent) + .start_with(agent) + .connect(agent, function_node) + .connect(function_node, nested_node) + .connect(nested_node, foreach_node) + .end_with(foreach_node) +) +``` + +## Basic Workflow Creation + +### Simple Agent Chain + +Create a linear workflow with multiple agents: + +```python +from flo_ai.arium import AriumBuilder +from flo_ai.agent import AgentBuilder +from flo_ai.llm import OpenAI + +async def simple_chain(): + llm = OpenAI(model='gpt-4o-mini') + + # Create agents using AgentBuilder + analyst = ( + AgentBuilder() + .with_name('content_analyst') + .with_prompt('Analyze the input and extract key insights.') + .with_llm(llm) + .build() + ) + + summarizer = ( + AgentBuilder() + .with_name('summarizer') + .with_prompt('Create a concise summary based on the analysis.') + .with_llm(llm) + .build() + ) + + # Build and run workflow + result = await ( + AriumBuilder() + .add_agents([analyst, summarizer]) + .start_with(analyst) + .connect(analyst, summarizer) + .end_with(summarizer) + .build_and_run(["Analyze this complex business report..."]) + ) + + return result +``` + +### Conditional Routing + +Route to different agents based on conditions: + +```python +from flo_ai.arium.memory import MessageMemory, MessageMemoryItem +from typing import List + +def route_by_type(memory: MessageMemory) -> str: + """Route based on classification result""" + messages: List[MessageMemoryItem] = memory.get() + + if not messages: + return "business_specialist" # Default route + + # Access the last message result + last_message_item = messages[-1] + last_message_content = str(last_message_item.result) + + if "technical" in last_message_content.lower(): + return "tech_specialist" + else: + return "business_specialist" + +# Build workflow with conditional routing +result = await ( + AriumBuilder() + .add_agents([classifier, tech_specialist, business_specialist, final_agent]) + .start_with(classifier) + .add_edge(classifier, [tech_specialist, business_specialist], route_by_type) + .connect(tech_specialist, final_agent) + .connect(business_specialist, final_agent) + .end_with(final_agent) + .build_and_run(["How can we optimize our database performance?"]) +) +``` + +## YAML-Based Workflows + +Define entire workflows in YAML for easy management: + +```yaml +metadata: + name: 
"content-analysis-workflow" + version: "1.0.0" + description: "Multi-agent content analysis pipeline" + +arium: + agents: + - name: "analyzer" + role: "Content Analyst" + job: "Analyze the input content and extract key insights." + model: + provider: "openai" + name: "gpt-4o-mini" + + - name: "summarizer" + role: "Content Summarizer" + job: "Create a concise summary based on the analysis." + model: + provider: "anthropic" + name: "claude-3-5-sonnet-20240620" + + workflow: + start: "analyzer" + edges: + - from: "analyzer" + to: ["summarizer"] + end: ["summarizer"] +``` + +```python +# Run YAML workflow +result = await ( + AriumBuilder() + .from_yaml(yaml_file='workflow.yaml') + .build_and_run(["Analyze this quarterly business report..."]) +) +``` + +## Advanced Routing + +### LLM-Powered Routers + +Use LLMs for intelligent routing decisions: + +```yaml +routers: + - name: "content_type_router" + type: "smart" # Uses LLM for intelligent routing + routing_options: + technical_writer: "Technical content, documentation, tutorials" + creative_writer: "Creative writing, storytelling, fiction" + marketing_writer: "Marketing copy, sales content, campaigns" + model: + provider: "openai" + name: "gpt-4o-mini" +``` + +### ReflectionRouter + +For A→B→A→C feedback patterns: + +```yaml +routers: + - name: "reflection_router" + type: "reflection" + flow_pattern: [writer, critic, writer] # A → B → A pattern + model: + provider: "openai" + name: "gpt-4o-mini" +``` + +### PlanExecuteRouter + +For Cursor-style plan-and-execute workflows: + +```yaml +routers: + - name: "plan_router" + type: "plan_execute" + agents: + planner: "Creates detailed execution plans" + developer: "Implements features according to plan" + tester: "Tests implementations and validates functionality" + reviewer: "Reviews and approves completed work" + settings: + planner_agent: planner + executor_agent: developer + reviewer_agent: reviewer +``` + +## Workflow Patterns + +### Sequential Processing + +```python +# A → B → C +workflow = ( + AriumBuilder() + .add_agents([agent_a, agent_b, agent_c]) + .start_with(agent_a) + .connect(agent_a, agent_b) + .connect(agent_b, agent_c) + .end_with(agent_c) +) +``` + +### Parallel Processing + +```python +# A → [B, C] → D +workflow = ( + AriumBuilder() + .add_agents([agent_a, agent_b, agent_c, agent_d]) + .start_with(agent_a) + .connect(agent_a, [agent_b, agent_c]) + .connect(agent_b, agent_d) + .connect(agent_c, agent_d) + .end_with(agent_d) +) +``` + +### Fan-out/Fan-in + +```python +# A → [B, C, D] → E +workflow = ( + AriumBuilder() + .add_agents([agent_a, agent_b, agent_c, agent_d, agent_e]) + .start_with(agent_a) + .connect(agent_a, [agent_b, agent_c, agent_d]) + .connect(agent_b, agent_e) + .connect(agent_c, agent_e) + .connect(agent_d, agent_e) + .end_with(agent_e) +) +``` + +## Memory Management + +### Shared Memory + +```python +from flo_ai.arium.memory import MessageMemory + +# Create shared memory +shared_memory = MessageMemory() + +workflow = ( + AriumBuilder() + .add_agents([agent_a, agent_b]) + .with_memory(shared_memory) + .start_with(agent_a) + .connect(agent_a, agent_b) + .end_with(agent_b) +) +``` + +### Custom Memory + +You can extend `MessageMemory` to add custom functionality: + +```python +from flo_ai.arium.memory import MessageMemory, MessageMemoryItem +from typing import List, Optional + +class CustomMemory(MessageMemory): + def __init__(self): + super().__init__() + # Add any custom attributes here + self.custom_data = {} + + def get_filtered_by_custom_logic(self, 
filter_func) -> List[MessageMemoryItem]: + """Custom method to filter messages""" + return [msg for msg in self.messages if filter_func(msg)] + +# Use custom memory in workflow +custom_memory = CustomMemory() +workflow = ( + AriumBuilder() + .add_agents([agent_a, agent_b]) + .with_memory(custom_memory) + .start_with(agent_a) + .connect(agent_a, agent_b) + .end_with(agent_b) +) +``` + +## Workflow Execution + +### Running Workflows + +Workflows can be executed using `build_and_run()` or by building first and then running: + +```python +# Build and run in one step +result = await ( + AriumBuilder() + .add_agents([agent_a, agent_b]) + .start_with(agent_a) + .connect(agent_a, agent_b) + .end_with(agent_b) + .build_and_run(["Process this input"]) +) + +# Or build first, then run +workflow = ( + AriumBuilder() + .add_agents([agent_a, agent_b]) + .start_with(agent_a) + .connect(agent_a, agent_b) + .end_with(agent_b) + .build() +) + +result = await workflow.run(["Process this input"]) + +# With variables +result = await workflow.run( + ["Process this input"], + variables={'user_id': '123', 'context': 'production'} +) +``` + +## Error Handling + +Error handling in Arium workflows is managed at the agent level. Configure retries and error handling when building agents: + +```python +from flo_ai.agent import AgentBuilder +from flo_ai.llm import OpenAI + +# Create agents with retry configuration +agent_a = ( + AgentBuilder() + .with_name('agent_a') + .with_prompt('Process the input.') + .with_llm(OpenAI(model='gpt-4o-mini')) + .with_retries(3) # Retry up to 3 times on failure + .build() +) + +agent_b = ( + AgentBuilder() + .with_name('agent_b') + .with_prompt('Continue processing.') + .with_llm(OpenAI(model='gpt-4o-mini')) + .with_retries(2) # Retry up to 2 times on failure + .build() +) + +# Build workflow with error-resilient agents +workflow = ( + AriumBuilder() + .add_agents([agent_a, agent_b]) + .start_with(agent_a) + .connect(agent_a, agent_b) + .end_with(agent_b) + .build() +) +``` + +## Performance Optimization + +### Parallel Execution + +Arium automatically executes agents in parallel when multiple agents are connected from the same source node: + +```python +# Agents B and C will execute in parallel after A completes +workflow = ( + AriumBuilder() + .add_agents([agent_a, agent_b, agent_c, agent_d]) + .start_with(agent_a) + .add_edge(agent_a, [agent_b, agent_c]) # Parallel execution + .connect(agent_b, agent_d) + .connect(agent_c, agent_d) + .end_with(agent_d) +) +``` + +### Workflow Visualization + +Visualize your workflow to understand the execution flow: + +```python +workflow = ( + AriumBuilder() + .add_agents([agent_a, agent_b, agent_c]) + .start_with(agent_a) + .connect(agent_a, agent_b) + .connect(agent_b, agent_c) + .end_with(agent_c) + .visualize(output_path='workflow.png', title='My Workflow') + .build() +) +``` + +## Best Practices + +### Workflow Design + +1. **Keep it simple**: Start with linear workflows before adding complexity +2. **Use meaningful names**: Name agents and workflows descriptively +3. **Handle errors**: Always implement error handling and recovery +4. **Test thoroughly**: Test workflows with various inputs + +### Performance Tips + +1. **Use appropriate models**: Choose models based on task complexity +2. **Implement caching**: Cache expensive operations +3. **Optimize routing**: Use efficient routing logic +4. 
**Monitor performance**: Use telemetry to track workflow performance + +### Debugging + +Enable debug logging at the Python level to troubleshoot workflows: + +```python +import logging + +# Enable debug logging +logging.basicConfig(level=logging.DEBUG) + +# Build and run workflow - debug logs will show execution flow +workflow = ( + AriumBuilder() + .add_agents([agent_a, agent_b]) + .start_with(agent_a) + .connect(agent_a, agent_b) + .end_with(agent_b) + .build() +) + +result = await workflow.run(["Test input"]) +``` diff --git a/documentation/flo-ai/core-features/messages.mdx b/documentation/flo-ai/core-features/messages.mdx new file mode 100644 index 00000000..3586cf4c --- /dev/null +++ b/documentation/flo-ai/core-features/messages.mdx @@ -0,0 +1,609 @@ +--- +title: "Messages" +description: "Learn about message types and how to use them in Flo AI agents and workflows" +icon: "message" +--- + +## Message System Overview + +Flo AI uses a flexible message system to handle communication between users, agents, and tools. Messages support text, images, documents, and structured content, making it easy to build rich conversational experiences. + +## Message Types + +Flo AI supports several message types, each serving a specific purpose in conversations: + +### BaseMessage + +The base class for all messages. All message types inherit from `BaseMessage`. + +```python +from flo_ai.models import BaseMessage + +# BaseMessage has: +# - content: The message content (str, ImageMessageContent, DocumentMessageContent, or TextMessageContent) +# - role: The message role ('system', 'user', 'assistant', or 'function') +# - metadata: Optional dictionary for additional metadata +``` + +### UserMessage + +Messages from the user to the agent. Supports text, images, and documents. + +```python +from flo_ai.models import UserMessage + +# Simple text message +user_msg = UserMessage(content="Hello, how are you?") + +# UserMessage automatically sets role to 'user' +print(user_msg.role) # 'user' +``` + +### AssistantMessage + +Messages from the agent to the user. These are typically generated by the LLM. + +```python +from flo_ai.models import AssistantMessage + +# Assistant message +assistant_msg = AssistantMessage(content="I'm doing well, thank you!") + +# AssistantMessage automatically sets role to 'assistant' if not specified +print(assistant_msg.role) # 'assistant' + +# You can customize the role +custom_msg = AssistantMessage(content="Response", role="user") +``` + +### SystemMessage + +System-level instructions for the agent. Used for system prompts. + +```python +from flo_ai.models import SystemMessage + +# System message +system_msg = SystemMessage(content="You are a helpful assistant.") + +# SystemMessage automatically sets role to 'system' +print(system_msg.role) # 'system' +``` + +### FunctionMessage + +Messages representing function/tool call results. Used when agents call tools. 
+ +```python +from flo_ai.models import FunctionMessage + +# Function message (result of a tool call) +function_msg = FunctionMessage( + content="The weather in San Francisco is 72°F and sunny.", + name="get_weather" # Name of the function that was called +) + +# FunctionMessage automatically sets role to 'function' +print(function_msg.role) # 'function' +print(function_msg.name) # 'get_weather' +``` + +## Message Content Types + +Messages can contain different types of content: + +### Text Content + +Simple string content (default): + +```python +from flo_ai.models import UserMessage, TextMessageContent + +# Direct string (automatically converted) +msg1 = UserMessage(content="Hello") + +# Explicit TextMessageContent +msg2 = UserMessage( + content=TextMessageContent(text="Hello") +) +``` + +### Image Content + +Send images to agents for vision tasks: + +```python +from flo_ai.models import UserMessage, ImageMessageContent +import base64 + +# Image from URL +image_msg_url = UserMessage( + content=ImageMessageContent( + url="https://example.com/image.jpg", + mime_type="image/jpeg" + ) +) + +# Image from base64 +with open('photo.jpg', 'rb') as f: + image_bytes = f.read() + image_base64 = base64.b64encode(image_bytes).decode('utf-8') + +image_msg_base64 = UserMessage( + content=ImageMessageContent( + base64=image_base64, + mime_type="image/jpeg" + ) +) + +# Image from bytes +with open('photo.jpg', 'rb') as f: + image_bytes = f.read() + +image_msg_bytes = UserMessage( + content=ImageMessageContent( + bytes=image_bytes, + mime_type="image/jpeg" + ) +) +``` + +### Document Content + +Send documents (PDFs, text files, etc.) to agents: + +```python +from flo_ai.models import UserMessage, DocumentMessageContent +from flo_ai.models.document import DocumentType +import base64 + +# PDF document from base64 +with open('report.pdf', 'rb') as f: + pdf_bytes = f.read() + pdf_base64 = base64.b64encode(pdf_bytes).decode('utf-8') + +document_msg = UserMessage( + content=DocumentMessageContent( + base64=pdf_base64, + mime_type=DocumentType.PDF.value # "application/pdf" + ) +) + +# Document from URL +document_msg_url = UserMessage( + content=DocumentMessageContent( + url="https://example.com/document.pdf", + mime_type="application/pdf" + ) +) + +# Document from bytes +with open('report.pdf', 'rb') as f: + pdf_bytes = f.read() + +document_msg_bytes = UserMessage( + content=DocumentMessageContent( + bytes=pdf_bytes, + mime_type="application/pdf" + ) +) +``` + +## Using Messages with Agents + +### Basic Text Messages + +```python +from flo_ai.agent import AgentBuilder +from flo_ai.llm import OpenAI +from flo_ai.models import UserMessage + +agent = ( + AgentBuilder() + .with_name('Assistant') + .with_prompt('You are a helpful assistant.') + .with_llm(OpenAI(model='gpt-4o-mini')) + .build() +) + +# Simple string (automatically converted to UserMessage) +response = await agent.run("Hello!") + +# Explicit UserMessage +response = await agent.run([UserMessage(content="Hello!")]) + +# Multiple messages +response = await agent.run([ + UserMessage(content="My name is Alice."), + UserMessage(content="What's my name?") +]) +``` + +### Messages with Images + +```python +from flo_ai.models import UserMessage, ImageMessageContent +import base64 + +# Create agent with vision capabilities +vision_agent = ( + AgentBuilder() + .with_name('Vision Assistant') + .with_prompt('You are an expert at analyzing images.') + .with_llm(OpenAI(model='gpt-4o')) # Use vision-capable model + .build() +) + +# Load and encode image +with 
open('chart.png', 'rb') as f: + image_bytes = f.read() + image_base64 = base64.b64encode(image_bytes).decode('utf-8') + +# Send image with text prompt +response = await vision_agent.run([ + UserMessage( + content=ImageMessageContent( + base64=image_base64, + mime_type="image/png" + ) + ), + UserMessage(content="What does this chart show?") +]) +``` + +### Messages with Documents + +```python +from flo_ai.models import UserMessage, DocumentMessageContent +from flo_ai.models.document import DocumentType +import base64 + +# Create agent for document analysis +doc_agent = ( + AgentBuilder() + .with_name('Document Analyst') + .with_prompt('You analyze documents and extract key information.') + .with_llm(OpenAI(model='gpt-4o')) + .build() +) + +# Load PDF document +with open('report.pdf', 'rb') as f: + pdf_bytes = f.read() + pdf_base64 = base64.b64encode(pdf_bytes).decode('utf-8') + +# Send document with analysis request +response = await doc_agent.run([ + UserMessage( + content=DocumentMessageContent( + base64=pdf_base64, + mime_type=DocumentType.PDF.value + ) + ), + UserMessage(content="Summarize the key points from this document.") +]) +``` + +### Mixed Content Messages + +You can combine text, images, and documents in a single conversation: + +```python +# Multiple content types in one conversation +response = await agent.run([ + UserMessage(content="I have a question about this image:"), + UserMessage( + content=ImageMessageContent( + url="https://example.com/diagram.png", + mime_type="image/png" + ) + ), + UserMessage(content="And this document:"), + UserMessage( + content=DocumentMessageContent( + url="https://example.com/spec.pdf", + mime_type="application/pdf" + ) + ), + UserMessage(content="How do they relate?") +]) +``` + +## Message Metadata + +All messages support optional metadata for additional context: + +```python +from flo_ai.models import UserMessage + +# Message with metadata +msg = UserMessage( + content="Hello", + metadata={ + "timestamp": "2024-01-15T10:30:00Z", + "user_id": "user123", + "session_id": "session456", + "source": "web_app" + } +) + +print(msg.metadata) # {'timestamp': '...', 'user_id': '...', ...} +``` + +## Working with Message Responses + +### Accessing Response Messages + +Agents return lists of messages representing the conversation history: + +```python +response = await agent.run("What is 2+2?") + +# Response is a list of BaseMessage objects +print(type(response)) # + +# Get the last message (agent's response) +last_message = response[-1] +print(last_message.content) # "2+2 equals 4" +print(last_message.role) # "assistant" + +# Iterate through all messages +for msg in response: + print(f"{msg.role}: {msg.content}") +``` + +### Extracting Content + +```python +response = await agent.run("Hello!") + +# Get content from last message +if response: + last_msg = response[-1] + + # Handle different content types + if isinstance(last_msg.content, str): + text_content = last_msg.content + elif hasattr(last_msg.content, 'text'): + text_content = last_msg.content.text + else: + text_content = str(last_msg.content) + + print(text_content) +``` + +### Message History + +Agents maintain conversation history across multiple calls: + +```python +agent = ( + AgentBuilder() + .with_name('Chat Assistant') + .with_prompt('You are a helpful assistant.') + .with_llm(OpenAI(model='gpt-4o-mini')) + .build() +) + +# First message +response1 = await agent.run("My name is Bob.") +print(f"Response: {response1[-1].content}") + +# Second message - agent remembers context +response2 
= await agent.run("What's my name?") +print(f"Response: {response2[-1].content}") # "Your name is Bob." + +# Full conversation history +for msg in response2: + print(f"{msg.role}: {msg.content}") +``` + +## Using Messages in Workflows + +Messages work seamlessly in Arium workflows: + +```python +from flo_ai.arium import AriumBuilder +from flo_ai.models import UserMessage, ImageMessageContent +import base64 + +# Create workflow +workflow = ( + AriumBuilder() + .add_agent(agent1) + .add_agent(agent2) + .start_with(agent1) + .connect(agent1, agent2) + .end_with(agent2) + .build() +) + +# Run workflow with messages +response = await workflow.run([ + UserMessage(content="Process this request"), + UserMessage( + content=ImageMessageContent( + url="https://example.com/image.jpg", + mime_type="image/jpeg" + ) + ) +]) +``` + +## Message Type Reference + +### MessageType Constants + +```python +from flo_ai.models.chat_message import MessageType + +MessageType.USER # 'user' +MessageType.ASSISTANT # 'assistant' +MessageType.FUNCTION # 'function' +MessageType.SYSTEM # 'system' +``` + +### Message Classes + +| Class | Role | Content Types | Use Case | +|-------|------|---------------|----------| +| `UserMessage` | `user` | str, ImageMessageContent, DocumentMessageContent, TextMessageContent | User inputs | +| `AssistantMessage` | `assistant` | str | Agent responses | +| `SystemMessage` | `system` | str | System prompts | +| `FunctionMessage` | `function` | str | Tool/function results | + +### Content Classes + +| Class | Type | Fields | Use Case | +|-------|------|--------|----------| +| `TextMessageContent` | `text` | `text: str` | Plain text content | +| `ImageMessageContent` | `image` | `url`, `base64`, `bytes`, `mime_type` | Image content | +| `DocumentMessageContent` | `document` | `url`, `base64`, `bytes`, `mime_type` | Document content | + +## Best Practices + +### 1. Use Appropriate Content Types + +```python +# ✅ Good: Use ImageMessageContent for images +image_msg = UserMessage( + content=ImageMessageContent(url="https://example.com/img.jpg") +) + +# ❌ Avoid: Don't embed images as text +bad_msg = UserMessage(content="") # Not recommended +``` + +### 2. Handle Base64 Encoding Properly + +```python +import base64 + +# ✅ Good: Proper base64 encoding +with open('file.pdf', 'rb') as f: + file_bytes = f.read() + file_base64 = base64.b64encode(file_bytes).decode('utf-8') + +# ❌ Avoid: Don't forget to decode bytes to string +bad_base64 = base64.b64encode(file_bytes) # Returns bytes, not string +``` + +### 3. Specify MIME Types + +```python +# ✅ Good: Always specify mime_type +image_msg = ImageMessageContent( + base64=image_base64, + mime_type="image/png" # Explicit MIME type +) + +# ❌ Avoid: Missing mime_type may cause issues +bad_msg = ImageMessageContent(base64=image_base64) # No MIME type +``` + +### 4. Use Metadata for Context + +```python +# ✅ Good: Add metadata for tracking +msg = UserMessage( + content="Hello", + metadata={ + "user_id": "123", + "timestamp": "2024-01-15T10:30:00Z" + } +) +``` + +### 5. 
Handle Message Lists Properly + +```python +# ✅ Good: Pass list of messages +response = await agent.run([ + UserMessage(content="First message"), + UserMessage(content="Second message") +]) + +# ✅ Also good: Single string (auto-converted) +response = await agent.run("Single message") + +# ❌ Avoid: Don't pass raw strings in lists without UserMessage +# response = await agent.run(["raw string"]) # May cause issues +``` + +## Examples + +### Multi-Modal Conversation + +```python +from flo_ai.agent import AgentBuilder +from flo_ai.llm import OpenAI +from flo_ai.models import UserMessage, ImageMessageContent, DocumentMessageContent +import base64 + +agent = ( + AgentBuilder() + .with_name('Multi-Modal Assistant') + .with_prompt('You can analyze images and documents.') + .with_llm(OpenAI(model='gpt-4o')) + .build() +) + +# Load image +with open('screenshot.png', 'rb') as f: + image_base64 = base64.b64encode(f.read()).decode('utf-8') + +# Load document +with open('spec.pdf', 'rb') as f: + doc_base64 = base64.b64encode(f.read()).decode('utf-8') + +# Multi-modal conversation +response = await agent.run([ + UserMessage(content="I need help with this:"), + UserMessage( + content=ImageMessageContent( + base64=image_base64, + mime_type="image/png" + ) + ), + UserMessage( + content=DocumentMessageContent( + base64=doc_base64, + mime_type="application/pdf" + ) + ), + UserMessage(content="Compare the image with the document.") +]) +``` + +### Function Message Handling + +```python +from flo_ai.models import FunctionMessage + +# Function messages are automatically created when tools are called +# But you can also create them manually if needed + +function_result = FunctionMessage( + content="The result of the function call", + name="calculate_total" +) + +# Function messages are typically used internally by the agent system +# when tools are executed +``` + +### Message Validation + +```python +from flo_ai.models import UserMessage, BaseMessage + +# Validate message type +msg = UserMessage(content="Hello") + +if isinstance(msg, BaseMessage): + print("Valid message") + print(f"Role: {msg.role}") + print(f"Content: {msg.content}") +``` + +The message system provides a flexible foundation for building rich, multi-modal AI applications with Flo AI! diff --git a/documentation/flo-ai/core-features/yaml-agents.mdx b/documentation/flo-ai/core-features/yaml-agents.mdx new file mode 100644 index 00000000..cb26701d --- /dev/null +++ b/documentation/flo-ai/core-features/yaml-agents.mdx @@ -0,0 +1,784 @@ +--- +title: "YAML based Agents" +description: "Create and configure agents using YAML configuration files" +icon: "file-code" +--- + +## YAML Agent Configuration + +Flo AI supports creating agents entirely through YAML configuration files, making it easy to version control, share, and manage agent configurations. + +## Basic YAML Agent + +Create a simple agent using YAML: + +```yaml agent.yaml +metadata: + name: "customer-support-agent" + version: "1.0.0" + description: "Customer support agent for handling inquiries" + +agent: + name: "Customer Support" + prompt: "You are a helpful customer support agent. Provide friendly and accurate assistance." 
+ model: + provider: "openai" + name: "gpt-4o-mini" + temperature: 0.7 + max_tokens: 1000 + settings: + max_retries: 3 +``` + +```python Load YAML Agent +from flo_ai.agent import AgentBuilder + +# Load agent from YAML file +agent_builder = AgentBuilder.from_yaml(yaml_file='agent.yaml') +agent = agent_builder.build() +response = await agent.run('How can I reset my password?') +``` + +## Advanced YAML Configuration + +### Agent with Tools + +Tools in YAML can be specified as string references (to tools in a tool registry) or as tool configurations with pre-filled parameters: + +```yaml tool-agent.yaml +metadata: + name: "calculator-agent" + version: "1.0.0" + +agent: + name: "Calculator Assistant" + prompt: "You are a math assistant that can perform calculations." + model: + provider: "anthropic" + name: "claude-3-5-sonnet-20240620" + temperature: 0.3 + + # Simple string reference (tool must exist in tool_registry) + tools: + - "calculate" + - "get_weather" + + # Or with tool configuration for pre-filled parameters + # tools: + # - name: "calculate" + # prefilled_params: + # operation: "add" + # - name: "get_weather" + # name_override: "weather_lookup" + # description_override: "Get current weather conditions" +``` + +### Agent with Structured Output + +Use the `parser` field to define structured output schemas: + +```yaml structured-agent.yaml +metadata: + name: "analysis-agent" + version: "1.0.0" + +agent: + name: "Business Analyst" + prompt: "Analyze business data and provide structured insights." + model: + provider: "openai" + name: "gpt-4o" + temperature: 0.2 + + parser: + name: "AnalysisResult" + description: "Structured analysis output" + fields: + - name: "summary" + type: "str" + description: "Executive summary" + required: true + - name: "key_findings" + type: "array" + description: "List of key findings" + items: + type: "str" + description: "A key finding" + - name: "recommendations" + type: "array" + description: "Actionable recommendations" + items: + type: "str" + description: "A recommendation" +``` + +### Agent with Role and Reasoning Pattern + +```yaml advanced-agent.yaml +metadata: + name: "advanced-agent" + version: "1.0.0" + +agent: + name: "Advanced Assistant" + prompt: "You are a helpful assistant." + job: "You help users solve problems." # Alternative to 'prompt' + role: "Senior Support Specialist" # Internal role description + act_as: "assistant" # How agent presents itself in messages + base_url: "https://api.example.com" # Optional base URL override + model: + provider: "openai" + name: "gpt-4o" + temperature: 0.7 + max_tokens: 2000 + timeout: 60 + settings: + temperature: 0.3 # Can override model temperature + max_retries: 5 + reasoning_pattern: "REACT" # DIRECT, REACT, or COT +``` + +### Agent with Examples + +You can provide example input/output pairs to guide the agent: + +```yaml example-agent.yaml +metadata: + name: "example-agent" + version: "1.0.0" + +agent: + name: "Example Agent" + prompt: "You provide examples based on patterns." + model: + provider: "openai" + name: "gpt-4o-mini" + examples: + - input: "What is the weather?" + output: "I can help you check the weather. Please provide your location." + - input: "Tell me a joke" + output: "Why don't scientists trust atoms? Because they make up everything!" 
+``` + +## YAML Schema Reference + +### Metadata Section + +```yaml +metadata: + name: "agent-name" # Required: Unique agent identifier + version: "1.0.0" # Required: Semantic version + description: "Agent description" # Optional: Human-readable description + author: "Your Name" # Optional: Agent author + tags: ["tag1", "tag2"] # Optional: Categorization tags +``` + +### Agent Configuration + +```yaml +agent: + name: "Agent Display Name" # Required: Human-readable name + prompt: "System prompt" # Required: Agent's system prompt (or use 'job') + job: "System prompt" # Alternative to 'prompt' (job takes precedence) + role: "Role description" # Optional: Internal role description + act_as: "assistant" # Optional: Message role (default: "assistant") + base_url: "https://api.example.com" # Optional: Base URL override + + model: # Required: LLM configuration + provider: "openai" # Required: openai, anthropic, claude, gemini, google, ollama, vertexai, rootflo, openai_vllm + name: "gpt-4o-mini" # Required: Model name (for most providers) + base_url: "https://api.openai.com/v1" # Optional: Custom base URL + temperature: 0.7 # Optional: 0.0 to 2.0 + max_tokens: 1000 # Optional: Maximum response length + timeout: 30 # Optional: Request timeout in seconds + # VertexAI specific + project: "my-project" # Required for vertexai + location: "us-central1" # Required for vertexai + # RootFlo specific + model_id: "model-123" # Required for rootflo + # OpenAI vLLM specific + api_key: "sk-..." # Required for openai_vllm + + settings: # Optional: Agent settings + temperature: 0.7 # Optional: Override model temperature + max_retries: 3 # Optional: Number of retry attempts + reasoning_pattern: "DIRECT" # Optional: DIRECT, REACT, or COT + + tools: [] # Optional: List of tools (see tools section) + parser: {} # Optional: Parser configuration for structured output (see parser section) + examples: [] # Optional: Example input/output pairs (see examples section) +``` + +### Tools Configuration + +Tools can be specified as simple string references or as tool configuration objects: + +```yaml +# Simple string reference (tool must exist in tool_registry) +tools: + - "tool_name" + - "another_tool" + +# Or with tool configuration +tools: + - name: "tool_name" # Required: Tool identifier (must exist in tool_registry) + prefilled_params: # Optional: Pre-filled parameters + param1: "value1" + param2: 42 + name_override: "custom_tool_name" # Optional: Custom name override + description_override: "Custom description" # Optional: Custom description override +``` + +**Note:** Tools must be registered in a `tool_registry` dictionary when loading the YAML. The registry maps tool names to `Tool` objects. 
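+
+As a quick sketch, the registry is a plain dictionary mapping the names used under `tools:` in the YAML to `Tool` objects (the tool and file names below are illustrative; the full pattern appears under Tool Integration later on this page):
+
+```python
+from flo_ai.agent import AgentBuilder
+from flo_ai.tool import flo_tool
+
+@flo_tool(description="Perform mathematical calculations")
+async def calculate(operation: str, x: float, y: float) -> float:
+    return x + y if operation == 'add' else x - y
+
+# Keys must match the tool names referenced in the YAML 'tools' list
+tool_registry = {'calculate': calculate.tool}
+
+agent = AgentBuilder.from_yaml(
+    yaml_file='tool-agent.yaml',
+    tool_registry=tool_registry,
+).build()
+```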
+ +### Parser Configuration (Structured Output) + +Use the `parser` field to define structured output schemas: + +```yaml +parser: + name: "ResultModel" # Required: Parser/model name + version: "1.0.0" # Optional: Parser version + description: "Output structure description" # Optional: Description + fields: # Required: List of field definitions + - name: "field_name" # Required: Field name + type: "str" # Required: Field type (str, int, bool, float, literal, object, array) + description: "Field description" # Required: Field description + required: true # Optional: Whether field is required + # For literal type + values: # Required for literal type + - value: "option1" + description: "First option" + - value: "option2" + description: "Second option" + # For array type + items: # Required for array type + type: "str" + description: "Item description" + # For object type + fields: # Required for object type + - name: "nested_field" + type: "str" + description: "Nested field" + default_value_prompt: "Generate a default value" # Optional: For literal fields +``` + +### Examples Configuration + +Provide example input/output pairs to guide the agent: + +```yaml +examples: + - input: "Example user input" # Required: Example input + output: "Example agent output" # Required: Example output (string or dict) + - input: "Another example" + output: + key: "value" + nested: {"data": "structure"} +``` + +## Loading and Using YAML Agents + +### Basic Loading + +```python +from flo_ai.agent import AgentBuilder + +# Load from file +agent_builder = AgentBuilder.from_yaml(yaml_file='agent.yaml') +agent = agent_builder.build() + +# Load from string +yaml_content = """ +agent: + name: "Test Agent" + prompt: "You are a test agent." + model: + provider: "openai" + name: "gpt-4o-mini" +""" + +agent_builder = AgentBuilder.from_yaml(yaml_str=yaml_content) +agent = agent_builder.build() + +# Load with tool registry +from flo_ai.tool import flo_tool + +@flo_tool(description="Get weather") +async def get_weather(city: str) -> str: + return f"Weather in {city}: sunny" + +tool_registry = {"get_weather": get_weather.tool} +agent_builder = AgentBuilder.from_yaml( + yaml_file='tool-agent.yaml', + tool_registry=tool_registry +) +agent = agent_builder.build() +``` + +### Writing/Saving YAML Configuration + +To save an agent configuration to YAML, you can manually construct the YAML structure: + +```python +import yaml +from flo_ai.agent import AgentBuilder +from flo_ai.agent.base_agent import ReasoningPattern +from flo_ai.llm import OpenAI + +# Create an agent programmatically +agent_builder = ( + AgentBuilder() + .with_name('Customer Support') + .with_prompt('You are a helpful assistant.') + .with_llm(OpenAI(model='gpt-4o-mini', temperature=0.7)) + .with_retries(3) + .with_reasoning(ReasoningPattern.REACT) +) + +# Build the agent +agent = agent_builder.build() + +# Create YAML configuration dictionary +yaml_config = { + 'metadata': { + 'name': 'customer-support-agent', + 'version': '1.0.0', + 'description': 'Customer support agent' + }, + 'agent': { + 'name': agent_builder._name, + 'prompt': str(agent_builder._system_prompt), + 'model': { + 'provider': 'openai', + 'name': 'gpt-4o-mini', + 'temperature': agent_builder._llm.temperature if agent_builder._llm else 0.7 + }, + 'settings': { + 'max_retries': agent_builder._max_retries, + 'reasoning_pattern': agent_builder._reasoning_pattern.name + } + } +} + +# Write to file +with open('exported-agent.yaml', 'w') as f: + yaml.dump(yaml_config, f, default_flow_style=False, 
sort_keys=False) + +print("✅ Agent configuration saved to exported-agent.yaml") +``` + +### Helper Function for Exporting + +Here's a more complete helper function to export agent configurations: + +```python +import yaml +from typing import Optional +from flo_ai.agent import AgentBuilder +from flo_ai.agent.base_agent import ReasoningPattern + +def export_agent_to_yaml( + agent_builder: AgentBuilder, + output_file: str, + metadata: Optional[dict] = None +) -> None: + """Export an AgentBuilder configuration to YAML file. + + Args: + agent_builder: The AgentBuilder instance to export + output_file: Path to output YAML file + metadata: Optional metadata dictionary + """ + config = {} + + # Add metadata + if metadata: + config['metadata'] = metadata + else: + config['metadata'] = { + 'name': agent_builder._name.lower().replace(' ', '-'), + 'version': '1.0.0' + } + + # Build agent configuration + agent_config = { + 'name': agent_builder._name, + } + + # Add prompt (prefer job if available, otherwise prompt) + if hasattr(agent_builder, '_system_prompt'): + agent_config['prompt'] = str(agent_builder._system_prompt) + + # Add role and act_as if set + if agent_builder._role: + agent_config['role'] = agent_builder._role + if agent_builder._act_as and agent_builder._act_as != 'assistant': + agent_config['act_as'] = agent_builder._act_as + + # Add model configuration + if agent_builder._llm: + llm_config = {} + # Extract provider and model name from LLM + # This is a simplified example - you may need to adjust based on your LLM implementation + if hasattr(agent_builder._llm, 'model'): + llm_config['provider'] = 'openai' # Adjust based on actual LLM type + llm_config['name'] = agent_builder._llm.model + if hasattr(agent_builder._llm, 'temperature'): + llm_config['temperature'] = agent_builder._llm.temperature + if hasattr(agent_builder._llm, 'max_tokens'): + llm_config['max_tokens'] = agent_builder._llm.max_tokens + + if llm_config: + agent_config['model'] = llm_config + + # Add settings + settings = {} + if agent_builder._max_retries != 3: # Only include if not default + settings['max_retries'] = agent_builder._max_retries + if agent_builder._reasoning_pattern != ReasoningPattern.DIRECT: + settings['reasoning_pattern'] = agent_builder._reasoning_pattern.name + + if settings: + agent_config['settings'] = settings + + # Add tools if present + if agent_builder._tools: + # Note: Tools are exported as references - actual tool definitions + # should be in your tool registry + agent_config['tools'] = [tool.name for tool in agent_builder._tools] + + config['agent'] = agent_config + + # Write to file + with open(output_file, 'w') as f: + yaml.dump(config, f, default_flow_style=False, sort_keys=False) + + print(f"✅ Agent configuration exported to {output_file}") + +# Usage +agent_builder = ( + AgentBuilder() + .with_name('My Agent') + .with_prompt('You are helpful.') + .with_llm(OpenAI(model='gpt-4o-mini')) + .with_retries(5) +) + +export_agent_to_yaml( + agent_builder, + 'my-agent.yaml', + metadata={'name': 'my-agent', 'version': '1.0.0', 'author': 'Your Name'} +) +``` + +### Using Variables in Prompts + +You can use variables in your prompts by using `` syntax. Variables are provided at runtime: + +```python +# Agent YAML with variables in prompt +# agent: +# name: "Personalized Assistant" +# prompt: "Hello ! You are a at ." 
+ +agent_builder = AgentBuilder.from_yaml('variable-agent.yaml') +agent = agent_builder.build() + +# Provide variables at runtime +variables = { + 'user_name': 'John', + 'user_role': 'Data Scientist', + 'company': 'TechCorp' +} + +response = await agent.run( + 'What should I focus on today?', + variables=variables +) +``` + +### Tool Integration + +Tools must be registered in a tool registry when loading YAML: + +```python +from flo_ai.agent import AgentBuilder +from flo_ai.tool import flo_tool + +# Define tool functions +@flo_tool(description="Perform mathematical calculations") +async def calculate(operation: str, x: float, y: float) -> float: + operations = { + 'add': lambda: x + y, + 'subtract': lambda: x - y, + 'multiply': lambda: x * y, + 'divide': lambda: x / y if y != 0 else 0, + } + return operations.get(operation, lambda: 0)() + +@flo_tool(description="Get weather information") +async def get_weather(city: str) -> str: + return f"Weather in {city}: sunny, 25°C" + +# Create tool registry +tool_registry = { + 'calculate': calculate.tool, + 'get_weather': get_weather.tool +} + +# Load agent with tool registry +agent_builder = AgentBuilder.from_yaml( + yaml_file='tool-agent.yaml', + tool_registry=tool_registry +) +agent = agent_builder.build() + +response = await agent.run('Calculate 5 plus 3') +``` + +Alternatively, you can add tools after loading: + +```python +# Load agent without tools +agent_builder = AgentBuilder.from_yaml('agent.yaml') +agent_builder.add_tool(calculate.tool) +agent = agent_builder.build() +``` + +## Best Practices + +### YAML Structure + +1. **Use meaningful names**: Choose descriptive agent and variable names +2. **Version your configurations**: Always include version numbers +3. **Document thoroughly**: Add descriptions for all components +4. **Validate schemas**: Use YAML schema validation tools + +### Performance Optimization + +```yaml +# Optimize for performance +agent: + name: "Optimized Agent" + prompt: "Concise and effective prompt" + model: + provider: "openai" + name: "gpt-4o-mini" # Use faster model for simple tasks + temperature: 0.3 # Lower temperature for consistency + max_tokens: 500 # Limit response length + settings: + max_retries: 2 # Limit retries to avoid costs +``` + +### Security Considerations + +```yaml +# Secure configuration +agent: + name: "Secure Agent" + prompt: | + You are a secure assistant. Never: + - Share sensitive information + - Execute dangerous commands + - Access unauthorized resources + model: + provider: "openai" + name: "gpt-4o" + temperature: 0.1 # Lower temperature for consistency + max_tokens: 200 # Limit response length + timeout: 10 # Short timeout for security + settings: + max_retries: 1 # Limit retries for security +``` + +## Validation and Testing + +### Schema Validation + +Use the `AgentYamlModel` for proper validation of YAML configurations: + +```python +from flo_ai.agent import AgentBuilder +from flo_ai.models.agent import AgentYamlModel +import yaml + +# Validate YAML structure using AgentYamlModel +def validate_agent_yaml(file_path): + """Validate YAML configuration using AgentYamlModel. 
+ + Args: + file_path: Path to YAML file to validate + + Returns: + bool: True if valid, False otherwise + """ + try: + with open(file_path, 'r') as f: + config = yaml.safe_load(f) + + # Use AgentYamlModel for validation + # This will validate all fields, types, and constraints + validated_config = AgentYamlModel(**config) + + print("✅ YAML configuration is valid") + print(f" Agent name: {validated_config.agent.name}") + if validated_config.metadata: + print(f" Metadata: {validated_config.metadata.name} v{validated_config.metadata.version}") + return True + except ValueError as e: + # AgentBuilder._validate_yaml_config raises ValueError with formatted errors + print(f"❌ YAML validation failed: {e}") + return False + except Exception as e: + print(f"❌ YAML validation failed: {e}") + return False + +# Validate a YAML file +validate_agent_yaml('agent.yaml') +``` + +You can also use `AgentBuilder.from_yaml()` which automatically validates the configuration: + +```python +from flo_ai.agent import AgentBuilder + +def validate_and_load_agent(yaml_file): + """Validate and load agent from YAML file. + + Args: + yaml_file: Path to YAML file + + Returns: + AgentBuilder or None if validation fails + """ + try: + # from_yaml automatically validates using AgentYamlModel + agent_builder = AgentBuilder.from_yaml(yaml_file=yaml_file) + print("✅ YAML configuration is valid and agent builder created") + return agent_builder + except ValueError as e: + # Validation errors are raised as ValueError with detailed messages + print(f"❌ YAML validation failed:\n{e}") + return None + except Exception as e: + print(f"❌ Error loading agent: {e}") + return None + +# Validate and load +agent_builder = validate_and_load_agent('agent.yaml') +if agent_builder: + agent = agent_builder.build() +``` + +### Testing YAML Agents + +```python +import asyncio +from flo_ai.agent import AgentBuilder + +async def test_yaml_agent(): + """Test a YAML agent configuration.""" + try: + # Load and validate agent (validation happens automatically) + agent_builder = AgentBuilder.from_yaml(yaml_file='agent.yaml') + agent = agent_builder.build() + + # Test basic functionality + response = await agent.run('Hello!') + assert response is not None + print(f"✅ Agent responds: {len(response)} message(s)") + + # Test with variables (if prompt contains variables) + variables = {'user_name': 'Test User', 'company': 'TestCorp'} + response = await agent.run('What can you help me with?', variables=variables) + print(f"✅ Agent with variables: {len(response)} message(s)") + + return True + except ValueError as e: + print(f"❌ Validation error: {e}") + return False + except Exception as e: + print(f"❌ Error: {e}") + return False + +# Run tests +asyncio.run(test_yaml_agent()) +``` + +## Examples + +### Customer Support Agent + +```yaml customer-support.yaml +metadata: + name: "customer-support" + version: "1.0.0" + description: "Handles customer inquiries and support requests" + +agent: + name: "Customer Support Agent" + prompt: | + You are a professional customer support agent. Your role is to: + 1. Listen to customer concerns with empathy + 2. Provide accurate and helpful information + 3. Escalate complex issues when necessary + 4. Maintain a friendly and professional tone + + Always be patient, understanding, and solution-oriented. 
+ role: "Senior Support Specialist" + model: + provider: "openai" + name: "gpt-4o-mini" + temperature: 0.3 + max_tokens: 1000 + settings: + max_retries: 2 + reasoning_pattern: "DIRECT" +``` + +### Data Analysis Agent + +```yaml data-analyst.yaml +metadata: + name: "data-analyst" + version: "1.0.0" + description: "Analyzes data and provides insights" + +agent: + name: "Data Analyst" + prompt: | + You are an expert data analyst. Analyze the provided data and: + 1. Identify key patterns and trends + 2. Provide statistical insights + 3. Suggest actionable recommendations + 4. Highlight any anomalies or concerns + model: + provider: "openai" + name: "gpt-4o" + temperature: 0.2 + parser: + name: "AnalysisResult" + description: "Structured analysis output" + fields: + - name: "summary" + type: "str" + description: "Executive summary" + required: true + - name: "insights" + type: "array" + description: "Key insights" + items: + type: "str" + description: "An insight" + - name: "recommendations" + type: "array" + description: "Actionable recommendations" + items: + type: "str" + description: "A recommendation" + settings: + reasoning_pattern: "COT" # Use Chain of Thought for complex analysis +``` + +This YAML-based approach makes agent configuration declarative, versionable, and easily shareable across teams! diff --git a/documentation/flo-ai/core-features/yaml-workflows.mdx b/documentation/flo-ai/core-features/yaml-workflows.mdx new file mode 100644 index 00000000..5a994300 --- /dev/null +++ b/documentation/flo-ai/core-features/yaml-workflows.mdx @@ -0,0 +1,997 @@ +--- +title: "YAML based Workflows" +description: "Create and configure multi-agent workflows using YAML configuration files" +icon: "file-code" +--- + +## YAML Workflow Configuration + +Flo AI supports creating entire multi-agent workflows through YAML configuration files, making it easy to version control, share, and manage complex workflow configurations. + +## Basic YAML Workflow + +Create a simple workflow using YAML: + +```yaml workflow.yaml +metadata: + name: "content-analysis-workflow" + version: "1.0.0" + description: "Multi-agent content analysis pipeline" + +arium: + agents: + - name: "analyzer" + role: "Content Analyst" + job: "Analyze the input content and extract key insights." + model: + provider: "openai" + name: "gpt-4o-mini" + temperature: 0.7 + + - name: "summarizer" + role: "Content Summarizer" + job: "Create a concise summary based on the analysis." + model: + provider: "anthropic" + name: "claude-3-5-sonnet-20240620" + temperature: 0.3 + + workflow: + start: "analyzer" + edges: + - from: "analyzer" + to: ["summarizer"] + end: ["summarizer"] +``` + +```python Load YAML Workflow +from flo_ai.arium import AriumBuilder + +# Load workflow from YAML +arium_builder = AriumBuilder.from_yaml(yaml_file='workflow.yaml') +result = await arium_builder.build_and_run(["Analyze this quarterly business report..."]) +``` + +## Advanced YAML Configuration + +### Workflow with Function Nodes + +Function nodes allow you to execute custom Python functions within workflows: + +```yaml function-workflow.yaml +metadata: + name: "data-processing-workflow" + version: "1.0.0" + +arium: + agents: + - name: "analyzer" + job: "Analyze the processed data." 
+ model: + provider: "openai" + name: "gpt-4o-mini" + + function_nodes: + - name: "data_processor" + function_name: "process_data" # Must exist in function_registry + description: "Processes input data" + input_filter: ["node1", "node2"] # Optional: filter inputs from specific nodes + prefilled_params: # Optional: pre-fill function parameters + format: "json" + + workflow: + start: "data_processor" + edges: + - from: "data_processor" + to: ["analyzer"] + end: ["analyzer"] +``` + +### Workflow with Routers + +Use routers for intelligent routing decisions: + +```yaml router-workflow.yaml +metadata: + name: "routing-workflow" + version: "1.0.0" + +arium: + agents: + - name: "classifier" + job: "Classify the input content." + model: + provider: "openai" + name: "gpt-4o-mini" + + - name: "technical_writer" + job: "Write technical content." + model: + provider: "openai" + name: "gpt-4o" + + - name: "creative_writer" + job: "Write creative content." + model: + provider: "anthropic" + name: "claude-3-5-sonnet-20240620" + + routers: + - name: "content_type_router" + type: "smart" # Uses LLM for intelligent routing + routing_options: + technical_writer: "Technical content, documentation, tutorials" + creative_writer: "Creative writing, storytelling, fiction" + model: + provider: "openai" + name: "gpt-4o-mini" + temperature: 0.3 + + workflow: + start: "classifier" + edges: + - from: "classifier" + to: ["technical_writer", "creative_writer"] + router: "content_type_router" + end: ["technical_writer", "creative_writer"] +``` + +### Workflow with Nested Arium (Sub-workflows) + +Create nested workflows for complex scenarios: + +```yaml nested-workflow.yaml +metadata: + name: "nested-workflow" + version: "1.0.0" + +arium: + agents: + - name: "coordinator" + job: "Coordinate the workflow." + model: + provider: "openai" + name: "gpt-4o-mini" + + ariums: + - name: "sub_workflow" + inherit_variables: true # Inherit variables from parent + yaml_file: "sub-workflow.yaml" # Reference external YAML file + # Or use inline configuration: + # agents: + # - name: "sub_agent" + # job: "Process in sub-workflow." + # model: + # provider: "openai" + # name: "gpt-4o-mini" + # workflow: + # start: "sub_agent" + # edges: [] + # end: ["sub_agent"] + + workflow: + start: "coordinator" + edges: + - from: "coordinator" + to: ["sub_workflow"] + end: ["sub_workflow"] +``` + +### Workflow with ForEach Nodes + +Process items in batches using ForEach nodes: + +```yaml foreach-workflow.yaml +metadata: + name: "batch-processing-workflow" + version: "1.0.0" + +arium: + agents: + - name: "processor" + job: "Process each item in the batch." 
+ model: + provider: "openai" + name: "gpt-4o-mini" + + iterators: # or use 'foreach_nodes' + - name: "batch_processor" + execute_node: "processor" # Node to execute on each item + + workflow: + start: "batch_processor" + edges: + - from: "batch_processor" + to: ["end"] # Special 'end' keyword + end: ["end"] +``` + +## Agent Configuration in Workflows + +Agents in workflows support multiple configuration methods: + +### Method 1: Direct Configuration + +```yaml +agents: + - name: "agent_name" + job: "Agent's system prompt" + role: "Agent Role" # Optional + model: + provider: "openai" + name: "gpt-4o-mini" + settings: + max_retries: 3 + reasoning_pattern: "REACT" +``` + +### Method 2: Reference Pre-built Agent + +```yaml +agents: + - name: "pre_built_agent" # Only name - agent must be provided in agents dict +``` + +### Method 3: Inline YAML Config + +```yaml +agents: + - name: "agent_name" + yaml_config: | + agent: + name: "Agent Name" + prompt: "Agent prompt" + model: + provider: "openai" + name: "gpt-4o-mini" +``` + +### Method 4: External YAML File Reference + +```yaml +agents: + - name: "agent_name" + yaml_file: "agent-config.yaml" +``` + +## Router Types + +### Smart Router + +Uses LLM to intelligently route based on content: + +```yaml +routers: + - name: "smart_router" + type: "smart" + routing_options: + agent1: "Description of when to route to agent1" + agent2: "Description of when to route to agent2" + model: + provider: "openai" + name: "gpt-4o-mini" + settings: + temperature: 0.3 + fallback_strategy: "first" # first, random, or all +``` + +### Task Classifier Router + +Routes based on task categories: + +```yaml +routers: + - name: "task_router" + type: "task_classifier" + task_categories: + coding: + description: "Programming, debugging, code review tasks" + keywords: ["code", "debug", "programming"] + examples: ["Fix this bug", "Review this code"] + writing: + description: "Content writing, documentation tasks" + keywords: ["write", "document", "content"] + examples: ["Write a blog post", "Document this API"] + model: + provider: "openai" + name: "gpt-4o-mini" +``` + +### Conversation Analysis Router + +Routes based on conversation analysis: + +```yaml +routers: + - name: "conversation_router" + type: "conversation_analysis" + routing_logic: + agent1: "Route to agent1 when conversation indicates X" + agent2: "Route to agent2 when conversation indicates Y" + model: + provider: "openai" + name: "gpt-4o-mini" +``` + +### Reflection Router + +For A→B→A→C feedback patterns: + +```yaml +routers: + - name: "reflection_router" + type: "reflection" + flow_pattern: ["writer", "critic", "writer", "editor"] # A → B → A → C pattern + model: + provider: "openai" + name: "gpt-4o-mini" + settings: + allow_early_exit: true # Allow early exit if criteria met +``` + +## YAML Schema Reference + +### Metadata Section + +```yaml +metadata: + name: "workflow-name" # Required: Unique workflow identifier + version: "1.0.0" # Required: Semantic version + description: "Workflow description" # Optional: Human-readable description + author: "Your Name" # Optional: Workflow author + tags: ["tag1", "tag2"] # Optional: Categorization tags +``` + +### Arium Configuration + +```yaml +arium: + agents: [] # Optional: List of agents (see agent configuration) + function_nodes: [] # Optional: List of function nodes (see function node configuration) + routers: [] # Optional: List of routers (see router configuration) + ariums: [] # Optional: List of nested arium nodes (see arium node configuration) + iterators: [] 
# Optional: List of foreach nodes (alias: foreach_nodes) + workflow: # Required: Workflow configuration + start: "node_name" # Required: Starting node name + edges: [] # Required: List of edges (see edge configuration) + end: ["node_name"] # Required: List of end node names +``` + +### Agent Configuration + +```yaml +agents: + - name: "agent_name" # Required: Agent name + job: "System prompt" # Required (if using direct config): Agent's system prompt + prompt: "System prompt" # Alternative to 'job' + role: "Agent Role" # Optional: Internal role description + act_as: "assistant" # Optional: Message role + base_url: "https://api.example.com" # Optional: Base URL override + input_filter: ["node1", "node2"] # Optional: Filter inputs from specific workflow nodes + model: # Optional: LLM configuration (required for direct config) + provider: "openai" # Required: LLM provider + name: "gpt-4o-mini" # Required: Model name + temperature: 0.7 # Optional: Temperature setting + max_tokens: 1000 # Optional: Maximum tokens + settings: # Optional: Agent settings + max_retries: 3 # Optional: Maximum retries + reasoning_pattern: "DIRECT" # Optional: DIRECT, REACT, or COT + # Alternative: Reference pre-built agent (only name) + # - name: "pre_built_agent" + # Alternative: Inline YAML config + # yaml_config: "agent yaml string" + # Alternative: External YAML file + # yaml_file: "agent.yaml" +``` + +**Note:** `input_filter` for agents specifies which nodes' outputs should be passed as inputs to this agent. Only results from the specified node names will be included. If not specified, all available memory items are passed. + +### Function Node Configuration + +```yaml +function_nodes: + - name: "function_node_name" # Required: Function node name + function_name: "function_name" # Required: Name in function_registry + description: "Function description" # Optional: Description + input_filter: ["node1", "node2"] # Optional: Filter inputs from specific workflow nodes + prefilled_params: # Optional: Pre-filled parameters + param1: "value1" + param2: 42 +``` + +**Note:** `input_filter` specifies which nodes' outputs should be passed as inputs to this node. Only results from the specified node names will be included. If not specified, all available memory items are passed. 
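+
+For example, a hypothetical function node that should only receive the outputs of upstream nodes named "analyzer" and "summarizer" (all names here are illustrative) could be declared like this:
+
+```yaml
+function_nodes:
+  - name: "report_builder"
+    function_name: "build_report"             # assumed to be registered in function_registry
+    description: "Combine filtered analysis results into a report"
+    input_filter: ["analyzer", "summarizer"]  # only these nodes' outputs are passed in
+    prefilled_params:
+      output_format: "markdown"
+```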
+ +### Router Configuration + +```yaml +routers: + - name: "router_name" # Required: Router name + type: "smart" # Required: smart, task_classifier, conversation_analysis, reflection, plan_execute + model: # Optional: LLM configuration for router + provider: "openai" + name: "gpt-4o-mini" + settings: # Optional: Router settings + temperature: 0.3 + fallback_strategy: "first" # first, random, or all + allow_early_exit: true # For reflection router + planner_agent: "planner" # For plan_execute router + executor_agent: "executor" # For plan_execute router + reviewer_agent: "reviewer" # For plan_execute router + # Smart router + routing_options: # Required for smart router + agent1: "Description for agent1" + agent2: "Description for agent2" + # Task classifier router + task_categories: # Required for task_classifier router + category1: + description: "Category description" + keywords: ["keyword1", "keyword2"] + examples: ["example1", "example2"] + # Conversation analysis router + routing_logic: # Required for conversation_analysis router + agent1: "Routing logic for agent1" + # Reflection router + flow_pattern: ["agent1", "agent2", "agent1"] # Required for reflection router + # Plan-execute router + agents: # Required for plan_execute router + planner: "Planner description" + executor: "Executor description" +``` + +### Edge Configuration + +```yaml +edges: + - from: "source_node" # Required: Source node name + to: ["target_node1", "target_node2"] # Required: List of target node names + router: "router_name" # Optional: Router name to use for routing +``` + +### Arium Node Configuration (Nested Workflows) + +```yaml +ariums: + - name: "nested_arium_name" # Required: Nested arium name + inherit_variables: true # Optional: Inherit variables from parent (default: true) + input_filter: ["node1", "node2"] # Optional: Filter inputs from specific workflow nodes + yaml_file: "nested-workflow.yaml" # Optional: External YAML file reference + # Or use inline configuration: + agents: [] # Optional: List of agents for nested arium + function_nodes: [] # Optional: List of function nodes + routers: [] # Optional: List of routers + ariums: [] # Optional: Nested arium nodes (supports nesting) + iterators: [] # Optional: List of foreach nodes + workflow: # Required if using inline config + start: "node_name" + edges: [] + end: ["node_name"] +``` + +**Note:** `input_filter` for nested ariums works the same way - it filters which parent workflow nodes' outputs are passed to the nested workflow. + +### ForEach Node Configuration + +```yaml +iterators: # or 'foreach_nodes' + - name: "foreach_node_name" # Required: ForEach node name + execute_node: "node_name" # Required: Name of node to execute on each item + input_filter: ["node1", "node2"] # Optional: Filter inputs from specific workflow nodes +``` + +**Note:** `input_filter` for ForEach nodes filters which nodes' outputs are used as the collection to iterate over. + +## Loading and Using YAML Workflows + +### Basic Loading + +```python +from flo_ai.arium import AriumBuilder + +# Load from file +arium_builder = AriumBuilder.from_yaml(yaml_file='workflow.yaml') +arium = arium_builder.build() + +# Load from string +yaml_content = """ +arium: + agents: + - name: "agent1" + job: "Process input." 
+ model: + provider: "openai" + name: "gpt-4o-mini" + workflow: + start: "agent1" + edges: [] + end: ["agent1"] +""" + +arium_builder = AriumBuilder.from_yaml(yaml_str=yaml_content) +arium = arium_builder.build() +``` + +### Loading with Registries + +When using function nodes or tool-enabled agents, provide registries: + +```python +from flo_ai.arium import AriumBuilder +from flo_ai.tool import flo_tool + +# Define functions for function nodes +def process_data(inputs, variables=None, **kwargs): + return f"Processed: {inputs}" + +# Define tools for agents +@flo_tool(description="Get weather") +async def get_weather(city: str) -> str: + return f"Weather in {city}: sunny" + +# Create registries +function_registry = { + "process_data": process_data +} + +tool_registry = { + "get_weather": get_weather.tool +} + +# Load with registries +arium_builder = AriumBuilder.from_yaml( + yaml_file='workflow.yaml', + function_registry=function_registry, + tool_registry=tool_registry +) +arium = arium_builder.build() +``` + +### Loading with Pre-built Agents + +Reference pre-built agents in YAML: + +```python +from flo_ai.arium import AriumBuilder +from flo_ai.agent import AgentBuilder +from flo_ai.llm import OpenAI + +# Create pre-built agents +agent1 = ( + AgentBuilder() + .with_name('pre_built_agent') + .with_prompt('You are a helpful assistant.') + .with_llm(OpenAI(model='gpt-4o-mini')) + .build() +) + +# Provide agents dictionary +agents_dict = { + 'pre_built_agent': agent1 +} + +# YAML can reference it: +# agents: +# - name: "pre_built_agent" + +arium_builder = AriumBuilder.from_yaml( + yaml_file='workflow.yaml', + agents=agents_dict +) +arium = arium_builder.build() +``` + +### Loading with Custom Routers + +Provide custom router functions: + +```python +from flo_ai.arium import AriumBuilder +from flo_ai.arium.memory import MessageMemory + +def custom_router(memory: MessageMemory) -> str: + # Custom routing logic + messages = memory.get() + if messages and "urgent" in str(messages[-1].result).lower(): + return "urgent_handler" + return "normal_handler" + +routers_dict = { + 'custom_router': custom_router +} + +arium_builder = AriumBuilder.from_yaml( + yaml_file='workflow.yaml', + routers=routers_dict +) +arium = arium_builder.build() +``` + +### Running Workflows + +```python +# Build and run in one step +result = await ( + AriumBuilder() + .from_yaml(yaml_file='workflow.yaml') + .build_and_run(["Input message"]) +) + +# Or build first, then run +arium_builder = AriumBuilder.from_yaml(yaml_file='workflow.yaml') +arium = arium_builder.build() +result = await arium.run(["Input message"]) + +# With variables +variables = {'user_name': 'John', 'company': 'TechCorp'} +result = await arium.run(["Input message"], variables=variables) +``` + +## Writing/Saving YAML Workflow Configuration + +To save a workflow configuration to YAML, you can manually construct the YAML structure: + +```python +import yaml +from flo_ai.arium import AriumBuilder + +# Create a workflow programmatically +arium_builder = ( + AriumBuilder() + .add_agent(agent1) + .add_agent(agent2) + .start_with(agent1) + .connect(agent1, agent2) + .end_with(agent2) +) + +# Create YAML configuration dictionary +yaml_config = { + 'metadata': { + 'name': 'my-workflow', + 'version': '1.0.0', + 'description': 'My workflow description' + }, + 'arium': { + 'agents': [ + { + 'name': 'agent1', + 'job': 'First agent prompt', + 'model': { + 'provider': 'openai', + 'name': 'gpt-4o-mini' + } + }, + { + 'name': 'agent2', + 'job': 'Second agent prompt', + 
'model': { + 'provider': 'openai', + 'name': 'gpt-4o-mini' + } + } + ], + 'workflow': { + 'start': 'agent1', + 'edges': [ + { + 'from': 'agent1', + 'to': ['agent2'] + } + ], + 'end': ['agent2'] + } + } +} + +# Write to file +with open('exported-workflow.yaml', 'w') as f: + yaml.dump(yaml_config, f, default_flow_style=False, sort_keys=False) + +print("✅ Workflow configuration saved to exported-workflow.yaml") +``` + +## Validation and Testing + +### Schema Validation + +Use the `AriumYamlModel` for proper validation of YAML configurations: + +```python +from flo_ai.arium import AriumBuilder +from flo_ai.models.arium import AriumYamlModel +import yaml + +# Validate YAML structure using AriumYamlModel +def validate_workflow_yaml(file_path): + """Validate YAML configuration using AriumYamlModel. + + Args: + file_path: Path to YAML file to validate + + Returns: + bool: True if valid, False otherwise + """ + try: + with open(file_path, 'r') as f: + config = yaml.safe_load(f) + + # Use AriumYamlModel for validation + # This will validate all fields, types, and constraints + validated_config = AriumYamlModel(**config) + + print("✅ YAML configuration is valid") + print(f" Workflow start: {validated_config.arium.workflow.start}") + print(f" Number of agents: {len(validated_config.arium.agents or [])}") + if validated_config.metadata: + print(f" Metadata: {validated_config.metadata.name} v{validated_config.metadata.version}") + return True + except ValueError as e: + # AriumBuilder._validate_yaml_config raises ValueError with formatted errors + print(f"❌ YAML validation failed: {e}") + return False + except Exception as e: + print(f"❌ YAML validation failed: {e}") + return False + +# Validate a YAML file +validate_workflow_yaml('workflow.yaml') +``` + +You can also use `AriumBuilder.from_yaml()` which automatically validates the configuration: + +```python +from flo_ai.arium import AriumBuilder + +def validate_and_load_workflow(yaml_file): + """Validate and load workflow from YAML file. 
+ + Args: + yaml_file: Path to YAML file + + Returns: + AriumBuilder or None if validation fails + """ + try: + # from_yaml automatically validates using AriumYamlModel + arium_builder = AriumBuilder.from_yaml(yaml_file=yaml_file) + print("✅ YAML configuration is valid and workflow builder created") + return arium_builder + except ValueError as e: + # Validation errors are raised as ValueError with detailed messages + print(f"❌ YAML validation failed:\n{e}") + return None + except Exception as e: + print(f"❌ Error loading workflow: {e}") + return None + +# Validate and load +arium_builder = validate_and_load_workflow('workflow.yaml') +if arium_builder: + arium = arium_builder.build() +``` + +### Testing YAML Workflows + +```python +import asyncio +from flo_ai.arium import AriumBuilder + +async def test_yaml_workflow(): + """Test a YAML workflow configuration.""" + try: + # Load and validate workflow (validation happens automatically) + arium_builder = AriumBuilder.from_yaml(yaml_file='workflow.yaml') + arium = arium_builder.build() + + # Test workflow execution + result = await arium.run(["Test input message"]) + assert result is not None + print(f"✅ Workflow executed: {len(result)} message(s)") + + # Test with variables + variables = {'user_name': 'Test User', 'company': 'TestCorp'} + result = await arium.run(["Test message"], variables=variables) + print(f"✅ Workflow with variables: {len(result)} message(s)") + + return True + except ValueError as e: + print(f"❌ Validation error: {e}") + return False + except Exception as e: + print(f"❌ Error: {e}") + return False + +# Run tests +asyncio.run(test_yaml_workflow()) +``` + +## Best Practices + +### Workflow Structure + +1. **Use meaningful names**: Choose descriptive agent, node, and router names +2. **Version your configurations**: Always include version numbers in metadata +3. **Document thoroughly**: Add descriptions for all components +4. **Validate schemas**: Use YAML schema validation tools + +### Performance Optimization + +```yaml +# Optimize for performance +arium: + agents: + - name: "fast_agent" + job: "Quick processing" + model: + provider: "openai" + name: "gpt-4o-mini" # Use faster model + temperature: 0.3 + max_tokens: 500 + settings: + max_retries: 2 # Limit retries + workflow: + start: "fast_agent" + edges: [] + end: ["fast_agent"] +``` + +### Security Considerations + +```yaml +# Secure configuration +arium: + agents: + - name: "secure_agent" + job: | + You are a secure assistant. Never: + - Share sensitive information + - Execute dangerous commands + model: + provider: "openai" + name: "gpt-4o" + temperature: 0.1 + max_tokens: 200 + timeout: 10 # Short timeout + settings: + max_retries: 1 + workflow: + start: "secure_agent" + edges: [] + end: ["secure_agent"] +``` + +## Examples + +### Content Analysis Workflow + +```yaml content-analysis.yaml +metadata: + name: "content-analysis" + version: "1.0.0" + description: "Analyzes and summarizes content" + +arium: + agents: + - name: "analyzer" + role: "Content Analyst" + job: | + Analyze the input content and: + 1. Extract key insights + 2. Identify main themes + 3. Note important details + model: + provider: "openai" + name: "gpt-4o" + temperature: 0.2 + + - name: "summarizer" + role: "Content Summarizer" + job: "Create a concise summary based on the analysis." 
+ model: + provider: "anthropic" + name: "claude-3-5-sonnet-20240620" + temperature: 0.3 + + workflow: + start: "analyzer" + edges: + - from: "analyzer" + to: ["summarizer"] + end: ["summarizer"] +``` + +### Multi-Agent Routing Workflow + +```yaml routing-workflow.yaml +metadata: + name: "routing-workflow" + version: "1.0.0" + description: "Routes content to specialized agents" + +arium: + agents: + - name: "classifier" + job: "Classify the input content type." + model: + provider: "openai" + name: "gpt-4o-mini" + + - name: "technical_writer" + job: "Write technical documentation." + model: + provider: "openai" + name: "gpt-4o" + + - name: "creative_writer" + job: "Write creative content." + model: + provider: "anthropic" + name: "claude-3-5-sonnet-20240620" + + routers: + - name: "content_router" + type: "smart" + routing_options: + technical_writer: "Technical content, documentation, code" + creative_writer: "Creative writing, stories, fiction" + model: + provider: "openai" + name: "gpt-4o-mini" + temperature: 0.3 + + workflow: + start: "classifier" + edges: + - from: "classifier" + to: ["technical_writer", "creative_writer"] + router: "content_router" + end: ["technical_writer", "creative_writer"] +``` + +### Reflection Workflow + +```yaml reflection-workflow.yaml +metadata: + name: "reflection-workflow" + version: "1.0.0" + description: "Writer-critic feedback loop" + +arium: + agents: + - name: "writer" + job: "Write content based on feedback." + model: + provider: "openai" + name: "gpt-4o" + + - name: "critic" + job: "Review and provide constructive feedback." + model: + provider: "anthropic" + name: "claude-3-5-sonnet-20240620" + + - name: "editor" + job: "Finalize the content." + model: + provider: "openai" + name: "gpt-4o" + + routers: + - name: "reflection_router" + type: "reflection" + flow_pattern: ["writer", "critic", "writer", "editor"] + model: + provider: "openai" + name: "gpt-4o-mini" + settings: + allow_early_exit: true + + workflow: + start: "writer" + edges: + - from: "writer" + to: ["critic"] + router: "reflection_router" + end: ["editor"] +``` + +This YAML-based approach makes workflow configuration declarative, versionable, and easily shareable across teams! diff --git a/documentation/development.mdx b/documentation/flo-ai/development.mdx similarity index 55% rename from documentation/development.mdx rename to documentation/flo-ai/development.mdx index caa58912..653a6360 100644 --- a/documentation/development.mdx +++ b/documentation/flo-ai/development.mdx @@ -68,7 +68,7 @@ async def test_installation(): ) response = await agent.run('Hello, world!') - print(f'Agent response: {response}') + print(f'Agent response: {response[-1].content}') asyncio.run(test_installation()) ``` @@ -76,33 +76,6 @@ asyncio.run(test_installation()) -## Development Tools - -### Flo AI Studio - -The Flo AI Studio is a visual workflow designer for creating AI agent workflows: - - - - -```bash -cd studio -pnpm install -``` - - - - - -```bash -pnpm dev -``` - -The studio will be available at `http://localhost:5173`. 
- - - - ### Testing Run the test suite to ensure everything is working correctly: @@ -125,57 +98,49 @@ Understanding the Flo AI project structure: ``` flo_ai/ ├── flo_ai/ # Core package -│ ├── builder/ # Agent builder components -│ ├── llm/ # LLM provider integrations -│ ├── tool/ # Tool framework -│ ├── arium/ # Workflow orchestration -│ ├── models/ # Data models -│ └── telemetry/ # Observability -├── examples/ # Example implementations -├── tests/ # Test suite -└── studio/ # Visual workflow designer +│ ├── agent/ # Agent +│ ├── arium/ # Workflow orchestration +│ ├── formatter/ # Format parsers +│ ├── helpers/ # Helper utilities +│ ├── llm/ # LLM provider integrations +│ ├── models/ # Data models +│ ├── telemetry/ # Observability +│ ├── tool/ # Tool framework +│ └── utils/ # Utility functions +├── docs/ # Documentation +├── examples/ # Example implementations +└── tests/ # Test suite + ├── integration-tests/ # Integration tests + └── unit-tests/ # Unit tests ``` ## Contributing - - - - 1. Fork the repository - 2. Clone your fork: `git clone https://github.com/your-username/flo-ai.git` - 3. Install in development mode: `pip install -e .` - 4. Install development dependencies: `pip install -e ".[dev]"` - 5. Run tests: `pytest` +Learn how to set up your development environment and submit changes to the Flo AI project. - +#### Prerequisites - +Before setting up for development, ensure you have the following tools installed: - Flo AI uses pre-commit hooks for code formatting: - - ```bash - # Install pre-commit - pip install pre-commit +- **uv**: Fast Python package installer and resolver. [Install uv](https://docs.astral.sh/uv/getting-started/installation/) +- **pnpm**(optional): Package manager for Node.js. [Install pnpm](https://pnpm.io/installation) - # Install hooks - pre-commit install - - # Run on all files - pre-commit run --all-files - ``` - - +#### Setting up for development - +1. Fork the wavefront repository +2. Clone your fork: `git clone https://github.com/your-username/wavefront.git` +3. Enable workspace mode to start development (Recommended) +4. Run the `install-dep-local.sh` script in the root directory, or navigate to the `flo_ai` workspace and run `uv sync` +5. Start a new terminal in the `flo_ai` workspace +6. Start contributing! - 1. Create a feature branch: `git checkout -b feature/your-feature` - 2. Make your changes and add tests - 3. Run tests: `pytest` - 4. Commit with conventional commits: `git commit -m "feat: add new feature"` - 5. Push and create a pull request +#### Submitting changes - - +1. Create a feature branch: `git checkout -b feature/your-feature` +2. Make your changes and add tests +3. Run tests: `pytest` +4. Commit with conventional commits: `git commit -m "feat: add new feature"`. Ensure the pre-commit hook runs without any errors +5. Push and create a pull request ## Troubleshooting @@ -204,19 +169,6 @@ flo_ai/ ``` - - - - If the studio doesn't load, try: - - ```bash - cd studio - rm -rf node_modules - pnpm install - pnpm dev - ``` - - ## Need Help? 
diff --git a/documentation/essentials/code.mdx b/documentation/flo-ai/examples/code.mdx similarity index 100% rename from documentation/essentials/code.mdx rename to documentation/flo-ai/examples/code.mdx diff --git a/documentation/flo-ai.mdx b/documentation/flo-ai/index.mdx similarity index 98% rename from documentation/flo-ai.mdx rename to documentation/flo-ai/index.mdx index 7a2d0d96..c45fed45 100644 --- a/documentation/flo-ai.mdx +++ b/documentation/flo-ai/index.mdx @@ -1,5 +1,5 @@ --- -title: "Flo AI Documentation" +title: "Flo AI" description: "Build production-ready AI agents with structured outputs, tool integration, and multi-LLM support" --- diff --git a/documentation/quickstart.mdx b/documentation/flo-ai/quickstart.mdx similarity index 97% rename from documentation/quickstart.mdx rename to documentation/flo-ai/quickstart.mdx index 5d432e1e..c58fa75a 100644 --- a/documentation/quickstart.mdx +++ b/documentation/flo-ai/quickstart.mdx @@ -55,7 +55,7 @@ async def main(): ) response = await agent.run('What is the formula for the area of a circle?') - print(f'Response: {response}') + print(f'Response: {response[-1].content}') asyncio.run(main()) ``` @@ -96,7 +96,7 @@ async def main(): ) response = await agent.run('Calculate 5 plus 3') - print(f'Response: {response}') + print(f'Response: {response[-1].content}') asyncio.run(main()) ``` diff --git a/flo_ai/README.md b/flo_ai/README.md index a89f82ce..126c7dfc 100644 --- a/flo_ai/README.md +++ b/flo_ai/README.md @@ -167,52 +167,6 @@ async def main(): asyncio.run(main()) ``` -## 🎨 Flo AI Studio - Visual Workflow Designer - -**Create AI workflows visually with our powerful React-based studio!** - -

-  [image: Flo AI Studio - Visual Workflow Designer]

- -Flo AI Studio is a modern, intuitive visual editor that allows you to design complex multi-agent workflows through a drag-and-drop interface. Build sophisticated AI systems without writing code, then export them as production-ready YAML configurations. - -### 🚀 Studio Features - -- **🎯 Visual Design**: Drag-and-drop interface for creating agent workflows -- **🤖 Agent Management**: Configure AI agents with different roles, models, and tools -- **🔀 Smart Routing**: Visual router configuration for intelligent workflow decisions -- **📤 YAML Export**: Export workflows as Flo AI-compatible YAML configurations -- **📥 YAML Import**: Import existing workflows for further editing -- **✅ Workflow Validation**: Real-time validation and error checking -- **🔧 Tool Integration**: Connect agents to external tools and APIs -- **📋 Template System**: Quick start with pre-built agent and router templates - -### 🏃‍♂️ Quick Start with Studio - -1. **Start the Studio**: - - ```bash - cd studio - pnpm install - pnpm dev - ``` - -2. **Design Your Workflow**: - - - Add agents, routers, and tools to the canvas - - Configure their properties and connections - - Test with the built-in validation - -3. **Export & Run**: - -```python -from flo_ai.arium import AriumBuilder - - builder = AriumBuilder.from_yaml(yaml_file='your_workflow.yaml') - result = await builder.build_and_run(['Your input here']) -``` - ## 🔧 Core Features ### LLM Providers diff --git a/flo_ai/flo_ai/agent/agent.py b/flo_ai/flo_ai/agent/agent.py index 89977035..d23602b6 100644 --- a/flo_ai/flo_ai/agent/agent.py +++ b/flo_ai/flo_ai/agent/agent.py @@ -77,7 +77,7 @@ async def run( ) -> List[BaseMessage]: variables = variables or {} if isinstance(inputs, str): - inputs = [UserMessage(TextMessageContent(text=inputs))] + inputs = [UserMessage(content=resolve_variables(inputs, variables))] # Perform runtime variable validation if not already resolved (single agent usage) if not self.resolved_variables: diff --git a/flo_ai/flo_ai/arium/nodes.py b/flo_ai/flo_ai/arium/nodes.py index 0d32f365..4af2ee67 100644 --- a/flo_ai/flo_ai/arium/nodes.py +++ b/flo_ai/flo_ai/arium/nodes.py @@ -177,13 +177,13 @@ class FunctionNode: def __init__( self, name: str, - description: str, function: Callable[..., Any], + description: Optional[str] = None, prefilled_params: Optional[Dict[str, Any]] = None, input_filter: Optional[List[str]] = None, ) -> None: self.name = name - self.description = description + self.description = description or f"Function node '{name}'" self.function = function self.prefilled_params = prefilled_params or {} self.input_filter: Optional[List[str]] = input_filter diff --git a/flo_ai/pyproject.toml b/flo_ai/pyproject.toml index 8f6daf94..928575e4 100644 --- a/flo_ai/pyproject.toml +++ b/flo_ai/pyproject.toml @@ -53,13 +53,14 @@ dev = [ [tool.pytest.ini_options] asyncio_mode = "auto" asyncio_default_fixture_loop_scope = "function" +addopts = "-m 'not integration'" markers = [ "integration: tests that make actual LLM API calls and require API keys", - "llm_tests: tests that make actual LLM API calls and require API keys" + "unit_tests: unit tests that do not require external API calls" ] [tool.uv] -required-version = ">=0.7.3" +required-version = ">=0.8.6" [tool.hatch.build.targets.sdist] include = ["flo_ai"] diff --git a/wavefront/README.md b/wavefront/README.md index 3e164ce0..3601eb9d 100644 --- a/wavefront/README.md +++ b/wavefront/README.md @@ -10,7 +10,7 @@ The project has its backend services written in python and frontend in reactjs. 
- Python >=3.11 - Node.js >=22.12 -- uv >=0.7.15 +- uv >=0.8.6 - pnpm >=10.13.1 ## Wavefront Overview @@ -19,12 +19,12 @@ The platform consist of following components, which create a microservice mesh t The platform consists of following components: -| Service | Port | Description | Release Status | -|---------|------|-------------|----------------| -| **floware** | 8001 | Core AI middleware service. This service connects wavefront to multiple backends, databases, AI models and more. This is the core control center of the platform. | Beta | -| **floconsole** | 8002 | Management console service, this module is multi-app control centre for configuriong multiple apps on the wavefront middleware. | Beta | -| **inference_app** | 8003 | Inference App service. A simple service for running pytorch models. We right now support all models which works on pytorch version 0.16.0. | Experimental | -| **call_processing** | 8004 | Voice call processing service (Pipecat) | Beta | +| Service | Port | Description | Release Status | +| ------------------- | ---- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------- | +| **floware** | 8001 | Core AI middleware service. This service connects wavefront to multiple backends, databases, AI models and more. This is the core control center of the platform. | Beta | +| **floconsole** | 8002 | Management console service, this module is multi-app control centre for configuriong multiple apps on the wavefront middleware. | Beta | +| **inference_app** | 8003 | Inference App service. A simple service for running pytorch models. We right now support all models which works on pytorch version 0.16.0. | Experimental | +| **call_processing** | 8004 | Voice call processing service (Pipecat) | Beta | #### Middleware High Level Architecture: @@ -38,7 +38,7 @@ The platform consists of following components: Wavefront

-The project is ready for production use only in Google Cloud Console and Amazon Web Services. +The project is ready for production use only in Google Cloud Console and Amazon Web Services. In the current state of the project, it requires following cloud services to run locally (We are working on removing this dependencies for local runs): - Google Cloud Storage or Amazon S3 @@ -47,7 +47,7 @@ In the current state of the project, it requires following cloud services to run ## Quick Start > [!WARNING] -> +> > - This project is under active development and APIs may change without notice. Please checkout the [platform docs](https://wavefront.rootflo.ai) for the latest information. > - The platform is not in the GA state, and there are unimplemented feature. Checkout [ROADMAP.md](../ROADMAP.md) for the list of features, and whats missing. @@ -91,7 +91,7 @@ Now open http://localhost:5173 in your browser to see the frontend & login with ## Next Steps -Connect your datasources, and create your first agent & workflow or go ahead an try out the voice bot feature. +Connect your datasources, and create your first agent & workflow or go ahead an try out the voice bot feature. - Checkout the platform docs here [https://wavefront.rootflo.ai](https://wavefront.rootflo.ai/). -- Incase you face any issues, dont hesitate to reach out to schedule a call with us [here](https://calendly.com/meetings-rootflo/30min) \ No newline at end of file +- Incase you face any issues, dont hesitate to reach out to schedule a call with us [here](https://calendly.com/meetings-rootflo/30min) diff --git a/wavefront/server/docker/call_processing.Dockerfile b/wavefront/server/docker/call_processing.Dockerfile index 0e6d11b4..f948b7ef 100644 --- a/wavefront/server/docker/call_processing.Dockerfile +++ b/wavefront/server/docker/call_processing.Dockerfile @@ -2,7 +2,7 @@ FROM python:3.11-slim WORKDIR /app -COPY --from=ghcr.io/astral-sh/uv:0.7.15 /uv /uvx /bin/ +COPY --from=ghcr.io/astral-sh/uv:0.8.6 /uv /uvx /bin/ RUN apt-get update && apt-get install -y \ libpq-dev \ diff --git a/wavefront/server/docker/floconsole.Dockerfile b/wavefront/server/docker/floconsole.Dockerfile index b7353933..abf83d5b 100644 --- a/wavefront/server/docker/floconsole.Dockerfile +++ b/wavefront/server/docker/floconsole.Dockerfile @@ -2,7 +2,7 @@ FROM python:3.11-slim WORKDIR /app -COPY --from=ghcr.io/astral-sh/uv:0.7.15 /uv /uvx /bin/ +COPY --from=ghcr.io/astral-sh/uv:0.8.6 /uv /uvx /bin/ RUN apt-get update && apt-get install -y \ libpq-dev \ diff --git a/wavefront/server/docker/floware.Dockerfile b/wavefront/server/docker/floware.Dockerfile index 7d92f228..1aeacb5d 100644 --- a/wavefront/server/docker/floware.Dockerfile +++ b/wavefront/server/docker/floware.Dockerfile @@ -2,7 +2,7 @@ FROM python:3.11-slim WORKDIR /app -COPY --from=ghcr.io/astral-sh/uv:0.7.15 /uv /uvx /bin/ +COPY --from=ghcr.io/astral-sh/uv:0.8.6 /uv /uvx /bin/ RUN apt-get update && apt-get install -y \ libpq-dev \ diff --git a/wavefront/server/docker/inference_app.Dockerfile b/wavefront/server/docker/inference_app.Dockerfile index 3c2d730f..0981e3e8 100644 --- a/wavefront/server/docker/inference_app.Dockerfile +++ b/wavefront/server/docker/inference_app.Dockerfile @@ -2,7 +2,7 @@ FROM nvidia/cuda:12.6.3-cudnn-devel-ubuntu22.04 WORKDIR /app -COPY --from=ghcr.io/astral-sh/uv:0.7.15 /uv /uvx /bin/ +COPY --from=ghcr.io/astral-sh/uv:0.8.6 /uv /uvx /bin/ RUN apt-get update && apt-get install -y \ libpq-dev \ diff --git a/wavefront/server/modules/agents_module/pyproject.toml 
b/wavefront/server/modules/agents_module/pyproject.toml index 3f0a9bc6..10f1f321 100644 --- a/wavefront/server/modules/agents_module/pyproject.toml +++ b/wavefront/server/modules/agents_module/pyproject.toml @@ -13,7 +13,7 @@ dependencies = [ "flo-utils", "tools-module", "api-services-module", - "flo-ai>=1.1.0-rc5", + "flo-ai==1.1.0-rc5", ] [tool.uv.sources] diff --git a/wavefront/server/modules/knowledge_base_module/pyproject.toml b/wavefront/server/modules/knowledge_base_module/pyproject.toml index 382d9c3f..49b47823 100644 --- a/wavefront/server/modules/knowledge_base_module/pyproject.toml +++ b/wavefront/server/modules/knowledge_base_module/pyproject.toml @@ -17,7 +17,7 @@ dependencies = [ "pandas~=2.2.3", "ollama~=0.4.8", "textract~=1.6.5", - "flo-ai>=1.1.0-rc5", + "flo-ai==1.1.0-rc5", "google-cloud-pubsub~=2.30.0", "boto3<=1.38.40", "pyyaml>=6.0.3,<7", diff --git a/wavefront/server/modules/tools_module/pyproject.toml b/wavefront/server/modules/tools_module/pyproject.toml index dc8b22b9..9478878b 100644 --- a/wavefront/server/modules/tools_module/pyproject.toml +++ b/wavefront/server/modules/tools_module/pyproject.toml @@ -3,7 +3,7 @@ name = "tools_module" version = "0.1.0" description = "Tools module for Flo AI agent system" dependencies = [ - "flo-ai>=1.1.0-rc5", + "flo-ai==1.1.0-rc5", "flo_cloud", "datasource", diff --git a/wavefront/server/uv.lock b/wavefront/server/uv.lock index 3d973107..258a3d7e 100644 --- a/wavefront/server/uv.lock +++ b/wavefront/server/uv.lock @@ -1,5 +1,5 @@ version = 1 -revision = 2 +revision = 3 requires-python = ">=3.11" resolution-markers = [ "python_full_version >= '3.14' and sys_platform == 'darwin'", @@ -90,7 +90,7 @@ dependencies = [ requires-dist = [ { name = "api-services-module", editable = "modules/api_services_module" }, { name = "common-module", editable = "modules/common_module" }, - { name = "flo-ai", specifier = ">=1.1.0rc5" }, + { name = "flo-ai", specifier = "==1.1.0rc5" }, { name = "flo-cloud", editable = "packages/flo_cloud" }, { name = "flo-utils", editable = "packages/flo_utils" }, { name = "tools-module", editable = "modules/tools_module" }, @@ -648,7 +648,7 @@ version = "2.0.17" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp" }, - { name = "audioop-lts", marker = "python_full_version >= '3.13' and python_full_version < '4.0'" }, + { name = "audioop-lts", marker = "python_full_version >= '3.13' and python_full_version < '4'" }, { name = "httpx" }, { name = "httpx-sse" }, { name = "iterators" }, @@ -2576,7 +2576,7 @@ dev = [ requires-dist = [ { name = "boto3", specifier = "<=1.38.40" }, { name = "datasource", editable = "plugins/datasource" }, - { name = "flo-ai", specifier = ">=1.1.0rc5" }, + { name = "flo-ai", specifier = "==1.1.0rc5" }, { name = "flo-cloud", editable = "packages/flo_cloud" }, { name = "google-cloud-pubsub", specifier = "~=2.30.0" }, { name = "numpy", specifier = ">=1.24,<2.0" }, @@ -5482,7 +5482,7 @@ dev = [ requires-dist = [ { name = "common-module", editable = "modules/common_module" }, { name = "datasource", editable = "plugins/datasource" }, - { name = "flo-ai", specifier = ">=1.1.0rc5" }, + { name = "flo-ai", specifier = "==1.1.0rc5" }, { name = "flo-cloud", editable = "packages/flo_cloud" }, { name = "knowledge-base-module", editable = "modules/knowledge_base_module" }, { name = "plugins-module", editable = "modules/plugins_module" },