Merged
2 changes: 1 addition & 1 deletion flo_ai/flo_ai/arium/arium.py
@@ -381,7 +381,7 @@ def _resolve_inputs(
)
)
else:
# ImageMessage and DocumentMessage objects don't need variable resolution
# ImageMessageContent and DocumentMessage objects don't need variable resolution
resolved_inputs.append(input_item)
return resolved_inputs

34 changes: 10 additions & 24 deletions flo_ai/flo_ai/arium/builder.py
@@ -285,6 +285,7 @@ def from_yaml(
tools: Optional[Dict[str, Tool]] = None,
routers: Optional[Dict[str, Callable]] = None,
base_llm: Optional[BaseLLM] = None,
**kwargs,
) -> 'AriumBuilder':
"""Create an AriumBuilder from a YAML configuration.

@@ -478,7 +479,7 @@ def from_yaml(
and 'yaml_file' not in agent_config
):
agent = cls._create_agent_from_direct_config(
agent_config, base_llm, tools
agent_config, base_llm, tools, **kwargs
)

# Method 3: Inline YAML config
@@ -537,7 +538,7 @@ def from_yaml(
router_llm = None
if 'model' in router_config:
router_llm = cls._create_llm_from_config(
router_config['model'], base_llm
router_config['model'], base_llm, **kwargs
)
else:
router_llm = base_llm # Use base LLM if no specific model configured
@@ -832,7 +833,9 @@ def _find_node(node_name: str):

@staticmethod
def _create_llm_from_config(
model_config: Dict[str, Any], base_llm: Optional[BaseLLM] = None
model_config: Dict[str, Any],
base_llm: Optional[BaseLLM] = None,
**kwargs,
) -> BaseLLM:
"""Create an LLM instance from model configuration.

@@ -843,33 +846,16 @@ def _create_llm_from_config(
Returns:
BaseLLM: Configured LLM instance
"""
from flo_ai.llm import OpenAI, Anthropic, Gemini, OllamaLLM

provider = model_config.get('provider', 'openai').lower()
model_name = model_config.get('name')
base_url = model_config.get('base_url')

if not model_name:
raise ValueError('Model name must be specified in model configuration')

if provider == 'openai':
llm = OpenAI(model=model_name, base_url=base_url)
elif provider == 'anthropic':
llm = Anthropic(model=model_name, base_url=base_url)
elif provider == 'gemini':
llm = Gemini(model=model_name, base_url=base_url)
elif provider == 'ollama':
llm = OllamaLLM(model=model_name, base_url=base_url)
else:
raise ValueError(f'Unsupported model provider: {provider}')
from flo_ai.helpers.llm_factory import create_llm_from_config

return llm
return create_llm_from_config(model_config, **kwargs)

@staticmethod
def _create_agent_from_direct_config(
agent_config: Dict[str, Any],
base_llm: Optional[BaseLLM] = None,
available_tools: Optional[Dict[str, Tool]] = None,
**kwargs,
) -> Agent:
"""Create an Agent from direct YAML configuration.

@@ -891,7 +877,7 @@

# Configure LLM
if 'model' in agent_config and base_llm is None:
llm = AriumBuilder._create_llm_from_config(agent_config['model'])
llm = AriumBuilder._create_llm_from_config(agent_config['model'], **kwargs)
elif base_llm:
llm = base_llm
else:
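
Note: with `**kwargs` now threaded from `from_yaml` through `_create_llm_from_config` and `_create_agent_from_direct_config`, provider credentials can be supplied at the call site. A minimal sketch, assuming a `yaml_str` parameter name and RootFlo credentials (both illustrative, not confirmed API):

# Hypothetical usage: kwargs passed to AriumBuilder.from_yaml are forwarded
# to every agent and router LLM built from the YAML config.
from flo_ai.arium.builder import AriumBuilder

builder = AriumBuilder.from_yaml(
    yaml_str=my_arium_yaml,  # assumed parameter name; my_arium_yaml is a placeholder
    app_key='key',           # RootFlo credentials, forwarded via **kwargs
    app_secret='secret',
    issuer='iss',
    audience='aud',
)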
39 changes: 10 additions & 29 deletions flo_ai/flo_ai/builder/agent_builder.py
@@ -3,7 +3,7 @@
import yaml
from flo_ai.models.agent import Agent
from flo_ai.models.base_agent import ReasoningPattern
from flo_ai.llm import BaseLLM, OpenAI, Anthropic, Gemini, OllamaLLM, VertexAI
from flo_ai.llm import BaseLLM
from flo_ai.tool.base_tool import Tool
from flo_ai.tool.tool_config import ToolConfig, create_tool_config
from flo_ai.formatter.yaml_format_parser import FloYamlParser
@@ -190,6 +190,7 @@ def from_yaml(
tools: Optional[List[Tool]] = None,
base_llm: Optional[BaseLLM] = None,
tool_registry: Optional[Dict[str, Tool]] = None,
**kwargs,
) -> 'AgentBuilder':
"""Create an agent builder from a YAML configuration string

@@ -219,35 +220,15 @@

# Configure LLM based on model settings
if 'model' in agent_config and base_llm is None:
base_url = agent_config.get('base_url', None)
from flo_ai.helpers.llm_factory import create_llm_from_config

model_config: dict = agent_config['model']
provider = model_config.get('provider', 'openai').lower()
model_name = model_config.get('name')

if not model_name:
raise ValueError('Model name must be specified in YAML configuration')

if provider == 'openai':
builder.with_llm(OpenAI(model=model_name, base_url=base_url))
elif provider == 'anthropic':
builder.with_llm(Anthropic(model=model_name, base_url=base_url))
elif provider == 'gemini':
builder.with_llm(Gemini(model=model_name, base_url=base_url))
elif provider == 'ollama':
builder.with_llm(OllamaLLM(model=model_name, base_url=base_url))
elif provider == 'vertexai':
project = model_config.get('project')
location = model_config.get('location', 'asia-south1')
builder.with_llm(
VertexAI(
model=model_name,
project=project,
location=location,
base_url=base_url,
)
)
else:
raise ValueError(f'Unsupported model provider: {provider}')
# Merge base_url from agent_config if present and not in model_config
if 'base_url' in agent_config and 'base_url' not in model_config:
model_config = {**model_config, 'base_url': agent_config['base_url']}

llm = create_llm_from_config(model_config, **kwargs)
builder.with_llm(llm)
else:
if base_llm is None:
raise ValueError(
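
The merge above preserves backward compatibility with YAML files that place `base_url` at the agent level rather than inside the `model` block. A standalone sketch of that merge, with placeholder values:

# Sketch of the base_url merge performed in AgentBuilder.from_yaml.
agent_config = {
    'model': {'provider': 'ollama', 'name': 'llama3'},  # no base_url here
    'base_url': 'http://localhost:11434',               # agent-level key
}

model_config = agent_config['model']
if 'base_url' in agent_config and 'base_url' not in model_config:
    # copy instead of mutating the parsed YAML in place
    model_config = {**model_config, 'base_url': agent_config['base_url']}

assert model_config['base_url'] == 'http://localhost:11434'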
flo_ai/flo_ai/helpers/__init__.py
Empty file.
220 changes: 220 additions & 0 deletions flo_ai/flo_ai/helpers/llm_factory.py
@@ -0,0 +1,220 @@
"""
LLM Factory - Centralized LLM creation from configuration.

This module provides a unified factory function for creating LLM instances
from configuration dictionaries, supporting all providers in the flo_ai ecosystem.
"""

import os
from typing import Dict, Any, TYPE_CHECKING

if TYPE_CHECKING:
from flo_ai.llm import BaseLLM


class LLMFactory:
"""Factory class for creating LLM instances from configuration."""

SUPPORTED_PROVIDERS = {
'openai',
'anthropic',
'gemini',
'ollama',
'vertexai',
'rootflo',
}

@staticmethod
def create_llm(model_config: Dict[str, Any], **kwargs) -> 'BaseLLM':
"""Create an LLM instance from model configuration.

Args:
model_config: Dictionary containing model configuration with keys:
- provider (str): LLM provider name (default: 'openai')
- name (str): Model name (required for most providers)
- base_url (str, optional): Custom base URL
- model_id (str): For RootFlo provider
- project (str): For VertexAI provider
- location (str): For VertexAI provider (default: 'asia-south1')
**kwargs: Additional parameters that override config and env vars:
- base_url: Override base URL
- For RootFlo: app_key, app_secret, issuer, audience, access_token

Returns:
BaseLLM: Configured LLM instance

Raises:
ValueError: If provider is unsupported or required parameters are missing

Examples:
>>> # OpenAI
>>> llm = LLMFactory.create_llm({'provider': 'openai', 'name': 'gpt-4'})

>>> # VertexAI with project
>>> llm = LLMFactory.create_llm({
... 'provider': 'vertexai',
... 'name': 'gemini-pro',
... 'project': 'my-project',
... 'location': 'us-central1'
... })

>>> # RootFlo with auth
>>> llm = LLMFactory.create_llm(
... {'provider': 'rootflo', 'model_id': 'model-123'},
... app_key='key', app_secret='secret', issuer='iss', audience='aud'
... )
"""
provider = model_config.get('provider', 'openai').lower()

if provider not in LLMFactory.SUPPORTED_PROVIDERS:
raise ValueError(
f'Unsupported model provider: {provider}. '
f'Supported providers: {", ".join(sorted(LLMFactory.SUPPORTED_PROVIDERS))}'
)

if provider == 'rootflo':
return LLMFactory._create_rootflo_llm(model_config, **kwargs)
elif provider == 'vertexai':
return LLMFactory._create_vertexai_llm(model_config, **kwargs)
else:
return LLMFactory._create_standard_llm(provider, model_config, **kwargs)

@staticmethod
def _create_standard_llm(
provider: str, model_config: Dict[str, Any], **kwargs
) -> 'BaseLLM':
"""Create standard LLM instances (OpenAI, Anthropic, Gemini, Ollama)."""
from flo_ai.llm import OpenAI, Anthropic, Gemini, OllamaLLM

model_name = model_config.get('name')
if not model_name:
raise ValueError(
f'{provider.title()} provider requires "name" parameter in model configuration'
)

# Priority: kwargs > model_config > None
base_url = kwargs.get('base_url') or model_config.get('base_url')

provider_map = {
'openai': OpenAI,
'anthropic': Anthropic,
'gemini': Gemini,
'ollama': OllamaLLM,
}

llm_class = provider_map[provider]
return llm_class(model=model_name, base_url=base_url)

@staticmethod
def _create_vertexai_llm(model_config: Dict[str, Any], **kwargs) -> 'BaseLLM':
"""Create VertexAI LLM instance with project and location."""
from flo_ai.llm import VertexAI

model_name = model_config.get('name')
if not model_name:
raise ValueError(
'VertexAI provider requires "name" parameter in model configuration'
)

# Get VertexAI-specific parameters
project = kwargs.get('project') or model_config.get('project')
location = kwargs.get('location') or model_config.get('location', 'asia-south1')
base_url = kwargs.get('base_url') or model_config.get('base_url')

if not project:
raise ValueError(
'VertexAI provider requires "project" parameter. '
'Provide it in model_config or as a kwarg.'
)

return VertexAI(
model=model_name,
project=project,
location=location,
base_url=base_url,
)

@staticmethod
def _create_rootflo_llm(model_config: Dict[str, Any], **kwargs) -> 'BaseLLM':
"""Create RootFlo LLM instance with authentication."""
from flo_ai.llm import RootFloLLM

model_id = model_config.get('model_id')
if not model_id:
raise ValueError(
'RootFlo provider requires "model_id" in model configuration'
)

# Gather RootFlo parameters from kwargs or environment
base_url = (
kwargs.get('base_url')
or model_config.get('base_url')
or os.getenv('ROOTFLO_BASE_URL')
)
app_key = kwargs.get('app_key') or os.getenv('ROOTFLO_APP_KEY')
app_secret = kwargs.get('app_secret') or os.getenv('ROOTFLO_APP_SECRET')
issuer = kwargs.get('issuer') or os.getenv('ROOTFLO_ISSUER')
audience = kwargs.get('audience') or os.getenv('ROOTFLO_AUDIENCE')
access_token = kwargs.get('access_token') # Optional, from kwargs only

# Validate required parameters based on auth method
if not access_token:
# JWT auth flow - requires all parameters
required_params = {
'base_url': base_url,
'app_key': app_key,
'app_secret': app_secret,
'issuer': issuer,
'audience': audience,
}
missing = [k for k, v in required_params.items() if not v]

if missing:
raise ValueError(
f'RootFlo configuration incomplete. Missing required parameters: {", ".join(missing)}. '
f'These can be provided via kwargs or environment variables '
f'(ROOTFLO_BASE_URL, ROOTFLO_APP_KEY, ROOTFLO_APP_SECRET, ROOTFLO_ISSUER, ROOTFLO_AUDIENCE).'
)
else:
# Access token flow - only needs base_url and app_key
required_params = {
'base_url': base_url,
'app_key': app_key,
Member (review comment): What is this app_key?
}
missing = [k for k, v in required_params.items() if not v]

if missing:
raise ValueError(
f'RootFlo configuration incomplete. Missing required parameters: {", ".join(missing)}. '
f'These can be provided via kwargs or environment variables '
f'(ROOTFLO_BASE_URL, ROOTFLO_APP_KEY).'
)

return RootFloLLM(
base_url=base_url,
model_id=model_id,
app_key=app_key,
app_secret=app_secret,
issuer=issuer,
audience=audience,
access_token=access_token,
)


# Convenience function for direct import
def create_llm_from_config(model_config: Dict[str, Any], **kwargs) -> 'BaseLLM':
"""
Convenience function to create an LLM instance from configuration.

This is a wrapper around LLMFactory.create_llm() for easier imports.

Args:
model_config: Dictionary containing model configuration
**kwargs: Additional parameters that override config and env vars

Returns:
BaseLLM: Configured LLM instance

See LLMFactory.create_llm() for detailed documentation.
"""
return LLMFactory.create_llm(model_config, **kwargs)
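
For reference, a hedged usage sketch of the factory's resolution order (kwargs over model_config over environment variables); the model names, URLs, and credential values below are placeholders:

from flo_ai.helpers.llm_factory import create_llm_from_config

# Standard provider: an explicit kwarg overrides the config value.
llm = create_llm_from_config(
    {'provider': 'openai', 'name': 'gpt-4', 'base_url': 'https://config-url'},
    base_url='https://kwarg-url',  # takes priority over the config entry
)

# RootFlo JWT flow: credentials via kwargs or ROOTFLO_* environment variables.
llm = create_llm_from_config(
    {'provider': 'rootflo', 'model_id': 'model-123'},
    base_url='https://rootflo.example',
    app_key='key', app_secret='secret', issuer='iss', audience='aud',
)

# RootFlo access-token flow: only base_url and app_key are validated.
llm = create_llm_from_config(
    {'provider': 'rootflo', 'model_id': 'model-123'},
    base_url='https://rootflo.example', app_key='key', access_token='token',
)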
2 changes: 2 additions & 0 deletions flo_ai/flo_ai/llm/__init__.py
@@ -5,6 +5,7 @@
from .gemini_llm import Gemini
from .openai_vllm import OpenAIVLLM
from .vertexai_llm import VertexAI
from .rootflo_llm import RootFloLLM

__all__ = [
'BaseLLM',
@@ -14,4 +15,5 @@
'Gemini',
'OpenAIVLLM',
'VertexAI',
'RootFloLLM',
]