flo_ai/flo_ai/helpers/llm_factory.py (15 changes: 4 additions & 11 deletions)
@@ -220,18 +220,11 @@ def _create_rootflo_llm(model_config: Dict[str, Any], **kwargs) -> 'BaseLLM':
                 f'(ROOTFLO_BASE_URL, ROOTFLO_APP_KEY, ROOTFLO_APP_SECRET, ROOTFLO_ISSUER, ROOTFLO_AUDIENCE).'
             )
     else:
-        # Access token flow - only needs base_url and app_key
-        required_params = {
-            'base_url': base_url,
-            'app_key': app_key,
-        }
-        missing = [k for k, v in required_params.items() if not v]
-
-        if missing:
+        # Access token flow - only needs base_url
+        if not base_url:
             raise ValueError(
-                f'RootFlo configuration incomplete. Missing required parameters: {", ".join(missing)}. '
-                f'These can be provided via kwargs or environment variables '
-                f'(ROOTFLO_BASE_URL, ROOTFLO_APP_KEY).'
+                'RootFlo configuration incomplete. Missing required parameter: base_url. '
+                'Provide it in model_config, as a kwarg, or via ROOTFLO_BASE_URL environment variable.'
             )
 
     return RootFloLLM(
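Net effect of this hunk: the JWT flow still requires all five RootFlo parameters, while the access-token flow is relaxed to require only base_url. A minimal sketch of the resulting validation, assuming the factory has already resolved these values from model_config, kwargs, or environment variables (the wrapper function is hypothetical; the branch bodies mirror the diff):

def _validate_rootflo_params(base_url, app_key, app_secret, issuer, audience, access_token):
    # Hypothetical helper; the if/else split mirrors the hunk above.
    if not access_token:
        # JWT generation flow: every credential is required.
        required_params = {
            'base_url': base_url,
            'app_key': app_key,
            'app_secret': app_secret,
            'issuer': issuer,
            'audience': audience,
        }
        missing = [k for k, v in required_params.items() if not v]
        if missing:
            raise ValueError(
                f'RootFlo configuration incomplete. Missing required parameters: {", ".join(missing)}.'
            )
    else:
        # Access token flow - only needs base_url
        if not base_url:
            raise ValueError(
                'RootFlo configuration incomplete. Missing required parameter: base_url.'
            )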
flo_ai/flo_ai/llm/rootflo_llm.py (28 changes: 16 additions & 12 deletions)
@@ -32,10 +32,10 @@ def __init__(
         self,
         base_url: str,
         model_id: str,
-        app_key: str,
-        app_secret: str,
-        issuer: str,
-        audience: str,
+        app_key: Optional[str] = None,
+        app_secret: Optional[str] = None,
+        issuer: Optional[str] = None,
+        audience: Optional[str] = None,
         access_token: Optional[str] = None,
         temperature: float = 0.7,
         **kwargs,
@@ -82,9 +82,6 @@ def __init__(
                     f'Missing required parameters for JWT generation: {", ".join(missing)}. '
                     f'Either provide these parameters or pass an access_token directly.'
                 )
-        else:  # app key is still required
-            if not app_key:
-                raise ValueError('app_key is required even when using access_token')
 
         # Store initialization parameters for lazy initialization
         self._base_url = base_url
@@ -117,7 +114,11 @@ def __init__(
         )
 
     async def _fetch_llm_config_async(
-        self, base_url: str, model_id: str, api_token: str, app_key: str
+        self,
+        base_url: str,
+        model_id: str,
+        api_token: str,
+        app_key: Optional[str] = None,
     ) -> Dict[str, Any]:
         """
         Fetch LLM configuration from the API endpoint asynchronously.
@@ -126,7 +127,7 @@ async def _fetch_llm_config_async(
             base_url: The base URL of the API server
             model_id: The model identifier (config_id)
             api_token: The JWT token for authorization
-            app_key: Application key for X-Rootflo-Key header
+            app_key: Optional application key for X-Rootflo-Key header
 
         Returns:
             Dict containing llm_model and type
@@ -137,9 +138,12 @@ async def _fetch_llm_config_async(
         config_url = f'{base_url}/v1/llm-inference-configs/{model_id}'
         headers = {
             'Authorization': f'Bearer {api_token}',
-            'X-Rootflo-Key': app_key,
         }
 
+        # Only add X-Rootflo-Key header if app_key is provided
+        if app_key:
+            headers['X-Rootflo-Key'] = app_key
+
         try:
             async with httpx.AsyncClient() as client:
                 response = await client.get(config_url, headers=headers, timeout=30.0)
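Taken in isolation, the header construction now degrades gracefully when no app key is configured. A runnable sketch of the pattern (token and key values are placeholders):

api_token = 'eyJ...'  # placeholder bearer token
app_key = None        # access-token flow may leave this unset

headers = {'Authorization': f'Bearer {api_token}'}
# Only add X-Rootflo-Key header if app_key is provided
if app_key:
    headers['X-Rootflo-Key'] = app_key

assert 'X-Rootflo-Key' not in headers  # omitted when no key is configured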
Expand Down Expand Up @@ -226,8 +230,8 @@ async def _ensure_initialized(self):
# Construct full URL for LLM inference
full_url = f'{self._base_url}/v1/llm-inference/{self._model_id}'

# Prepare custom headers for proxy authentication
custom_headers = {'X-Rootflo-Key': self._app_key}
# Prepare custom headers for proxy authentication (only if app_key is provided)
custom_headers = {'X-Rootflo-Key': self._app_key} if self._app_key else {}

# Instantiate appropriate SDK wrapper based on llm_provider
if llm_provider == LLMProvider.OPENAI:
Expand Down
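With app_key optional end to end, the two construction paths look roughly like this. A usage sketch, not taken from the PR: the import path is assumed from the file layout, and all values are placeholders.

from flo_ai.llm.rootflo_llm import RootFloLLM  # import path assumed from flo_ai/flo_ai/llm/rootflo_llm.py

# Access-token flow: base_url, model_id, and a pre-issued token; no app credentials needed.
llm = RootFloLLM(
    base_url='https://rootflo.example.com',  # placeholder
    model_id='my-config-id',                 # placeholder config_id
    access_token='eyJ...',                   # placeholder JWT
)

# JWT-generation flow: unchanged, still needs the full credential set.
llm_jwt = RootFloLLM(
    base_url='https://rootflo.example.com',
    model_id='my-config-id',
    app_key='APP_KEY',
    app_secret='APP_SECRET',
    issuer='issuer-id',
    audience='audience-id',
)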