Skip to content
Merged

Dev #127

Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions agent_core/core/credentials/embedded_credentials.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@
"NTQwMzU1MDYyMDA1LTM3Y3RmcjBhNHVlazFjMWZzcDRzc25sd",
"GhkdGJkbzZ2LmFwcHMuZ29vZ2xldXNlcmNvbnRlbnQuY29t",
],
"client_secret": ["R09DU1BYLTRpRi12Zmxac0xWYmNabXE2U3ZHTUw4RDllSHo="],
},
"zoom": {
"client_id": ["YWlsaURjY0JUUGlaZ", "W5Ka29acldHZw=="],
Expand All @@ -50,6 +51,10 @@
"client_id": ["ODZ4aXVvZHQ", "2cjQ3MnU="],
"client_secret": ["V1BMX0FQMS5FSHFHeDRUOGZ", "SM0k1cjM3LnFHNU45QT09"],
},
"telegram": {
"api_id": ["MzQyNDc4MTc="],
"api_hash": ["N2Q5ZjkzN2ZkNzAzYTI0NTkyMDQzNGM2YjU5MDE4OGE="]
}
}


Expand Down
160 changes: 160 additions & 0 deletions agent_core/core/impl/llm/errors.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,160 @@
# -*- coding: utf-8 -*-
"""
LLM Error Classification Module.

Provides user-friendly error messages for LLM-related failures.
Uses proper exception types and HTTP status codes - no string pattern matching.
"""

from __future__ import annotations

from typing import Optional

# Import provider exception types
try:
import openai
except ImportError:
openai = None

try:
import anthropic
except ImportError:
anthropic = None

try:
import requests
except ImportError:
requests = None


# User-friendly messages surfaced to end users in place of raw provider
# exceptions. Keep these short and actionable; callers still log the raw
# error/traceback separately for debugging.
MSG_AUTH = "Unable to connect to AI service. Please check your API key in Settings."
MSG_MODEL = "The selected AI model is not available. Please check your model settings."
MSG_CONFIG = "AI service configuration error. The selected model may not support required features."
MSG_RATE_LIMIT = "AI service is rate-limited. Please wait a moment and try again."
MSG_SERVICE = "AI service is temporarily unavailable. Please try again later."
MSG_CONNECTION = "Unable to reach AI service. Please check your internet connection."
MSG_GENERIC = "An error occurred with the AI service. Please check your LLM configuration."


def classify_llm_error(error: Exception) -> str:
    """Classify an LLM error and return a user-friendly message.

    Uses exception types and HTTP status codes for classification.

    Args:
        error: The exception from the LLM call.

    Returns:
        A user-friendly error message.
    """
    # Try each provider-specific classifier in turn; a classifier is only
    # consulted when its SDK imported successfully at module load time.
    classifiers = (
        (openai, _classify_openai_error),
        (anthropic, _classify_anthropic_error),
        # requests covers BytePlus and remote/Ollama backends.
        (requests, _classify_requests_error),
    )
    for sdk_module, classifier in classifiers:
        if sdk_module is None:
            continue
        message = classifier(error)
        if message:
            return message

    # Last resort before the generic fallback: some exceptions carry a
    # usable HTTP status code even when their type is unrecognized.
    status_code = _get_status_code(error)
    if status_code:
        return _message_from_status_code(status_code)

    return MSG_GENERIC


def _classify_openai_error(error: Exception) -> Optional[str]:
    """Classify OpenAI SDK exceptions.

    Returns a user-facing message, or None when the exception is not an
    OpenAI SDK error.
    """
    # Order matters: the specific subclasses must be tested before the
    # APIStatusError base class, so keep this tuple's sequence intact.
    type_to_message = (
        (openai.AuthenticationError, MSG_AUTH),
        (openai.PermissionDeniedError, MSG_AUTH),
        (openai.NotFoundError, MSG_MODEL),
        (openai.BadRequestError, MSG_CONFIG),
        (openai.RateLimitError, MSG_RATE_LIMIT),
        (openai.InternalServerError, MSG_SERVICE),
        (openai.APIConnectionError, MSG_CONNECTION),
        (openai.APITimeoutError, MSG_CONNECTION),
    )
    for exc_type, message in type_to_message:
        if isinstance(error, exc_type):
            return message
    # Any other status-bearing SDK error: map by HTTP status code.
    if isinstance(error, openai.APIStatusError):
        return _message_from_status_code(error.status_code)
    return None


def _classify_anthropic_error(error: Exception) -> Optional[str]:
    """Classify Anthropic SDK exceptions.

    Returns a user-facing message, or None when the exception is not an
    Anthropic SDK error.
    """
    # Order matters: the specific subclasses must be tested before the
    # APIStatusError base class, so keep this tuple's sequence intact.
    type_to_message = (
        (anthropic.AuthenticationError, MSG_AUTH),
        (anthropic.PermissionDeniedError, MSG_AUTH),
        (anthropic.NotFoundError, MSG_MODEL),
        (anthropic.BadRequestError, MSG_CONFIG),
        (anthropic.RateLimitError, MSG_RATE_LIMIT),
        (anthropic.InternalServerError, MSG_SERVICE),
        (anthropic.APIConnectionError, MSG_CONNECTION),
        (anthropic.APITimeoutError, MSG_CONNECTION),
    )
    for exc_type, message in type_to_message:
        if isinstance(error, exc_type):
            return message
    # Any other status-bearing SDK error: map by HTTP status code.
    if isinstance(error, anthropic.APIStatusError):
        return _message_from_status_code(error.status_code)
    return None


def _classify_requests_error(error: Exception) -> Optional[str]:
    """Classify requests library exceptions (for BytePlus/Ollama).

    Returns a user-facing message, or None when the exception is not a
    recognized requests error.
    """
    if isinstance(error, requests.exceptions.HTTPError):
        response = error.response
        # An HTTPError without an attached response gives us no status
        # code to inspect; treat it as a service-side failure.
        if response is None:
            return MSG_SERVICE
        return _message_from_status_code(response.status_code)
    # Connection failures and timeouts both read as "can't reach the service".
    if isinstance(error, (requests.exceptions.ConnectionError, requests.exceptions.Timeout)):
        return MSG_CONNECTION
    return None


def _get_status_code(error: Exception) -> Optional[int]:
"""Extract HTTP status code from exception if available."""
# Check for status_code attribute
if hasattr(error, "status_code"):
return getattr(error, "status_code", None)
# Check for response.status_code (requests-style)
if hasattr(error, "response") and hasattr(error.response, "status_code"):
return error.response.status_code
return None


def _message_from_status_code(status_code: int) -> str:
    """Map an HTTP status code to a user-friendly message."""
    # Exact-match codes first; 401/403 both read as auth problems.
    exact_matches = {
        401: MSG_AUTH,
        403: MSG_AUTH,
        404: MSG_MODEL,
        400: MSG_CONFIG,
        429: MSG_RATE_LIMIT,
    }
    message = exact_matches.get(status_code)
    if message is not None:
        return message
    # Any 5xx means the provider itself is having trouble.
    if 500 <= status_code < 600:
        return MSG_SERVICE
    return MSG_GENERIC
39 changes: 34 additions & 5 deletions agent_core/core/impl/llm/interface.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,38 @@
from agent_core.utils.logger import logger


# Models that do NOT support assistant message prefill
# These require output_config.format for structured JSON output
_ANTHROPIC_NO_PREFILL_PATTERNS = (
"claude-opus-4", # Claude Opus 4.x (4.5, 4.6, etc.)
"claude-sonnet-4", # Claude Sonnet 4.x (4.5, 4.6, etc.)
"claude-3-7", # Claude 3.7 Sonnet
"claude-3.7", # Alternative naming
)


def _model_supports_prefill(model: str) -> bool:
"""Check if an Anthropic model supports assistant message prefill.

Newer Claude models (4.x, 3.7) do not support prefilling.
Older models (3.5 Sonnet, 3 Opus) still support it.

Args:
model: The model identifier string.

Returns:
True if the model supports prefill, False otherwise.
"""
if not model:
return True # Default to supporting prefill for safety

model_lower = model.lower()
for pattern in _ANTHROPIC_NO_PREFILL_PATTERNS:
if pattern in model_lower:
return False
return True


class LLMInterface:
"""LLM interface with multi-provider support and hook-based customization.

Expand Down Expand Up @@ -1515,14 +1547,12 @@ def _generate_anthropic(
if not self._anthropic_client:
raise RuntimeError("Anthropic client was not initialised.")

# Build the message with optional system prompt
# Use JSON prefilling to enforce JSON output
# Build the message - rely on system prompt for JSON formatting
message_kwargs: Dict[str, Any] = {
"model": self.model,
"max_tokens": self.max_tokens,
"messages": [
{"role": "user", "content": user_prompt},
{"role": "assistant", "content": "{"}, # JSON prefilling
],
}

Expand Down Expand Up @@ -1561,8 +1591,7 @@ def _generate_anthropic(
if block.type == "text":
content += block.text

# Prepend the prefilled '{' to complete JSON
content = "{" + content.strip()
content = content.strip()

# Token usage from Anthropic response
token_count_input = response.usage.input_tokens
Expand Down
6 changes: 5 additions & 1 deletion app/agent_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,7 @@

from app.internal_action_interface import InternalActionInterface
from app.llm import LLMInterface, LLMCallType
from agent_core.core.impl.llm.errors import classify_llm_error
from app.vlm_interface import VLMInterface
from app.database_interface import DatabaseInterface
from app.logger import logger
Expand Down Expand Up @@ -1159,12 +1160,15 @@ async def _handle_react_error(
if not session_to_use or not self.event_stream_manager:
return

# Get user-friendly error message
user_message = classify_llm_error(error)

try:
logger.debug("[REACT ERROR] Logging to event stream")
self.event_stream_manager.log(
"error",
f"[REACT] {type(error).__name__}: {error}\n{tb}",
display_message=None,
display_message=user_message,
task_id=session_to_use,
)
self.state_manager.bump_event_stream()
Expand Down
6 changes: 3 additions & 3 deletions app/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -205,7 +205,7 @@ def reload_settings() -> Dict[str, Any]:

# Google (PKCE - only client_id required, secret kept for backwards compatibility)
GOOGLE_CLIENT_ID: str = get_credential("google", "client_id", "GOOGLE_CLIENT_ID")
GOOGLE_CLIENT_SECRET: str = os.environ.get("GOOGLE_CLIENT_SECRET", "")
GOOGLE_CLIENT_SECRET: str = get_credential("google", "client_secret", "GOOGLE_CLIENT_SECRET")

# LinkedIn (requires both client_id and client_secret)
LINKEDIN_CLIENT_ID: str = get_credential("linkedin", "client_id", "LINKEDIN_CLIENT_ID")
Expand All @@ -223,8 +223,8 @@ def reload_settings() -> Dict[str, Any]:
TELEGRAM_SHARED_BOT_USERNAME: str = os.environ.get("TELEGRAM_SHARED_BOT_USERNAME", "")

# Telegram API credentials for MTProto user login (from https://my.telegram.org)
TELEGRAM_API_ID: str = os.environ.get("TELEGRAM_API_ID", "")
TELEGRAM_API_HASH: str = os.environ.get("TELEGRAM_API_HASH", "")
TELEGRAM_API_ID: str = get_credential("telegram", "api_id", "TELEGRAM_API_ID")
TELEGRAM_API_HASH: str = get_credential("telegram", "api_hash", "TELEGRAM_API_HASH")

# Notion (requires both client_id and client_secret - no PKCE support)
NOTION_SHARED_CLIENT_ID: str = get_credential("notion", "client_id", "NOTION_SHARED_CLIENT_ID")
Expand Down
2 changes: 1 addition & 1 deletion app/config/mcp_config.json
Original file line number Diff line number Diff line change
Expand Up @@ -1262,7 +1262,7 @@
"AMADEUS_API_KEY": "",
"AMADEUS_API_SECRET": ""
},
"enabled": false
"enabled": true
},
{
"name": "booking-mcp",
Expand Down
22 changes: 22 additions & 0 deletions app/ui_layer/adapters/browser_adapter.py
Original file line number Diff line number Diff line change
Expand Up @@ -1310,6 +1310,28 @@ async def _handle_onboarding_step_submit(self, value: Any) -> None:
})
return

# For API key step, test the connection before proceeding
step = controller.get_current_step()
if step.name == "api_key":
provider = controller.get_collected_data().get("provider", "openai")
# Remote/Ollama provider doesn't require API key validation
if provider != "remote" and value:
test_result = test_connection(
provider=provider,
api_key=value,
)
if not test_result.get("success"):
error_msg = test_result.get("error") or test_result.get("message") or "Connection test failed"
await self._broadcast({
"type": "onboarding_submit",
"data": {
"success": False,
"error": f"Invalid API key: {error_msg}",
"index": controller.current_step_index,
},
})
return

# Submit the value
controller.submit_step_value(value)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -330,7 +330,11 @@ export function OnboardingPage() {
icon={<ChevronRight size={16} />}
iconPosition="right"
>
{isLastStep ? 'Finish' : 'Next'}
{onboardingLoading && onboardingStep?.name === 'api_key'
? 'Testing API Key...'
: isLastStep
? 'Finish'
: 'Next'}
</Button>
</div>
</div>
Expand Down
28 changes: 0 additions & 28 deletions test_output.py

This file was deleted.