diff --git a/LocalAgent.ApiService/Program.cs b/LocalAgent.ApiService/Program.cs
index b2d74a2..b66808d 100644
--- a/LocalAgent.ApiService/Program.cs
+++ b/LocalAgent.ApiService/Program.cs
@@ -29,7 +29,33 @@
if (aiConfig.IsLocalProvider())
{
- builder.AddOllamaApiClient("ollamaModel")
+ // The connection name used for Ollama
+ const string OllamaConnectionName = "ollamaModel";
+
+ // Configure an increased timeout for local Ollama LLM requests BEFORE creating the client.
+ // Local LLMs can be slow, especially on underpowered machines.
+ // The HttpClient name follows the pattern: {connectionName}_httpClient
+ const int MinimumTimeoutSeconds = 30; // Floor for LLM operations; config cannot lower it
+ var timeoutSeconds = Math.Max(MinimumTimeoutSeconds, aiConfig.TimeoutSeconds);
+
+ // IMPORTANT: We do NOT add AddStandardResilienceHandler to this client because:
+ // 1. The global handler registered via ConfigureHttpClientDefaults cannot be effectively overridden per-client
+ // 2. A per-client handler would stack with the global one, and whichever timeout is shorter would win
+ // 3. Streaming LLM responses need a far longer timeout than the standard handler's 10-second default
+ builder.Services.AddHttpClient($"{OllamaConnectionName}_httpClient", client =>
+ {
+ // Set a long timeout for LLM operations (streaming responses can take a while)
+ client.Timeout = TimeSpan.FromSeconds(timeoutSeconds);
+ })
+ // Remove any additional handlers that were added by ConfigureHttpClientDefaults
+ .ConfigureAdditionalHttpMessageHandlers((handlers, _) =>
+ {
+ // Clear the standard resilience handler added by the defaults so the
+ // custom timeout above applies to Ollama without interference
+ handlers.Clear();
+ });
+
+ builder.AddOllamaApiClient(OllamaConnectionName)
.AddChatClient()
.UseFunctionInvocation()
.UseOpenTelemetry(configure: t => t.EnableSensitiveData = true);
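For context, the handlers cleared above come from the global defaults that the Aspire service-defaults template wires up for every HttpClient. A minimal sketch of that registration, assuming the stock template shape in LocalAgent.ServiceDefaults (background only, not code from this diff):

```csharp
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;

var builder = Host.CreateApplicationBuilder(args);

// Applies to every HttpClient the container builds, including the
// "{connectionName}_httpClient" instance used by AddOllamaApiClient.
builder.Services.ConfigureHttpClientDefaults(http =>
{
    // The standard resilience handler bundles retry, circuit-breaker, and
    // timeout strategies; its per-attempt timeout defaults to 10 seconds,
    // which is the limit the handlers.Clear() call above escapes for
    // long-running streaming LLM requests.
    http.AddStandardResilienceHandler();
});
```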
diff --git a/LocalAgent.AppHost/appsettings.json b/LocalAgent.AppHost/appsettings.json
index 4c386d1..c2d6967 100644
--- a/LocalAgent.AppHost/appsettings.json
+++ b/LocalAgent.AppHost/appsettings.json
@@ -11,6 +11,7 @@
},
"AIConfig": {
"Provider": "Local",
- "ModelId": "llama32"
+ "ModelId": "llama32",
+ "TimeoutSeconds": 90
}
}
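At runtime the new key binds onto the TimeoutSeconds property added to AIConfig below. A minimal binding sketch, assuming the AIConfig class from LocalAgent.ServiceDefaults is referenced and the standard Microsoft.Extensions.Configuration binder packages are available (illustration only, not code from this diff):

```csharp
using Microsoft.Extensions.Configuration;

var config = new ConfigurationBuilder()
    .AddJsonFile("appsettings.json")
    .Build();

// Get<T>() maps "TimeoutSeconds": 90 onto the new property; if the key is
// omitted, the property initializer falls back to the same 90-second default.
var aiConfig = config.GetSection("AIConfig").Get<AIConfig>() ?? new AIConfig();
Console.WriteLine(aiConfig.TimeoutSeconds); // 90
```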
diff --git a/LocalAgent.ServiceDefaults/AIConfig.cs b/LocalAgent.ServiceDefaults/AIConfig.cs
index 7405d76..4863740 100644
--- a/LocalAgent.ServiceDefaults/AIConfig.cs
+++ b/LocalAgent.ServiceDefaults/AIConfig.cs
@@ -12,6 +12,14 @@ public class AIConfig
public string ModelId { get; set; } = string.Empty;
+ /// <summary>
+ /// Timeout in seconds for HTTP requests to the AI service.
+ /// Default is 90 seconds to accommodate slower local LLMs while maintaining reasonable responsiveness.
+ /// Note: This timeout applies specifically to HTTP client requests. Some AI providers may have
+ /// their own internal timeout mechanisms that are independent of this setting.
+ /// </summary>
+ public int TimeoutSeconds { get; set; } = 90;
+
/// <summary>
/// Determines if the Azure provider is configured
/// </summary>
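One interaction worth spelling out: the configured value is a request, not a guarantee, because Program.cs clamps it against the 30-second floor. A standalone illustration with a hypothetical misconfigured value:

```csharp
// Mirrors the Math.Max clamp in Program.cs: configuration can raise the
// timeout but never lower it below the floor.
const int MinimumTimeoutSeconds = 30;

var misconfigured = new AIConfig { TimeoutSeconds = 5 }; // too aggressive for an LLM
var effective = TimeSpan.FromSeconds(
    Math.Max(MinimumTimeoutSeconds, misconfigured.TimeoutSeconds));

Console.WriteLine(effective); // 00:00:30 (clamped up to the floor)
```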