From 3ea7b6a8a76e6b54553d6521aa12b8afae942436 Mon Sep 17 00:00:00 2001 From: "Claude 2.0" Date: Wed, 1 Apr 2026 02:36:41 +0800 Subject: [PATCH 1/6] fix: strip variant from title generation to prevent effort parameter leakage When generating session titles, the user's model variant (e.g., "max" which maps to output_config.effort) was leaking into the LLM.stream call for the title agent. Since the title agent uses a small model (e.g., haiku) that does not support the effort parameter, this caused a 400 error from the Anthropic API that was silently swallowed by Effect.ignore. This fix strips the variant from the user info when calling LLM.stream for title generation, ensuring the small model receives only compatible parameters. Fixes #20269 --- packages/opencode/src/session/prompt.ts | 2 +- packages/opencode/test/session/prompt.test.ts | 77 +++++++++++++++++++ 2 files changed, 78 insertions(+), 1 deletion(-) diff --git a/packages/opencode/src/session/prompt.ts b/packages/opencode/src/session/prompt.ts index dbf815bd6d79..5d612c235f66 100644 --- a/packages/opencode/src/session/prompt.ts +++ b/packages/opencode/src/session/prompt.ts @@ -215,7 +215,7 @@ export namespace SessionPrompt { : await MessageV2.toModelMessages(context, mdl) const result = await LLM.stream({ agent: ag, - user: firstInfo, + user: { ...firstInfo, variant: undefined }, system: [], small: true, tools: {}, diff --git a/packages/opencode/test/session/prompt.test.ts b/packages/opencode/test/session/prompt.test.ts index 51d2e11941ae..d5ddc06b9823 100644 --- a/packages/opencode/test/session/prompt.test.ts +++ b/packages/opencode/test/session/prompt.test.ts @@ -516,3 +516,80 @@ describe("session.agent-resolution", () => { }) }, 30000) }) + +describe("session.title generation", () => { + test("strips variant from title generation to avoid effort parameter leakage", async () => { + let titleRequestCaptured = false + let titleRequestHasEffort = false + const server = Bun.serve({ + port: 0, + async fetch(req) { + 
const url = new URL(req.url) + if (!url.pathname.endsWith("/chat/completions")) { + return new Response("not found", { status: 404 }) + } + const body = JSON.parse(await req.text()) + const isTitleRequest = body.messages?.some((m: any) => m.content?.includes("Generate a title")) + if (isTitleRequest) { + titleRequestCaptured = true + titleRequestHasEffort = !!body.output_config?.effort || !!body.reasoning_effort + } + return new Response(chat("Test session title"), { + status: 200, + headers: { "Content-Type": "text/event-stream" }, + }) + }, + }) + + try { + await using tmp = await tmpdir({ + git: true, + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + enabled_providers: ["alibaba"], + provider: { + alibaba: { + options: { + apiKey: "test-key", + baseURL: `${server.url.origin}/v1`, + }, + }, + }, + agent: { + build: { + model: "alibaba/qwen-plus", + variant: "max", + }, + }, + }), + ) + }, + }) + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const session = await Session.create({}) + const result = await SessionPrompt.prompt({ + sessionID: session.id, + agent: "build", + variant: "max", + parts: [{ type: "text", text: "Hello, help me with something" }], + }) + + expect(result.info.role).toBe("assistant") + await new Promise((r) => setTimeout(r, 500)) + expect(titleRequestCaptured).toBe(true) + expect(titleRequestHasEffort).toBe(false) + const updated = await Session.get(session.id) + expect(updated.title).toBe("Test session title") + }, + }) + } finally { + server.stop(true) + } + }) +}) From eac82dccc577ca9a004aa47a99a29b03759a8257 Mon Sep 17 00:00:00 2001 From: "Claude 2.0" Date: Wed, 1 Apr 2026 03:15:42 +0800 Subject: [PATCH 2/6] fix: respect useCompletionUrls for custom @ai-sdk/azure providers When a custom provider uses npm: "@ai-sdk/azure", the getLanguage function was falling back to sdk.languageModel() which bypasses the 
useCompletionUrls option check. This caused the Azure provider to use the /responses endpoint instead of /chat/completions even when useCompletionUrls was set to true. The fix adds a check in getLanguage to apply Azure-specific logic for any model using @ai-sdk/azure, ensuring useCompletionUrls is respected for both built-in and custom Azure providers. Fixes #20287 --- packages/opencode/src/provider/provider.ts | 19 ++++++--- .../opencode/test/provider/provider.test.ts | 42 +++++++++++++++++++ 2 files changed, 56 insertions(+), 5 deletions(-) diff --git a/packages/opencode/src/provider/provider.ts b/packages/opencode/src/provider/provider.ts index c6784f450244..881a5c9dea50 100644 --- a/packages/opencode/src/provider/provider.ts +++ b/packages/opencode/src/provider/provider.ts @@ -1416,12 +1416,21 @@ export namespace Provider { const sdk = await resolveSDK(model, s) try { + const mergedOptions = { ...provider.options, ...model.options } const language = s.modelLoaders[model.providerID] - ? await s.modelLoaders[model.providerID](sdk, model.api.id, { - ...provider.options, - ...model.options, - }) - : sdk.languageModel(model.api.id) + ? 
await s.modelLoaders[model.providerID](sdk, model.api.id, mergedOptions) + : (() => { + // For custom providers using @ai-sdk/azure, apply azure-specific logic + if (model.api.npm === "@ai-sdk/azure") { + if (useLanguageModel(sdk)) return sdk.languageModel(model.api.id) + if (mergedOptions["useCompletionUrls"]) { + return sdk.chat(model.api.id) + } else { + return sdk.responses(model.api.id) + } + } + return sdk.languageModel(model.api.id) + })() s.models.set(key, language) return language } catch (e) { diff --git a/packages/opencode/test/provider/provider.test.ts b/packages/opencode/test/provider/provider.test.ts index 72ba9dba5a5c..641dff30420c 100644 --- a/packages/opencode/test/provider/provider.test.ts +++ b/packages/opencode/test/provider/provider.test.ts @@ -2282,3 +2282,45 @@ test("cloudflare-ai-gateway forwards config metadata options", async () => { }, }) }) + +test("custom provider using @ai-sdk/azure respects useCompletionUrls option", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + foundary: { + npm: "@ai-sdk/azure", + options: { + baseURL: "https://custom-domain/openai", + apiKey: "test-key", + apiVersion: "2025-04-01-preview", + useDeploymentBasedUrls: true, + useCompletionUrls: true, + }, + models: { + "gpt-5.4": { + name: "GPT-5.4", + }, + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + expect(providers[ProviderID.make("foundary")]).toBeDefined() + expect(providers[ProviderID.make("foundary")].options.useCompletionUrls).toBe(true) + const model = await Provider.getModel(ProviderID.make("foundary"), ModelID.make("gpt-5.4")) + expect(model).toBeDefined() + // Verify the model has the correct npm package + expect(model.api.npm).toBe("@ai-sdk/azure") + }, + }) +}) From 
b2eab3ca0b0b217c1fa0c41ef49d017ce6ca7e32 Mon Sep 17 00:00:00 2001 From: "Claude 2.0" Date: Wed, 1 Apr 2026 04:07:20 +0800 Subject: [PATCH 3/6] fix: update MCP protocol version to 2025-06-18 for GitLab compatibility The protocol version 2024-11-05 is deprecated and rejected by GitLab 18.9+. Updated to 2025-06-18 which is supported by GitLab MCP server. --- packages/opencode/src/cli/cmd/mcp.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/opencode/src/cli/cmd/mcp.ts b/packages/opencode/src/cli/cmd/mcp.ts index c45b9e55d0f8..c10ca32a969e 100644 --- a/packages/opencode/src/cli/cmd/mcp.ts +++ b/packages/opencode/src/cli/cmd/mcp.ts @@ -660,7 +660,7 @@ export const McpDebugCommand = cmd({ jsonrpc: "2.0", method: "initialize", params: { - protocolVersion: "2024-11-05", + protocolVersion: "2025-06-18", capabilities: {}, clientInfo: { name: "opencode-debug", version: Installation.VERSION }, }, From 382631a61cbe8d1e43fe63816521ac86e23b36e8 Mon Sep 17 00:00:00 2001 From: "Claude 2.0" Date: Wed, 1 Apr 2026 04:11:13 +0800 Subject: [PATCH 4/6] fix: send configured headers during MCP OAuth flow The mcp auth and mcp debug commands were not forwarding headers configured under mcp.{name}.headers during the OAuth flow, causing 404 errors instead of 401 from servers requiring custom headers for routing. Fixed three code paths: 1. startAuth in src/mcp/index.ts - added requestInit with headers 2. McpDebugCommand fetch probe - added headers to fetch call 3. 
McpDebugCommand OAuth transport - added requestInit with headers Fixes #20286 --- packages/opencode/src/cli/cmd/mcp.ts | 2 ++ packages/opencode/src/mcp/index.ts | 5 ++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/packages/opencode/src/cli/cmd/mcp.ts b/packages/opencode/src/cli/cmd/mcp.ts index c10ca32a969e..2763d6a26bd8 100644 --- a/packages/opencode/src/cli/cmd/mcp.ts +++ b/packages/opencode/src/cli/cmd/mcp.ts @@ -655,6 +655,7 @@ export const McpDebugCommand = cmd({ headers: { "Content-Type": "application/json", Accept: "application/json, text/event-stream", + ...(serverConfig.headers ?? {}), }, body: JSON.stringify({ jsonrpc: "2.0", @@ -699,6 +700,7 @@ export const McpDebugCommand = cmd({ // Try creating transport with auth provider to trigger discovery const transport = new StreamableHTTPClientTransport(new URL(serverConfig.url), { authProvider, + requestInit: serverConfig.headers ? { headers: serverConfig.headers } : undefined, }) try { diff --git a/packages/opencode/src/mcp/index.ts b/packages/opencode/src/mcp/index.ts index e3bf4cac0688..8c0e43d574a6 100644 --- a/packages/opencode/src/mcp/index.ts +++ b/packages/opencode/src/mcp/index.ts @@ -739,7 +739,10 @@ export namespace MCP { }, ) - const transport = new StreamableHTTPClientTransport(new URL(mcpConfig.url), { authProvider }) + const transport = new StreamableHTTPClientTransport(new URL(mcpConfig.url), { + authProvider, + requestInit: mcpConfig.headers ? { headers: mcpConfig.headers } : undefined, + }) return yield* Effect.tryPromise({ try: () => { From 7e869d3a3d6694a9c42325295f25a1e4f2932b01 Mon Sep 17 00:00:00 2001 From: "Claude 2.0" Date: Wed, 1 Apr 2026 04:13:13 +0800 Subject: [PATCH 5/6] fix: use openai key for Azure provider options @ai-sdk/azure delegates to OpenAIChatLanguageModel which hardcodes provider: 'openai' in parseProviderOptions. Model options like reasoningEffort were being placed under 'azure' key and silently ignored. 
Now using 'openai' key for @ai-sdk/azure provider options. Fixes #20275 --- packages/opencode/src/provider/transform.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/packages/opencode/src/provider/transform.ts b/packages/opencode/src/provider/transform.ts index f651a5b91aaf..1b14b566a646 100644 --- a/packages/opencode/src/provider/transform.ts +++ b/packages/opencode/src/provider/transform.ts @@ -935,7 +935,9 @@ export namespace ProviderTransform { } const key = sdkKey(model.api.npm) ?? model.providerID - return { [key]: options } + // @ai-sdk/azure delegates to OpenAIChatLanguageModel which expects "openai" key + const azureKey = model.api.npm === "@ai-sdk/azure" ? "openai" : key + return { [azureKey]: options } } export function maxOutputTokens(model: Provider.Model): number { From 5f20bd9bd66be99e1882d0bb880f0b523f63338f Mon Sep 17 00:00:00 2001 From: "Claude 2.0" Date: Wed, 1 Apr 2026 04:17:24 +0800 Subject: [PATCH 6/6] fix: remove 30-day limit from TUI session list The TUI was filtering sessions to only show those from the last 30 days, causing older sessions to be missing from the session list dialog. Removed the hardcoded start date filter to show all sessions like the CLI. Fixes #20238 --- packages/opencode/src/cli/cmd/tui/context/sync.tsx | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/packages/opencode/src/cli/cmd/tui/context/sync.tsx b/packages/opencode/src/cli/cmd/tui/context/sync.tsx index 3b296a927aa4..f553b8bc4635 100644 --- a/packages/opencode/src/cli/cmd/tui/context/sync.tsx +++ b/packages/opencode/src/cli/cmd/tui/context/sync.tsx @@ -357,9 +357,8 @@ export const { use: useSync, provider: SyncProvider } = createSimpleContext({ async function bootstrap() { console.log("bootstrapping") - const start = Date.now() - 30 * 24 * 60 * 60 * 1000 const sessionListPromise = sdk.client.session - .list({ start: start }) + .list({}) .then((x) => (x.data ?? 
[]).toSorted((a, b) => a.id.localeCompare(b.id))) // blocking - include session.list when continuing a session