diff --git a/packages/opencode/src/provider/transform.ts b/packages/opencode/src/provider/transform.ts
index 8aab0d4151d8..56401069828d 100644
--- a/packages/opencode/src/provider/transform.ts
+++ b/packages/opencode/src/provider/transform.ts
@@ -706,6 +706,22 @@ export namespace ProviderTransform {
 
   export function providerOptions(model: Provider.Model, options: { [x: string]: any }) {
     const key = sdkKey(model.api.npm) ?? model.providerID
+
+    // The @ai-sdk/openai-compatible SDK does not convert camelCase to snake_case
+    // for custom providerOptions (only for its own schema fields like reasoningEffort).
+    // When thinking.budgetTokens is set, we must convert it to budget_tokens so that
+    // OpenAI-compatible proxies (LiteLLM, OpenRouter, etc.) can forward it correctly.
+    if (model.api.npm === "@ai-sdk/openai-compatible" && options?.thinking?.budgetTokens != null) {
+      const { budgetTokens, ...thinkingRest } = options.thinking
+      options = {
+        ...options,
+        thinking: {
+          ...thinkingRest,
+          budget_tokens: budgetTokens,
+        },
+      }
+    }
+
     return { [key]: options }
   }
 
diff --git a/packages/opencode/test/provider/transform.test.ts b/packages/opencode/test/provider/transform.test.ts
index 0e0bb440aa81..3ff1c1495b6a 100644
--- a/packages/opencode/test/provider/transform.test.ts
+++ b/packages/opencode/test/provider/transform.test.ts
@@ -267,6 +267,71 @@ describe("ProviderTransform.maxOutputTokens", () => {
       expect(result).toBe(OUTPUT_TOKEN_MAX)
     })
   })
+
+})
+
+describe("ProviderTransform.providerOptions - openai-compatible snake_case conversion", () => {
+  test("converts thinking.budgetTokens to budget_tokens for openai-compatible", () => {
+    const model = {
+      providerID: "litellm",
+      api: { npm: "@ai-sdk/openai-compatible" },
+    } as any
+    const options = {
+      thinking: {
+        type: "enabled",
+        budgetTokens: 16000,
+      },
+    }
+    const result = ProviderTransform.providerOptions(model, options)
+    expect(result.litellm.thinking.budget_tokens).toBe(16000)
+    expect(result.litellm.thinking.budgetTokens).toBeUndefined()
+    expect(result.litellm.thinking.type).toBe("enabled")
+  })
+
+  test("does not convert for @ai-sdk/anthropic (SDK handles it)", () => {
+    const model = {
+      providerID: "anthropic",
+      api: { npm: "@ai-sdk/anthropic" },
+    } as any
+    const options = {
+      thinking: {
+        type: "enabled",
+        budgetTokens: 16000,
+      },
+    }
+    const result = ProviderTransform.providerOptions(model, options)
+    expect(result.anthropic.thinking.budgetTokens).toBe(16000)
+    expect(result.anthropic.thinking.budget_tokens).toBeUndefined()
+  })
+
+  test("passes through options without thinking unchanged", () => {
+    const model = {
+      providerID: "litellm",
+      api: { npm: "@ai-sdk/openai-compatible" },
+    } as any
+    const options = {
+      reasoningEffort: "high",
+    }
+    const result = ProviderTransform.providerOptions(model, options)
+    expect(result.litellm.reasoningEffort).toBe("high")
+    expect(result.litellm.thinking).toBeUndefined()
+  })
+
+  test("passes through thinking without budgetTokens unchanged", () => {
+    const model = {
+      providerID: "litellm",
+      api: { npm: "@ai-sdk/openai-compatible" },
+    } as any
+    const options = {
+      thinking: {
+        type: "enabled",
+        budget_tokens: 16000,
+      },
+    }
+    const result = ProviderTransform.providerOptions(model, options)
+    expect(result.litellm.thinking.budget_tokens).toBe(16000)
+    expect(result.litellm.thinking.type).toBe("enabled")
+  })
 })
 
 describe("ProviderTransform.schema - gemini array items", () => {