diff --git a/packages/opencode/src/provider/transform.ts b/packages/opencode/src/provider/transform.ts
index 8091f731f0a2..b4eb14a2ffa7 100644
--- a/packages/opencode/src/provider/transform.ts
+++ b/packages/opencode/src/provider/transform.ts
@@ -171,7 +171,7 @@ export namespace ProviderTransform {
     return msgs
   }

-  function applyCaching(msgs: ModelMessage[], providerID: string): ModelMessage[] {
+  function applyCaching(msgs: ModelMessage[], model: Provider.Model): ModelMessage[] {
     const system = msgs.filter((msg) => msg.role === "system").slice(0, 2)
     const final = msgs.filter((msg) => msg.role !== "system").slice(-2)

@@ -194,7 +194,7 @@ export namespace ProviderTransform {
     }

     for (const msg of unique([...system, ...final])) {
-      const useMessageLevelOptions = providerID === "anthropic" || providerID.includes("bedrock")
+      const useMessageLevelOptions = model.providerID === "anthropic" || model.providerID.includes("bedrock")
       const shouldUseContentOptions = !useMessageLevelOptions && Array.isArray(msg.content) && msg.content.length > 0

       if (shouldUseContentOptions) {
@@ -253,14 +253,15 @@ export namespace ProviderTransform {
     msgs = unsupportedParts(msgs, model)
     msgs = normalizeMessages(msgs, model, options)
     if (
-      model.providerID === "anthropic" ||
-      model.api.id.includes("anthropic") ||
-      model.api.id.includes("claude") ||
-      model.id.includes("anthropic") ||
-      model.id.includes("claude") ||
-      model.api.npm === "@ai-sdk/anthropic"
+      (model.providerID === "anthropic" ||
+        model.api.id.includes("anthropic") ||
+        model.api.id.includes("claude") ||
+        model.id.includes("anthropic") ||
+        model.id.includes("claude") ||
+        model.api.npm === "@ai-sdk/anthropic") &&
+      model.api.npm !== "@ai-sdk/gateway"
     ) {
-      msgs = applyCaching(msgs, model.providerID)
+      msgs = applyCaching(msgs, model)
     }

     // Remap providerOptions keys from stored providerID to expected SDK key
@@ -363,8 +364,50 @@ export namespace ProviderTransform {
         if (!model.id.includes("gpt") && !model.id.includes("gemini-3")) return {}
         return Object.fromEntries(OPENAI_EFFORTS.map((effort) => [effort, { reasoning: { effort } }]))

-      // TODO: YOU CANNOT SET max_tokens if this is set!!!
       case "@ai-sdk/gateway":
+        if (model.id.includes("anthropic")) {
+          return {
+            high: {
+              thinking: {
+                type: "enabled",
+                budgetTokens: 16000,
+              },
+            },
+            max: {
+              thinking: {
+                type: "enabled",
+                budgetTokens: 31999,
+              },
+            },
+          }
+        }
+        if (model.id.includes("google")) {
+          if (model.id.includes("2.5")) {
+            return {
+              high: {
+                thinkingConfig: {
+                  includeThoughts: true,
+                  thinkingBudget: 16000,
+                },
+              },
+              max: {
+                thinkingConfig: {
+                  includeThoughts: true,
+                  thinkingBudget: 24576,
+                },
+              },
+            }
+          }
+          return Object.fromEntries(
+            ["low", "high"].map((effort) => [
+              effort,
+              {
+                thinkingConfig: {
+                  includeThoughts: true,
+                  thinkingLevel: effort,
+                },
+              },
+            ]),
+          )
+        }
         return Object.fromEntries(OPENAI_EFFORTS.map((effort) => [effort, { reasoningEffort: effort }]))

       case "@ai-sdk/github-copilot":
@@ -720,6 +763,12 @@ export namespace ProviderTransform {
       result["promptCacheKey"] = input.sessionID
     }

+    if (input.model.api.npm === "@ai-sdk/gateway") {
+      result["gateway"] = {
+        caching: "auto",
+      }
+    }
+
     return result
   }

@@ -754,6 +803,34 @@ export namespace ProviderTransform {
   }

   export function providerOptions(model: Provider.Model, options: { [x: string]: any }) {
+    if (model.api.npm === "@ai-sdk/gateway") {
+      // Gateway providerOptions are split across two namespaces:
+      // - `gateway`: gateway-native routing/caching controls
+      // - `<slug>`: provider-specific model options (anthropic/openai/...)
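+      // For example (values mirror the tests below), with
+      // model.api.id = "anthropic/claude-sonnet-4":
+      //   { gateway: { order: [...] }, thinking: { budgetTokens: 12_000 } }
+      //   => { gateway: { order: [...] }, anthropic: { thinking: { budgetTokens: 12_000 } } }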
+      // We keep `gateway` as-is and route every other top-level option under the
+      // model-derived upstream slug so variants/options can stay flat internally.
+      const i = model.api.id.indexOf("/")
+      const slug = i > 0 ? model.api.id.slice(0, i) : undefined
+      const gateway = options.gateway
+      const rest = Object.fromEntries(Object.entries(options).filter(([k]) => k !== "gateway"))
+      const has = Object.keys(rest).length > 0
+
+      const result: Record<string, any> = {}
+      if (gateway !== undefined) result.gateway = gateway
+
+      if (has) {
+        if (slug) {
+          result[slug] = rest
+        } else if (gateway && typeof gateway === "object" && !Array.isArray(gateway)) {
+          result.gateway = { ...gateway, ...rest }
+        } else {
+          result.gateway = rest
+        }
+      }
+
+      return result
+    }
+
     const key = sdkKey(model.api.npm) ?? model.providerID
     return { [key]: options }
   }
diff --git a/packages/opencode/test/provider/transform.test.ts b/packages/opencode/test/provider/transform.test.ts
index 02bb5278fc7d..99acfcc81763 100644
--- a/packages/opencode/test/provider/transform.test.ts
+++ b/packages/opencode/test/provider/transform.test.ts
@@ -175,6 +175,174 @@ describe("ProviderTransform.options - gpt-5 textVerbosity", () => {
   })
 })

+describe("ProviderTransform.options - gateway", () => {
+  const sessionID = "test-session-123"
+
+  const createModel = (id: string) =>
+    ({
+      id,
+      providerID: "vercel",
+      api: {
+        id,
+        url: "https://ai-gateway.vercel.sh/v3/ai",
+        npm: "@ai-sdk/gateway",
+      },
+      name: id,
+      capabilities: {
+        temperature: true,
+        reasoning: true,
+        attachment: true,
+        toolcall: true,
+        input: { text: true, audio: false, image: true, video: false, pdf: true },
+        output: { text: true, audio: false, image: false, video: false, pdf: false },
+        interleaved: false,
+      },
+      cost: {
+        input: 0.001,
+        output: 0.002,
+        cache: { read: 0.0001, write: 0.0002 },
+      },
+      limit: {
+        context: 200_000,
+        output: 8192,
+      },
+      status: "active",
+      options: {},
+      headers: {},
+      release_date: "2024-01-01",
+    }) as any
+
+  test("puts gateway defaults under gateway key", () => {
+    const model = createModel("anthropic/claude-sonnet-4")
+    const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
+    expect(result).toEqual({
+      gateway: {
+        caching: "auto",
+      },
+    })
+  })
+})
+
+describe("ProviderTransform.providerOptions", () => {
+  const createModel = (overrides: Partial<Record<string, any>> = {}) =>
+    ({
+      id: "test/test-model",
+      providerID: "test",
+      api: {
+        id: "test-model",
+        url: "https://api.test.com",
+        npm: "@ai-sdk/openai",
+      },
+      name: "Test Model",
+      capabilities: {
+        temperature: true,
+        reasoning: true,
+        attachment: true,
+        toolcall: true,
+        input: { text: true, audio: false, image: true, video: false, pdf: false },
+        output: { text: true, audio: false, image: false, video: false, pdf: false },
+        interleaved: false,
+      },
+      cost: {
+        input: 0.001,
+        output: 0.002,
+        cache: { read: 0.0001, write: 0.0002 },
+      },
+      limit: {
+        context: 200_000,
+        output: 64_000,
+      },
+      status: "active",
+      options: {},
+      headers: {},
+      release_date: "2024-01-01",
+      ...overrides,
+    }) as any
+
+  test("uses sdk key for non-gateway models", () => {
+    const model = createModel({
+      providerID: "my-bedrock",
+      api: {
+        id: "anthropic.claude-sonnet-4",
+        url: "https://bedrock.aws",
+        npm: "@ai-sdk/amazon-bedrock",
+      },
+    })
+
+    expect(ProviderTransform.providerOptions(model, { cachePoint: { type: "default" } })).toEqual({
+      bedrock: { cachePoint: { type: "default" } },
+    })
+  })
+
+  test("uses gateway model provider slug for gateway models", () => {
+    const model =
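+      // api.id "anthropic/claude-sonnet-4" carries an upstream slug, so the
+      // thinking options below should land under "anthropic", not "gateway"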
+      createModel({
+        providerID: "vercel",
+        api: {
+          id: "anthropic/claude-sonnet-4",
+          url: "https://ai-gateway.vercel.sh/v3/ai",
+          npm: "@ai-sdk/gateway",
+        },
+      })
+
+    expect(ProviderTransform.providerOptions(model, { thinking: { type: "enabled", budgetTokens: 12_000 } })).toEqual({
+      anthropic: { thinking: { type: "enabled", budgetTokens: 12_000 } },
+    })
+  })
+
+  test("falls back to gateway key when gateway api id is unscoped", () => {
+    const model = createModel({
+      id: "anthropic/claude-sonnet-4",
+      providerID: "vercel",
+      api: {
+        id: "claude-sonnet-4",
+        url: "https://ai-gateway.vercel.sh/v3/ai",
+        npm: "@ai-sdk/gateway",
+      },
+    })
+
+    expect(ProviderTransform.providerOptions(model, { thinking: { type: "enabled", budgetTokens: 12_000 } })).toEqual({
+      gateway: { thinking: { type: "enabled", budgetTokens: 12_000 } },
+    })
+  })
+
+  test("splits gateway routing options from provider-specific options", () => {
+    const model = createModel({
+      providerID: "vercel",
+      api: {
+        id: "anthropic/claude-sonnet-4",
+        url: "https://ai-gateway.vercel.sh/v3/ai",
+        npm: "@ai-sdk/gateway",
+      },
+    })
+
+    expect(
+      ProviderTransform.providerOptions(model, {
+        gateway: { order: ["vertex", "anthropic"] },
+        thinking: { type: "enabled", budgetTokens: 12_000 },
+      }),
+    ).toEqual({
+      gateway: { order: ["vertex", "anthropic"] },
+      anthropic: { thinking: { type: "enabled", budgetTokens: 12_000 } },
+    } as any)
+  })
+
+  test("falls back to gateway key when model id has no provider slug", () => {
+    const model = createModel({
+      id: "claude-sonnet-4",
+      providerID: "vercel",
+      api: {
+        id: "claude-sonnet-4",
+        url: "https://ai-gateway.vercel.sh/v3/ai",
+        npm: "@ai-sdk/gateway",
+      },
+    })
+
+    expect(ProviderTransform.providerOptions(model, { reasoningEffort: "high" })).toEqual({
+      gateway: { reasoningEffort: "high" },
+    })
+  })
+})
+
 describe("ProviderTransform.schema - gemini array items", () => {
   test("adds missing items for array properties", () => {
     const geminiModel = {
@@ -1232,6 +1400,105 @@ describe("ProviderTransform.message - claude w/bedrock custom inference profile"
   })
 })

+describe("ProviderTransform.message - cache control on gateway", () => {
+  const createModel = (overrides: Partial<Record<string, any>> = {}) =>
+    ({
+      id: "anthropic/claude-sonnet-4",
+      providerID: "vercel",
+      api: {
+        id: "anthropic/claude-sonnet-4",
+        url: "https://ai-gateway.vercel.sh/v3/ai",
+        npm: "@ai-sdk/gateway",
+      },
+      name: "Claude Sonnet 4",
+      capabilities: {
+        temperature: true,
+        reasoning: true,
+        attachment: true,
+        toolcall: true,
+        input: { text: true, audio: false, image: true, video: false, pdf: true },
+        output: { text: true, audio: false, image: false, video: false, pdf: false },
+        interleaved: false,
+      },
+      cost: { input: 0.001, output: 0.002, cache: { read: 0.0001, write: 0.0002 } },
+      limit: { context: 200_000, output: 8192 },
+      status: "active",
+      options: {},
+      headers: {},
+      ...overrides,
+    }) as any
+
+  test("gateway does not set cache control for anthropic models", () => {
+    const model = createModel()
+    const msgs = [
+      {
+        role: "system",
+        content: [{ type: "text", text: "You are a helpful assistant" }],
+      },
+      {
+        role: "user",
+        content: "Hello",
+      },
+    ] as any[]
+
+    const result = ProviderTransform.message(msgs, model, {}) as any[]
+
+    expect(result[0].content[0].providerOptions).toBeUndefined()
+    expect(result[0].providerOptions).toBeUndefined()
+  })
+
+  test("non-gateway anthropic keeps existing cache control behavior", () => {
+    const model = createModel({
+      providerID: "anthropic",
+      api: {
+        id: "claude-sonnet-4",
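+        // direct @ai-sdk/anthropic endpoint (no gateway), so the anthropic
+        // cache-control path in applyCaching still runs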
+ url: "https://api.anthropic.com", + npm: "@ai-sdk/anthropic", + }, + }) + const msgs = [ + { + role: "system", + content: "You are a helpful assistant", + }, + { + role: "user", + content: "Hello", + }, + ] as any[] + + const result = ProviderTransform.message(msgs, model, {}) as any[] + + expect(result[0].providerOptions).toEqual({ + anthropic: { + cacheControl: { + type: "ephemeral", + }, + }, + openrouter: { + cacheControl: { + type: "ephemeral", + }, + }, + bedrock: { + cachePoint: { + type: "default", + }, + }, + openaiCompatible: { + cache_control: { + type: "ephemeral", + }, + }, + copilot: { + copilot_cache_control: { + type: "ephemeral", + }, + }, + }) + }) +}) + describe("ProviderTransform.variants", () => { const createMockModel = (overrides: Partial = {}): any => ({ id: "test/test-model", @@ -1408,6 +1675,32 @@ describe("ProviderTransform.variants", () => { }) describe("@ai-sdk/gateway", () => { + test("anthropic models return anthropic thinking options", () => { + const model = createMockModel({ + id: "anthropic/claude-sonnet-4", + providerID: "gateway", + api: { + id: "anthropic/claude-sonnet-4", + url: "https://gateway.ai", + npm: "@ai-sdk/gateway", + }, + }) + const result = ProviderTransform.variants(model) + expect(Object.keys(result)).toEqual(["high", "max"]) + expect(result.high).toEqual({ + thinking: { + type: "enabled", + budgetTokens: 16000, + }, + }) + expect(result.max).toEqual({ + thinking: { + type: "enabled", + budgetTokens: 31999, + }, + }) + }) + test("returns OPENAI_EFFORTS with reasoningEffort", () => { const model = createMockModel({ id: "gateway/gateway-model",