From f1138b9a5571b6ce64938d5e719c1bbc75ea994e Mon Sep 17 00:00:00 2001 From: GitHub Action Date: Wed, 3 Dec 2025 12:04:36 +0000 Subject: [PATCH 01/27] ignore: update download stats 2025-12-03 --- STATS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/STATS.md b/STATS.md index a9807ddf55a0..25678e915d56 100644 --- a/STATS.md +++ b/STATS.md @@ -158,3 +158,4 @@ | 2025-11-30 | 916,116 (+7,427) | 870,194 (+6,833) | 1,786,310 (+14,260) | | 2025-12-01 | 925,898 (+9,782) | 876,500 (+6,306) | 1,802,398 (+16,088) | | 2025-12-02 | 939,250 (+13,352) | 890,919 (+14,419) | 1,830,169 (+27,771) | +| 2025-12-03 | 952,249 (+12,999) | 903,713 (+12,794) | 1,855,962 (+25,793) | From 5b34636afa869dfe33cd0546128171ad76d82519 Mon Sep 17 00:00:00 2001 From: Aiden Cline Date: Wed, 3 Dec 2025 10:46:48 -0600 Subject: [PATCH 02/27] ignore: docs & style --- .opencode/opencode.jsonc | 1 + AGENTS.md | 13 ------------- CONTRIBUTING.md | 2 ++ STYLE_GUIDE.md | 12 ++++++++++++ 4 files changed, 15 insertions(+), 13 deletions(-) create mode 100644 STYLE_GUIDE.md diff --git a/.opencode/opencode.jsonc b/.opencode/opencode.jsonc index ce4a6658bf5e..c34163889614 100644 --- a/.opencode/opencode.jsonc +++ b/.opencode/opencode.jsonc @@ -4,6 +4,7 @@ // "enterprise": { // "url": "https://enterprise.dev.opencode.ai", // }, + "instructions": ["STYLE_GUIDE.md"], "provider": { "opencode": { "options": { diff --git a/AGENTS.md b/AGENTS.md index 22b305dac137..5a95fc509fec 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -1,16 +1,3 @@ -## IMPORTANT - -- Try to keep things in one function unless composable or reusable -- DO NOT do unnecessary destructuring of variables -- DO NOT use `else` statements unless necessary -- DO NOT use `try`/`catch` if it can be avoided -- AVOID `try`/`catch` where possible -- AVOID `else` statements -- AVOID using `any` type -- AVOID `let` statements -- PREFER single word variable names where possible -- Use as many bun apis as possible like Bun.file() - ## Debugging - To test opencode in the `packages/opencode` directory you can run `bun dev` diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2fc5737d795e..6a24995e81af 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -42,6 +42,8 @@ Want to take on an issue? Leave a comment and a maintainer may assign it to you > [!NOTE] > After touching `packages/opencode/src/server/server.ts`, run "./packages/sdk/js/script/build.ts" to regenerate the JS sdk. +Please try to follow the [style guide](./STYLE_GUIDE.md) + ### Setting up a Debugger Bun debugging is currently rough around the edges. We hope this guide helps you get set up and avoid some pain points. 
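The style rules above (moved out of AGENTS.md and into the new STYLE_GUIDE.md added below) are easier to see in code. The snippet that follows is a minimal, hypothetical sketch and is not part of this patch: `readConfig` is an invented helper used only to show early returns instead of `else`, `const` over `let`, no `try`/`catch`, single-word variable names, and a Bun API (`Bun.file()`) in practice.

```ts
// Hypothetical helper, only to illustrate the style rules above. Not part of this patch.
async function readConfig(dir: string) {
  const file = Bun.file(`${dir}/opencode.json`) // Bun API instead of node:fs
  const exists = await file.exists()
  if (!exists) return {} // early return, no `else`
  const text = await file.text()
  if (!text.trim()) return {} // guard clauses keep it one flat function
  return JSON.parse(text) as Record<string, unknown> // typed result, no `any`
}
```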
diff --git a/STYLE_GUIDE.md b/STYLE_GUIDE.md new file mode 100644 index 000000000000..164f69bd46ce --- /dev/null +++ b/STYLE_GUIDE.md @@ -0,0 +1,12 @@ +## Style Guide + +- Try to keep things in one function unless composable or reusable +- DO NOT do unnecessary destructuring of variables +- DO NOT use `else` statements unless necessary +- DO NOT use `try`/`catch` if it can be avoided +- AVOID `try`/`catch` where possible +- AVOID `else` statements +- AVOID using `any` type +- AVOID `let` statements +- PREFER single word variable names where possible +- Use as many bun apis as possible like Bun.file() From 0eb97086fce1283047fd2296fcdbc37b09b4763f Mon Sep 17 00:00:00 2001 From: GitHub Action Date: Wed, 3 Dec 2025 16:47:25 +0000 Subject: [PATCH 03/27] chore: format code --- packages/plugin/package.json | 2 +- packages/sdk/js/package.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/plugin/package.json b/packages/plugin/package.json index 8df84bfdcde0..2871fc7db2e7 100644 --- a/packages/plugin/package.json +++ b/packages/plugin/package.json @@ -24,4 +24,4 @@ "typescript": "catalog:", "@typescript/native-preview": "catalog:" } -} \ No newline at end of file +} diff --git a/packages/sdk/js/package.json b/packages/sdk/js/package.json index 0995f119206d..9ea1689ae24a 100644 --- a/packages/sdk/js/package.json +++ b/packages/sdk/js/package.json @@ -26,4 +26,4 @@ "publishConfig": { "directory": "dist" } -} \ No newline at end of file +} From 91db82c138cc7ab1e046d078e44e6336669da3a6 Mon Sep 17 00:00:00 2001 From: Aiden Cline Date: Wed, 3 Dec 2025 11:16:10 -0600 Subject: [PATCH 04/27] add retry case for grok resource exhausted --- packages/opencode/src/session/retry.ts | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packages/opencode/src/session/retry.ts b/packages/opencode/src/session/retry.ts index ace7350b2204..3cabacdb87b9 100644 --- a/packages/opencode/src/session/retry.ts +++ b/packages/opencode/src/session/retry.ts @@ -65,6 +65,9 @@ export namespace SessionRetry { if (json.type === "error" && json.error?.type === "too_many_requests") { return "Too Many Requests" } + if (json.code === "Some resource has been exhausted") { + return "Provider is overloaded" + } } catch {} } From 0bccd1d5788d607f761cabf4d2d1a58980c60fe4 Mon Sep 17 00:00:00 2001 From: Spoon <212802214+spoons-and-mirrors@users.noreply.github.com> Date: Wed, 3 Dec 2025 18:19:43 +0100 Subject: [PATCH 05/27] feat: experimental.primary_tools, allow user to set the tools that should only be available to primary agents (#4913) Co-authored-by: GitHub Action --- packages/opencode/src/config/config.ts | 4 ++++ packages/opencode/src/tool/task.ts | 4 ++++ packages/sdk/js/src/gen/types.gen.ts | 4 ++++ 3 files changed, 12 insertions(+) diff --git a/packages/opencode/src/config/config.ts b/packages/opencode/src/config/config.ts index 3ed487653d52..9cae9370d400 100644 --- a/packages/opencode/src/config/config.ts +++ b/packages/opencode/src/config/config.ts @@ -668,6 +668,10 @@ export namespace Config { chatMaxRetries: z.number().optional().describe("Number of retries for chat completions on failure"), disable_paste_summary: z.boolean().optional(), batch_tool: z.boolean().optional().describe("Enable the batch tool"), + primary_tools: z + .array(z.string()) + .optional() + .describe("Tools that should only be available to primary agents."), }) .optional(), }) diff --git a/packages/opencode/src/tool/task.ts b/packages/opencode/src/tool/task.ts index 3bb7fb2bf39a..35b9631247ee 100644 --- 
a/packages/opencode/src/tool/task.ts +++ b/packages/opencode/src/tool/task.ts @@ -9,6 +9,7 @@ import { Agent } from "../agent/agent" import { SessionPrompt } from "../session/prompt" import { iife } from "@/util/iife" import { defer } from "@/util/defer" +import { Config } from "../config/config" export const TaskTool = Tool.define("task", async () => { const agents = await Agent.list().then((x) => x.filter((a) => a.mode !== "primary")) @@ -77,6 +78,8 @@ export const TaskTool = Tool.define("task", async () => { ctx.abort.addEventListener("abort", cancel) using _ = defer(() => ctx.abort.removeEventListener("abort", cancel)) const promptParts = await SessionPrompt.resolvePromptParts(params.prompt) + + const config = await Config.get() const result = await SessionPrompt.prompt({ messageID, sessionID: session.id, @@ -89,6 +92,7 @@ export const TaskTool = Tool.define("task", async () => { todowrite: false, todoread: false, task: false, + ...Object.fromEntries((config.experimental?.primary_tools ?? []).map((t) => [t, false])), ...agent.tools, }, parts: promptParts, diff --git a/packages/sdk/js/src/gen/types.gen.ts b/packages/sdk/js/src/gen/types.gen.ts index f8e1d34fb6e7..d26c6ec2ce77 100644 --- a/packages/sdk/js/src/gen/types.gen.ts +++ b/packages/sdk/js/src/gen/types.gen.ts @@ -1249,6 +1249,10 @@ export type Config = { * Enable the batch tool */ batch_tool?: boolean + /** + * Tools that should only be available to primary agents. + */ + primary_tools?: Array } } From c5b4cc80cc4ad07dfe68847fec518779f2cce65c Mon Sep 17 00:00:00 2001 From: Luke Parker <10430890+Hona@users.noreply.github.com> Date: Thu, 4 Dec 2025 03:21:13 +1000 Subject: [PATCH 06/27] fix: bunfs path on windows (#5011) --- packages/opencode/script/build.ts | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/packages/opencode/script/build.ts b/packages/opencode/script/build.ts index 98c332e32269..64f43b748dfb 100755 --- a/packages/opencode/script/build.ts +++ b/packages/opencode/script/build.ts @@ -16,6 +16,7 @@ import pkg from "../package.json" import { Script } from "@opencode-ai/script" const singleFlag = process.argv.includes("--single") +const skipInstall = process.argv.includes("--skip-install") const allTargets: { os: string @@ -83,8 +84,10 @@ const targets = singleFlag await $`rm -rf dist` const binaries: Record = {} -await $`bun install --os="*" --cpu="*" @opentui/core@${pkg.dependencies["@opentui/core"]}` -await $`bun install --os="*" --cpu="*" @parcel/watcher@${pkg.dependencies["@parcel/watcher"]}` +if (!skipInstall) { + await $`bun install --os="*" --cpu="*" @opentui/core@${pkg.dependencies["@opentui/core"]}` + await $`bun install --os="*" --cpu="*" @parcel/watcher@${pkg.dependencies["@parcel/watcher"]}` +} for (const item of targets) { const name = [ pkg.name, @@ -102,6 +105,10 @@ for (const item of targets) { const parserWorker = fs.realpathSync(path.resolve(dir, "./node_modules/@opentui/core/parser.worker.js")) const workerPath = "./src/cli/cmd/tui/worker.ts" + // Use platform-specific bunfs root path based on target OS + const bunfsRoot = item.os === "win32" ? 
"B:/~BUN/root/" : "/$bunfs/root/" + const workerRelativePath = path.relative(dir, parserWorker).replaceAll("\\", "/") + await Bun.build({ conditions: ["browser"], tsconfig: "./tsconfig.json", @@ -118,7 +125,7 @@ for (const item of targets) { entrypoints: ["./src/index.ts", parserWorker, workerPath], define: { OPENCODE_VERSION: `'${Script.version}'`, - OTUI_TREE_SITTER_WORKER_PATH: "/$bunfs/root/" + path.relative(dir, parserWorker).replaceAll("\\", "/"), + OTUI_TREE_SITTER_WORKER_PATH: bunfsRoot + workerRelativePath, OPENCODE_WORKER_PATH: workerPath, OPENCODE_CHANNEL: `'${Script.channel}'`, }, From 921b98066d4f3d1f464435eaa3d28c1edd949cc0 Mon Sep 17 00:00:00 2001 From: Ariane Emory <97994360+ariane-emory@users.noreply.github.com> Date: Wed, 3 Dec 2025 13:30:11 -0500 Subject: [PATCH 07/27] feat: add messages_last_user command to scroll TUI to last user message (implements #4847) (#4855) Co-authored-by: GitHub Action Co-authored-by: Aiden Cline <63023139+rekram1-node@users.noreply.github.com> --- .../src/cli/cmd/tui/routes/session/index.tsx | 31 +++++++++++++++++++ packages/opencode/src/config/config.ts | 1 + packages/sdk/js/src/gen/types.gen.ts | 4 +++ packages/web/src/content/docs/keybinds.mdx | 1 + 4 files changed, 37 insertions(+) diff --git a/packages/opencode/src/cli/cmd/tui/routes/session/index.tsx b/packages/opencode/src/cli/cmd/tui/routes/session/index.tsx index 54020af93ffe..b97b89c1277d 100644 --- a/packages/opencode/src/cli/cmd/tui/routes/session/index.tsx +++ b/packages/opencode/src/cli/cmd/tui/routes/session/index.tsx @@ -557,6 +557,37 @@ export function Session() { dialog.clear() }, }, + { + title: "Jump to last user message", + value: "session.messages_last_user", + keybind: "messages_last_user", + category: "Session", + onSelect: () => { + const messages = sync.data.message[route.sessionID] + if (!messages || !messages.length) return + + // Find the most recent user message with non-ignored, non-synthetic text parts + for (let i = messages.length - 1; i >= 0; i--) { + const message = messages[i] + if (!message || message.role !== "user") continue + + const parts = sync.data.part[message.id] + if (!parts || !Array.isArray(parts)) continue + + const hasValidTextPart = parts.some( + (part) => part && part.type === "text" && !part.synthetic && !part.ignored, + ) + + if (hasValidTextPart) { + const child = scroll.getChildren().find((child) => { + return child.id === message.id + }) + if (child) scroll.scrollBy(child.y - scroll.y - 1) + break + } + } + }, + }, { title: "Copy last assistant message", value: "messages.copy", diff --git a/packages/opencode/src/config/config.ts b/packages/opencode/src/config/config.ts index 9cae9370d400..09a9e63b97b3 100644 --- a/packages/opencode/src/config/config.ts +++ b/packages/opencode/src/config/config.ts @@ -418,6 +418,7 @@ export namespace Config { .describe("Scroll messages down by half page"), messages_first: z.string().optional().default("ctrl+g,home").describe("Navigate to first message"), messages_last: z.string().optional().default("ctrl+alt+g,end").describe("Navigate to last message"), + messages_last_user: z.string().optional().default("none").describe("Navigate to last user message"), messages_copy: z.string().optional().default("y").describe("Copy message"), messages_undo: z.string().optional().default("u").describe("Undo message"), messages_redo: z.string().optional().default("r").describe("Redo message"), diff --git a/packages/sdk/js/src/gen/types.gen.ts b/packages/sdk/js/src/gen/types.gen.ts index d26c6ec2ce77..8ecb12935aef 100644 
--- a/packages/sdk/js/src/gen/types.gen.ts +++ b/packages/sdk/js/src/gen/types.gen.ts @@ -796,6 +796,10 @@ export type KeybindsConfig = { * Navigate to last message */ messages_last?: string + /** + * Navigate to last user message + */ + messages_last_user?: string /** * Copy message */ diff --git a/packages/web/src/content/docs/keybinds.mdx b/packages/web/src/content/docs/keybinds.mdx index 80a74c159bb9..a32756e18c8e 100644 --- a/packages/web/src/content/docs/keybinds.mdx +++ b/packages/web/src/content/docs/keybinds.mdx @@ -35,6 +35,7 @@ OpenCode has a list of keybinds that you can customize through the OpenCode conf "messages_copy": "y", "messages_undo": "u", "messages_redo": "r", + "messages_last_user": "none", "messages_toggle_conceal": "h", "model_list": "m", "model_cycle_recent": "f2", From c3c9003dbbcff0c93d77ec37897f787b4311416a Mon Sep 17 00:00:00 2001 From: Aiden Cline Date: Wed, 3 Dec 2025 12:45:01 -0600 Subject: [PATCH 08/27] ci: add pr review --- .github/guidelines-check.yml | 57 ----------------------- .github/workflows/review.yml | 89 ++++++++++++++++++++++++++++++++++++ 2 files changed, 89 insertions(+), 57 deletions(-) delete mode 100644 .github/guidelines-check.yml create mode 100644 .github/workflows/review.yml diff --git a/.github/guidelines-check.yml b/.github/guidelines-check.yml deleted file mode 100644 index 522e52a5b2c8..000000000000 --- a/.github/guidelines-check.yml +++ /dev/null @@ -1,57 +0,0 @@ -# -# This file is intentionally in the wrong dir, will move and add later.... -# - -name: Guidelines Check - -on: - # Disabled - uncomment to re-enable - # pull_request_target: - # types: [opened, synchronize] - -jobs: - check-guidelines: - runs-on: ubuntu-latest - permissions: - contents: read - pull-requests: write - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - fetch-depth: 1 - - - name: Install opencode - run: curl -fsSL https://opencode.ai/install | bash - - - name: Check PR guidelines compliance - env: - ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - OPENCODE_PERMISSION: '{ "bash": { "gh*": "allow", "gh pr review*": "deny", "*": "deny" } }' - run: | - opencode run -m anthropic/claude-sonnet-4-20250514 "A new pull request has been created: '${{ github.event.pull_request.title }}' - - - ${{ github.event.pull_request.number }} - - - - ${{ github.event.pull_request.body }} - - - Please check all the code changes in this pull request against the guidelines in AGENTS.md file in this repository. Diffs are important but make sure you read the entire file to get proper context. Make it clear the suggestions are merely suggestions and the human can decide what to do - - Use the gh cli to create comments on the files for the violations. Try to leave the comment on the exact line number. If you have a suggested fix include it in a suggestion code block. - - Command MUST be like this. - ``` - gh api \ - --method POST \ - -H "Accept: application/vnd.github+json" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - /repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/comments \ - -f 'body=[summary of issue]' -f 'commit_id=${{ github.event.pull_request.head.sha }}' -f 'path=[path-to-file]' -F "line=[line]" -f 'side=RIGHT' - ``` - - Only create comments for actual violations. If the code follows all guidelines, don't run any gh commands." 
diff --git a/.github/workflows/review.yml b/.github/workflows/review.yml new file mode 100644 index 000000000000..7bee92d1009b --- /dev/null +++ b/.github/workflows/review.yml @@ -0,0 +1,89 @@ +name: Guidelines Check + +on: + pull_request_target: + types: [opened] + issue_comment: + types: [created] + +jobs: + check-guidelines: + if: | + github.event_name == 'pull_request_target' || + (github.event_name == 'issue_comment' && + github.event.issue.pull_request && + startsWith(github.event.comment.body, '/review')) + runs-on: ubuntu-latest + permissions: + contents: read + pull-requests: write + steps: + - name: Check if user has write permission + if: github.event_name == 'issue_comment' + run: | + PERMISSION=$(gh api /repos/${{ github.repository }}/collaborators/${{ github.event.comment.user.login }}/permission --jq '.permission') + if [[ "$PERMISSION" != "write" && "$PERMISSION" != "admin" ]]; then + echo "User does not have write permission" + exit 1 + fi + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Get PR number + id: pr-number + run: | + if [ "${{ github.event_name }}" = "pull_request_target" ]; then + echo "number=${{ github.event.pull_request.number }}" >> $GITHUB_OUTPUT + else + echo "number=${{ github.event.issue.number }}" >> $GITHUB_OUTPUT + fi + + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 1 + + - name: Install opencode + run: curl -fsSL https://opencode.ai/install | bash + + - name: Get PR details + id: pr-details + run: | + PR_DATA=$(gh api /repos/${{ github.repository }}/pulls/${{ steps.pr-number.outputs.number }}) + echo "title=$(echo "$PR_DATA" | jq -r .title)" >> $GITHUB_OUTPUT + echo "body=$(echo "$PR_DATA" | jq -r .body)" >> $GITHUB_OUTPUT + echo "sha=$(echo "$PR_DATA" | jq -r .head.sha)" >> $GITHUB_OUTPUT + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Check PR guidelines compliance + env: + ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + OPENCODE_PERMISSION: '{ "bash": { "gh*": "allow", "gh pr review*": "deny", "*": "deny" } }' + run: | + opencode run -m anthropic/claude-sonnet-4-5 "A new pull request has been created: '${{ steps.pr-details.outputs.title }}' + + + ${{ steps.pr-number.outputs.number }} + + + + ${{ steps.pr-details.outputs.body }} + + + Please check all the code changes in this pull request against the style guide, also look for any bugs if they exist. Diffs are important but make sure you read the entire file to get proper context. Make it clear the suggestions are merely suggestions and the human can decide what to do + + Use the gh cli to create comments on the files for the violations. Try to leave the comment on the exact line number. If you have a suggested fix include it in a suggestion code block. + + Command MUST be like this. + ``` + gh api \ + --method POST \ + -H "Accept: application/vnd.github+json" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + /repos/${{ github.repository }}/pulls/${{ steps.pr-number.outputs.number }}/comments \ + -f 'body=[summary of issue]' -f 'commit_id=${{ steps.pr-details.outputs.sha }}' -f 'path=[path-to-file]' -F "line=[line]" -f 'side=RIGHT' + ``` + + Only create comments for actual violations. If the code follows all guidelines, don't run any gh commands." 
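For reference, this is roughly what the comment command from the new review workflow looks like once the agent substitutes the placeholders. Everything below is hypothetical: `OWNER/REPO`, the PR number, commit SHA, file path, and line are illustrative values, not taken from this patch series.

```sh
# Hypothetical filled-in instance of the workflow's gh api template
gh api \
  --method POST \
  -H "Accept: application/vnd.github+json" \
  -H "X-GitHub-Api-Version: 2022-11-28" \
  /repos/OWNER/REPO/pulls/5000/comments \
  -f 'body=Style guide: prefer an early return here instead of `else`.' \
  -f 'commit_id=0123456789abcdef0123456789abcdef01234567' \
  -f 'path=packages/opencode/src/example.ts' \
  -F "line=42" \
  -f 'side=RIGHT'
```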
From 3181c68cbba64def2b0d10d21e33eaf809df1bad Mon Sep 17 00:00:00 2001 From: Aiden Cline Date: Wed, 3 Dec 2025 13:10:40 -0600 Subject: [PATCH 09/27] ci: make review only fire on non draft pr creation --- .github/workflows/review.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/review.yml b/.github/workflows/review.yml index 7bee92d1009b..6e4f02021c73 100644 --- a/.github/workflows/review.yml +++ b/.github/workflows/review.yml @@ -9,10 +9,11 @@ on: jobs: check-guidelines: if: | - github.event_name == 'pull_request_target' || + (github.event_name == 'pull_request_target' && + github.event.pull_request.draft == false) || (github.event_name == 'issue_comment' && github.event.issue.pull_request && - startsWith(github.event.comment.body, '/review')) + startsWith(github.event.comment.body, '/review')) runs-on: ubuntu-latest permissions: contents: read From e5b13b767eaa60637a459ed4568dba44a74e98f1 Mon Sep 17 00:00:00 2001 From: Frank Date: Wed, 3 Dec 2025 14:24:37 -0500 Subject: [PATCH 10/27] zen: usage graph respect light/dark mode --- .../routes/workspace/[id]/graph-section.tsx | 40 +++++++++++++++---- 1 file changed, 32 insertions(+), 8 deletions(-) diff --git a/packages/console/app/src/routes/workspace/[id]/graph-section.tsx b/packages/console/app/src/routes/workspace/[id]/graph-section.tsx index b13309d3d855..2423605d201d 100644 --- a/packages/console/app/src/routes/workspace/[id]/graph-section.tsx +++ b/packages/console/app/src/routes/workspace/[id]/graph-section.tsx @@ -158,9 +158,24 @@ export function GraphSection() { model: null as string | null, modelDropdownOpen: false, keyDropdownOpen: false, + colorScheme: "light" as "light" | "dark", }) const initialData = createAsync(() => queryCosts(params.id!, store.year, store.month)) + createEffect(() => { + if (typeof window === "undefined") return + + const mediaQuery = window.matchMedia("(prefers-color-scheme: dark)") + setStore({ colorScheme: mediaQuery.matches ? "dark" : "light" }) + + const handleColorSchemeChange = (e: MediaQueryListEvent) => { + setStore({ colorScheme: e.matches ? "dark" : "light" }) + } + + mediaQuery.addEventListener("change", handleColorSchemeChange) + onCleanup(() => mediaQuery.removeEventListener("change", handleColorSchemeChange)) + }) + const onPreviousMonth = async () => { const month = store.month === 0 ? 11 : store.month - 1 const year = store.month === 0 ? 
store.year - 1 : store.year @@ -210,6 +225,15 @@ export function GraphSection() { const dates = getDates() if (!data?.usage?.length) return null + store.colorScheme + const styles = getComputedStyle(document.documentElement) + const colorTextMuted = styles.getPropertyValue("--color-text-muted").trim() + const colorBorderMuted = styles.getPropertyValue("--color-border-muted").trim() + const colorBgElevated = styles.getPropertyValue("--color-bg-elevated").trim() + const colorText = styles.getPropertyValue("--color-text").trim() + const colorTextSecondary = styles.getPropertyValue("--color-text-secondary").trim() + const colorBorder = styles.getPropertyValue("--color-border").trim() + const dailyData = new Map>() for (const dateKey of dates) dailyData.set(dateKey, new Map()) @@ -252,7 +276,7 @@ export function GraphSection() { ticks: { maxRotation: 0, autoSkipPadding: 20, - color: "rgba(255, 255, 255, 0.5)", + color: colorTextMuted, font: { family: "monospace", size: 11, @@ -263,10 +287,10 @@ export function GraphSection() { stacked: true, beginAtZero: true, grid: { - color: "rgba(255, 255, 255, 0.1)", + color: colorBorderMuted, }, ticks: { - color: "rgba(255, 255, 255, 0.5)", + color: colorTextMuted, font: { family: "monospace", size: 11, @@ -282,10 +306,10 @@ export function GraphSection() { tooltip: { mode: "index", intersect: false, - backgroundColor: "rgba(0, 0, 0, 0.9)", - titleColor: "rgba(255, 255, 255, 0.9)", - bodyColor: "rgba(255, 255, 255, 0.8)", - borderColor: "rgba(255, 255, 255, 0.1)", + backgroundColor: colorBgElevated, + titleColor: colorText, + bodyColor: colorTextSecondary, + borderColor: colorBorder, borderWidth: 1, padding: 12, displayColors: true, @@ -301,7 +325,7 @@ export function GraphSection() { display: true, position: "bottom", labels: { - color: "rgba(255, 255, 255, 0.7)", + color: colorTextSecondary, font: { size: 12, }, From 8898bf7ca44a070d63f07a16812747f6535e8431 Mon Sep 17 00:00:00 2001 From: Aiden Cline Date: Wed, 3 Dec 2025 13:32:20 -0600 Subject: [PATCH 11/27] ci: tweak review cmd --- .github/workflows/review.yml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/review.yml b/.github/workflows/review.yml index 6e4f02021c73..78b7868ff562 100644 --- a/.github/workflows/review.yml +++ b/.github/workflows/review.yml @@ -50,10 +50,9 @@ jobs: - name: Get PR details id: pr-details run: | - PR_DATA=$(gh api /repos/${{ github.repository }}/pulls/${{ steps.pr-number.outputs.number }}) - echo "title=$(echo "$PR_DATA" | jq -r .title)" >> $GITHUB_OUTPUT - echo "body=$(echo "$PR_DATA" | jq -r .body)" >> $GITHUB_OUTPUT - echo "sha=$(echo "$PR_DATA" | jq -r .head.sha)" >> $GITHUB_OUTPUT + gh api /repos/${{ github.repository }}/pulls/${{ steps.pr-number.outputs.number }} > pr_data.json + echo "title=$(jq -r .title pr_data.json)" >> $GITHUB_OUTPUT + echo "sha=$(jq -r .head.sha pr_data.json)" >> $GITHUB_OUTPUT env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -63,6 +62,7 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} OPENCODE_PERMISSION: '{ "bash": { "gh*": "allow", "gh pr review*": "deny", "*": "deny" } }' run: | + PR_BODY=$(jq -r .body pr_data.json) opencode run -m anthropic/claude-sonnet-4-5 "A new pull request has been created: '${{ steps.pr-details.outputs.title }}' @@ -70,7 +70,7 @@ jobs: - ${{ steps.pr-details.outputs.body }} + $PR_BODY Please check all the code changes in this pull request against the style guide, also look for any bugs if they exist. 
Diffs are important but make sure you read the entire file to get proper context. Make it clear the suggestions are merely suggestions and the human can decide what to do @@ -78,13 +78,13 @@ jobs: Use the gh cli to create comments on the files for the violations. Try to leave the comment on the exact line number. If you have a suggested fix include it in a suggestion code block. Command MUST be like this. - ``` + \`\`\` gh api \ --method POST \ - -H "Accept: application/vnd.github+json" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ + -H \"Accept: application/vnd.github+json\" \ + -H \"X-GitHub-Api-Version: 2022-11-28\" \ /repos/${{ github.repository }}/pulls/${{ steps.pr-number.outputs.number }}/comments \ - -f 'body=[summary of issue]' -f 'commit_id=${{ steps.pr-details.outputs.sha }}' -f 'path=[path-to-file]' -F "line=[line]" -f 'side=RIGHT' - ``` + -f 'body=[summary of issue]' -f 'commit_id=${{ steps.pr-details.outputs.sha }}' -f 'path=[path-to-file]' -F \"line=[line]\" -f 'side=RIGHT' + \`\`\` Only create comments for actual violations. If the code follows all guidelines, don't run any gh commands." From 70f47223569c960e6f8c696a0248e913c6d9e5ed Mon Sep 17 00:00:00 2001 From: Aiden Cline Date: Wed, 3 Dec 2025 13:35:49 -0600 Subject: [PATCH 12/27] ci: review ready for review action --- .github/workflows/review.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/review.yml b/.github/workflows/review.yml index 78b7868ff562..d12cabee2437 100644 --- a/.github/workflows/review.yml +++ b/.github/workflows/review.yml @@ -2,7 +2,7 @@ name: Guidelines Check on: pull_request_target: - types: [opened] + types: [opened, ready_for_review] issue_comment: types: [created] From c00d4885c682ed6539a68144c3baf392d153b6a4 Mon Sep 17 00:00:00 2001 From: Ariane Emory <97994360+ariane-emory@users.noreply.github.com> Date: Wed, 3 Dec 2025 14:38:09 -0500 Subject: [PATCH 13/27] feat: add tool_details keybind w/ no default (#4976) Co-authored-by: Aiden Cline <63023139+rekram1-node@users.noreply.github.com> --- packages/opencode/src/cli/cmd/tui/routes/session/index.tsx | 1 + packages/opencode/src/config/config.ts | 1 + packages/sdk/js/src/gen/types.gen.ts | 4 ++++ 3 files changed, 6 insertions(+) diff --git a/packages/opencode/src/cli/cmd/tui/routes/session/index.tsx b/packages/opencode/src/cli/cmd/tui/routes/session/index.tsx index b97b89c1277d..5d9ebbc7a133 100644 --- a/packages/opencode/src/cli/cmd/tui/routes/session/index.tsx +++ b/packages/opencode/src/cli/cmd/tui/routes/session/index.tsx @@ -483,6 +483,7 @@ export function Session() { { title: showDetails() ? 
"Hide tool details" : "Show tool details", value: "session.toggle.actions", + keybind: "tool_details", category: "Session", onSelect: (dialog) => { const newValue = !showDetails() diff --git a/packages/opencode/src/config/config.ts b/packages/opencode/src/config/config.ts index 09a9e63b97b3..2bdbbca5b070 100644 --- a/packages/opencode/src/config/config.ts +++ b/packages/opencode/src/config/config.ts @@ -427,6 +427,7 @@ export namespace Config { .optional() .default("h") .describe("Toggle code block concealment in messages"), + tool_details: z.string().optional().default("none").describe("Toggle tool details visibility"), model_list: z.string().optional().default("m").describe("List available models"), model_cycle_recent: z.string().optional().default("f2").describe("Next recently used model"), model_cycle_recent_reverse: z.string().optional().default("shift+f2").describe("Previous recently used model"), diff --git a/packages/sdk/js/src/gen/types.gen.ts b/packages/sdk/js/src/gen/types.gen.ts index 8ecb12935aef..80348fb9ad40 100644 --- a/packages/sdk/js/src/gen/types.gen.ts +++ b/packages/sdk/js/src/gen/types.gen.ts @@ -816,6 +816,10 @@ export type KeybindsConfig = { * Toggle code block concealment in messages */ messages_toggle_conceal?: string + /** + * Toggle tool details visibility + */ + tool_details?: string /** * List available models */ From f00380d285b5bd88cb6275c5f17585622554f041 Mon Sep 17 00:00:00 2001 From: Aiden Cline Date: Wed, 3 Dec 2025 16:16:08 -0600 Subject: [PATCH 14/27] ci: review tweak --- .github/workflows/review.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/review.yml b/.github/workflows/review.yml index d12cabee2437..c726081fc8b7 100644 --- a/.github/workflows/review.yml +++ b/.github/workflows/review.yml @@ -74,6 +74,7 @@ jobs: Please check all the code changes in this pull request against the style guide, also look for any bugs if they exist. Diffs are important but make sure you read the entire file to get proper context. Make it clear the suggestions are merely suggestions and the human can decide what to do + When critiquing code against the style guide, be sure that the code is ACTUALLY in violation, don't complain about else statements if they already use early returns there. You may complain about excessive nesting though, regardless of else statement usage. Use the gh cli to create comments on the files for the violations. Try to leave the comment on the exact line number. If you have a suggested fix include it in a suggestion code block. 
From 7a4aa68706376f9d2c431437e9a7f020ffc40bae Mon Sep 17 00:00:00 2001 From: Frank Date: Wed, 3 Dec 2025 18:12:23 -0500 Subject: [PATCH 15/27] zen: fix chart loading closes #5030 --- .../console/app/src/routes/workspace/[id]/graph-section.tsx | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packages/console/app/src/routes/workspace/[id]/graph-section.tsx b/packages/console/app/src/routes/workspace/[id]/graph-section.tsx index 2423605d201d..46418d61880c 100644 --- a/packages/console/app/src/routes/workspace/[id]/graph-section.tsx +++ b/packages/console/app/src/routes/workspace/[id]/graph-section.tsx @@ -221,6 +221,8 @@ export function GraphSection() { const isCurrentMonth = () => store.year === now.getFullYear() && store.month === now.getMonth() const chartConfig = createMemo((): ChartConfiguration | null => { + if (typeof window === "undefined") return null + const data = getData() const dates = getDates() if (!data?.usage?.length) return null From ee4437ff32fc2acbd2220060fc980a096730bcee Mon Sep 17 00:00:00 2001 From: Dax Raad Date: Wed, 3 Dec 2025 18:30:42 -0500 Subject: [PATCH 16/27] core: add provider test coverage for upcoming refactor Add comprehensive test suite for Provider module to ensure safe refactoring of provider internals. Tests cover: - Provider loading from env vars and config - Provider filtering (disabled_providers, enabled_providers) - Model whitelist/blacklist - Model aliasing and custom providers - getModel, getProvider, closest, defaultModel functions Also adds Env module for instance-scoped environment variable access, enabling isolated test environments without global state pollution. --- packages/opencode/src/env/index.ts | 26 + packages/opencode/src/provider/provider.ts | 28 +- packages/opencode/test/preload.ts | 33 +- .../opencode/test/provider/provider.test.ts | 1729 +++++++++++++++++ 4 files changed, 1805 insertions(+), 11 deletions(-) create mode 100644 packages/opencode/src/env/index.ts create mode 100644 packages/opencode/test/provider/provider.test.ts diff --git a/packages/opencode/src/env/index.ts b/packages/opencode/src/env/index.ts new file mode 100644 index 000000000000..56a8c921f1e7 --- /dev/null +++ b/packages/opencode/src/env/index.ts @@ -0,0 +1,26 @@ +import { Instance } from "../project/instance" + +export namespace Env { + const state = Instance.state(() => { + return { ...process.env } as Record + }) + + export function get(key: string) { + const env = state() + return env[key] + } + + export function all() { + return state() + } + + export function set(key: string, value: string) { + const env = state() + env[key] = value + } + + export function remove(key: string) { + const env = state() + delete env[key] + } +} diff --git a/packages/opencode/src/provider/provider.ts b/packages/opencode/src/provider/provider.ts index fef4677bc28b..1123e6bbed46 100644 --- a/packages/opencode/src/provider/provider.ts +++ b/packages/opencode/src/provider/provider.ts @@ -9,6 +9,7 @@ import { Plugin } from "../plugin" import { ModelsDev } from "./models" import { NamedError } from "@opencode-ai/util/error" import { Auth } from "../auth" +import { Env } from "../env" import { Instance } from "../project/instance" import { Flag } from "../flag/flag" import { iife } from "@/util/iife" @@ -64,7 +65,8 @@ export namespace Provider { }, async opencode(input) { const hasKey = await (async () => { - if (input.env.some((item) => process.env[item])) return true + const env = Env.all() + if (input.env.some((item) => env[item])) return true if (await Auth.get(input.id)) 
return true return false })() @@ -128,7 +130,7 @@ export namespace Provider { } }, "azure-cognitive-services": async () => { - const resourceName = process.env["AZURE_COGNITIVE_SERVICES_RESOURCE_NAME"] + const resourceName = Env.get("AZURE_COGNITIVE_SERVICES_RESOURCE_NAME") return { autoload: false, async getModel(sdk: any, modelID: string, options?: Record) { @@ -144,10 +146,15 @@ export namespace Provider { } }, "amazon-bedrock": async () => { - if (!process.env["AWS_PROFILE"] && !process.env["AWS_ACCESS_KEY_ID"] && !process.env["AWS_BEARER_TOKEN_BEDROCK"]) - return { autoload: false } + const [awsProfile, awsAccessKeyId, awsBearerToken, awsRegion] = await Promise.all([ + Env.get("AWS_PROFILE"), + Env.get("AWS_ACCESS_KEY_ID"), + Env.get("AWS_BEARER_TOKEN_BEDROCK"), + Env.get("AWS_REGION"), + ]) + if (!awsProfile && !awsAccessKeyId && !awsBearerToken) return { autoload: false } - const region = process.env["AWS_REGION"] ?? "us-east-1" + const region = awsRegion ?? "us-east-1" const { fromNodeProviderChain } = await import(await BunProc.install("@aws-sdk/credential-providers")) return { @@ -246,8 +253,8 @@ export namespace Provider { } }, "google-vertex": async () => { - const project = process.env["GOOGLE_CLOUD_PROJECT"] ?? process.env["GCP_PROJECT"] ?? process.env["GCLOUD_PROJECT"] - const location = process.env["GOOGLE_CLOUD_LOCATION"] ?? process.env["VERTEX_LOCATION"] ?? "us-east5" + const project = Env.get("GOOGLE_CLOUD_PROJECT") ?? Env.get("GCP_PROJECT") ?? Env.get("GCLOUD_PROJECT") + const location = Env.get("GOOGLE_CLOUD_LOCATION") ?? Env.get("VERTEX_LOCATION") ?? "us-east5" const autoload = Boolean(project) if (!autoload) return { autoload: false } return { @@ -263,8 +270,8 @@ export namespace Provider { } }, "google-vertex-anthropic": async () => { - const project = process.env["GOOGLE_CLOUD_PROJECT"] ?? process.env["GCP_PROJECT"] ?? process.env["GCLOUD_PROJECT"] - const location = process.env["GOOGLE_CLOUD_LOCATION"] ?? process.env["VERTEX_LOCATION"] ?? "global" + const project = Env.get("GOOGLE_CLOUD_PROJECT") ?? Env.get("GCP_PROJECT") ?? Env.get("GCLOUD_PROJECT") + const location = Env.get("GOOGLE_CLOUD_LOCATION") ?? Env.get("VERTEX_LOCATION") ?? 
"global" const autoload = Boolean(project) if (!autoload) return { autoload: false } return { @@ -435,9 +442,10 @@ export namespace Provider { } // load env + const env = Env.all() for (const [providerID, provider] of Object.entries(database)) { if (disabled.has(providerID)) continue - const apiKey = provider.env.map((item) => process.env[item]).at(0) + const apiKey = provider.env.map((item) => env[item]).find(Boolean) if (!apiKey) continue mergeProvider( providerID, diff --git a/packages/opencode/test/preload.ts b/packages/opencode/test/preload.ts index 16fb3cd21840..43d012740245 100644 --- a/packages/opencode/test/preload.ts +++ b/packages/opencode/test/preload.ts @@ -1,4 +1,35 @@ -import { Log } from "../src/util/log" +// IMPORTANT: Set env vars BEFORE any imports from src/ directory +// xdg-basedir reads env vars at import time, so we must set these first +import os from "os" +import path from "path" + +const testDataDir = path.join(os.tmpdir(), "opencode-test-data-" + process.pid) +process.env["XDG_DATA_HOME"] = testDataDir +process.env["XDG_CACHE_HOME"] = path.join(testDataDir, "cache") +process.env["XDG_CONFIG_HOME"] = path.join(testDataDir, "config") +process.env["XDG_STATE_HOME"] = path.join(testDataDir, "state") + +// Clear provider env vars to ensure clean test state +delete process.env["ANTHROPIC_API_KEY"] +delete process.env["OPENAI_API_KEY"] +delete process.env["GOOGLE_API_KEY"] +delete process.env["GOOGLE_GENERATIVE_AI_API_KEY"] +delete process.env["AZURE_OPENAI_API_KEY"] +delete process.env["AWS_ACCESS_KEY_ID"] +delete process.env["AWS_PROFILE"] +delete process.env["OPENROUTER_API_KEY"] +delete process.env["GROQ_API_KEY"] +delete process.env["MISTRAL_API_KEY"] +delete process.env["PERPLEXITY_API_KEY"] +delete process.env["TOGETHER_API_KEY"] +delete process.env["XAI_API_KEY"] +delete process.env["DEEPSEEK_API_KEY"] +delete process.env["FIREWORKS_API_KEY"] +delete process.env["CEREBRAS_API_KEY"] +delete process.env["SAMBANOVA_API_KEY"] + +// Now safe to import from src/ +const { Log } = await import("../src/util/log") Log.init({ print: false, diff --git a/packages/opencode/test/provider/provider.test.ts b/packages/opencode/test/provider/provider.test.ts new file mode 100644 index 000000000000..fa31d9d4f13c --- /dev/null +++ b/packages/opencode/test/provider/provider.test.ts @@ -0,0 +1,1729 @@ +import { test, expect } from "bun:test" +import path from "path" +import { tmpdir } from "../fixture/fixture" +import { Instance } from "../../src/project/instance" +import { Provider } from "../../src/provider/provider" +import { Env } from "../../src/env" + +test("provider loaded from env variable", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const providers = await Provider.list() + expect(providers["anthropic"]).toBeDefined() + // Note: source becomes "custom" because CUSTOM_LOADERS run after env loading + // and anthropic has a custom loader that merges additional options + expect(providers["anthropic"].source).toBe("custom") + }, + }) +}) + +test("provider loaded from config with apiKey option", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: 
"https://opencode.ai/config.json", + provider: { + anthropic: { + options: { + apiKey: "config-api-key", + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + expect(providers["anthropic"]).toBeDefined() + }, + }) +}) + +test("disabled_providers excludes provider", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + disabled_providers: ["anthropic"], + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const providers = await Provider.list() + expect(providers["anthropic"]).toBeUndefined() + }, + }) +}) + +test("enabled_providers restricts to only listed providers", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + enabled_providers: ["anthropic"], + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + Env.set("OPENAI_API_KEY", "test-openai-key") + }, + fn: async () => { + const providers = await Provider.list() + expect(providers["anthropic"]).toBeDefined() + expect(providers["openai"]).toBeUndefined() + }, + }) +}) + +test("model whitelist filters models for provider", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + anthropic: { + whitelist: ["claude-sonnet-4-20250514"], + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const providers = await Provider.list() + expect(providers["anthropic"]).toBeDefined() + const models = Object.keys(providers["anthropic"].info.models) + expect(models).toContain("claude-sonnet-4-20250514") + expect(models.length).toBe(1) + }, + }) +}) + +test("model blacklist excludes specific models", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + anthropic: { + blacklist: ["claude-sonnet-4-20250514"], + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const providers = await Provider.list() + expect(providers["anthropic"]).toBeDefined() + const models = Object.keys(providers["anthropic"].info.models) + expect(models).not.toContain("claude-sonnet-4-20250514") + }, + }) +}) + +test("custom model alias via config", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + anthropic: { + models: { + "my-alias": { + id: "claude-sonnet-4-20250514", + name: "My Custom Alias", + }, + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const providers = await 
Provider.list() + expect(providers["anthropic"]).toBeDefined() + expect(providers["anthropic"].info.models["my-alias"]).toBeDefined() + expect(providers["anthropic"].info.models["my-alias"].name).toBe("My Custom Alias") + }, + }) +}) + +test("custom provider with npm package", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "custom-provider": { + name: "Custom Provider", + npm: "@ai-sdk/openai-compatible", + api: "https://api.custom.com/v1", + env: ["CUSTOM_API_KEY"], + models: { + "custom-model": { + name: "Custom Model", + tool_call: true, + limit: { + context: 128000, + output: 4096, + }, + }, + }, + options: { + apiKey: "custom-key", + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + expect(providers["custom-provider"]).toBeDefined() + expect(providers["custom-provider"].info.name).toBe("Custom Provider") + expect(providers["custom-provider"].info.models["custom-model"]).toBeDefined() + }, + }) +}) + +test("env variable takes precedence, config merges options", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + anthropic: { + options: { + timeout: 60000, + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "env-api-key") + }, + fn: async () => { + const providers = await Provider.list() + expect(providers["anthropic"]).toBeDefined() + // Config options should be merged + expect(providers["anthropic"].options.timeout).toBe(60000) + }, + }) +}) + +test("getModel returns model for valid provider/model", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const model = await Provider.getModel("anthropic", "claude-sonnet-4-20250514") + expect(model).toBeDefined() + expect(model.providerID).toBe("anthropic") + expect(model.modelID).toBe("claude-sonnet-4-20250514") + expect(model.language).toBeDefined() + }, + }) +}) + +test("getModel throws ModelNotFoundError for invalid model", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + expect(Provider.getModel("anthropic", "nonexistent-model")).rejects.toThrow() + }, + }) +}) + +test("getModel throws ModelNotFoundError for invalid provider", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + expect(Provider.getModel("nonexistent-provider", "some-model")).rejects.toThrow() + }, + }) +}) + +test("parseModel correctly parses 
provider/model string", () => { + const result = Provider.parseModel("anthropic/claude-sonnet-4") + expect(result.providerID).toBe("anthropic") + expect(result.modelID).toBe("claude-sonnet-4") +}) + +test("parseModel handles model IDs with slashes", () => { + const result = Provider.parseModel("openrouter/anthropic/claude-3-opus") + expect(result.providerID).toBe("openrouter") + expect(result.modelID).toBe("anthropic/claude-3-opus") +}) + +test("defaultModel returns first available model when no config set", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const model = await Provider.defaultModel() + expect(model.providerID).toBeDefined() + expect(model.modelID).toBeDefined() + }, + }) +}) + +test("defaultModel respects config model setting", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + model: "anthropic/claude-sonnet-4-20250514", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const model = await Provider.defaultModel() + expect(model.providerID).toBe("anthropic") + expect(model.modelID).toBe("claude-sonnet-4-20250514") + }, + }) +}) + +test("provider with baseURL from config", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "custom-openai": { + name: "Custom OpenAI", + npm: "@ai-sdk/openai-compatible", + env: [], + models: { + "gpt-4": { + name: "GPT-4", + tool_call: true, + limit: { context: 128000, output: 4096 }, + }, + }, + options: { + apiKey: "test-key", + baseURL: "https://custom.openai.com/v1", + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + expect(providers["custom-openai"]).toBeDefined() + expect(providers["custom-openai"].options.baseURL).toBe("https://custom.openai.com/v1") + }, + }) +}) + +test("model cost defaults to zero when not specified", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "test-provider": { + name: "Test Provider", + npm: "@ai-sdk/openai-compatible", + env: [], + models: { + "test-model": { + name: "Test Model", + tool_call: true, + limit: { context: 128000, output: 4096 }, + }, + }, + options: { + apiKey: "test-key", + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + const model = providers["test-provider"].info.models["test-model"] + expect(model.cost.input).toBe(0) + expect(model.cost.output).toBe(0) + expect(model.cost.cache_read).toBe(0) + expect(model.cost.cache_write).toBe(0) + }, + }) +}) + +test("model options are merged from existing model", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), 
+ JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + anthropic: { + models: { + "claude-sonnet-4-20250514": { + options: { + customOption: "custom-value", + }, + }, + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const providers = await Provider.list() + const model = providers["anthropic"].info.models["claude-sonnet-4-20250514"] + expect(model.options.customOption).toBe("custom-value") + }, + }) +}) + +test("provider removed when all models filtered out", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + anthropic: { + whitelist: ["nonexistent-model"], + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const providers = await Provider.list() + expect(providers["anthropic"]).toBeUndefined() + }, + }) +}) + +test("closest finds model by partial match", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const result = await Provider.closest("anthropic", ["sonnet-4"]) + expect(result).toBeDefined() + expect(result?.providerID).toBe("anthropic") + expect(result?.modelID).toContain("sonnet-4") + }, + }) +}) + +test("closest returns undefined for nonexistent provider", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const result = await Provider.closest("nonexistent", ["model"]) + expect(result).toBeUndefined() + }, + }) +}) + +test("getModel uses realIdByKey for aliased models", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + anthropic: { + models: { + "my-sonnet": { + id: "claude-sonnet-4-20250514", + name: "My Sonnet Alias", + }, + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const providers = await Provider.list() + expect(providers["anthropic"].info.models["my-sonnet"]).toBeDefined() + + const model = await Provider.getModel("anthropic", "my-sonnet") + expect(model).toBeDefined() + expect(model.modelID).toBe("my-sonnet") + expect(model.info.name).toBe("My Sonnet Alias") + }, + }) +}) + +test("provider api field sets default baseURL", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "custom-api": { + name: "Custom API", + npm: "@ai-sdk/openai-compatible", + api: "https://api.example.com/v1", + env: [], + models: { + "model-1": { + name: "Model 1", + tool_call: true, + limit: 
{ context: 8000, output: 2000 }, + }, + }, + options: { + apiKey: "test-key", + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + expect(providers["custom-api"].options.baseURL).toBe("https://api.example.com/v1") + }, + }) +}) + +test("explicit baseURL overrides api field", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "custom-api": { + name: "Custom API", + npm: "@ai-sdk/openai-compatible", + api: "https://api.example.com/v1", + env: [], + models: { + "model-1": { + name: "Model 1", + tool_call: true, + limit: { context: 8000, output: 2000 }, + }, + }, + options: { + apiKey: "test-key", + baseURL: "https://custom.override.com/v1", + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + expect(providers["custom-api"].options.baseURL).toBe("https://custom.override.com/v1") + }, + }) +}) + +test("model inherits properties from existing database model", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + anthropic: { + models: { + "claude-sonnet-4-20250514": { + name: "Custom Name for Sonnet", + }, + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const providers = await Provider.list() + const model = providers["anthropic"].info.models["claude-sonnet-4-20250514"] + expect(model.name).toBe("Custom Name for Sonnet") + expect(model.tool_call).toBe(true) + expect(model.attachment).toBe(true) + expect(model.limit.context).toBeGreaterThan(0) + }, + }) +}) + +test("disabled_providers prevents loading even with env var", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + disabled_providers: ["openai"], + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("OPENAI_API_KEY", "test-openai-key") + }, + fn: async () => { + const providers = await Provider.list() + expect(providers["openai"]).toBeUndefined() + }, + }) +}) + +test("enabled_providers with empty array allows no providers", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + enabled_providers: [], + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + Env.set("OPENAI_API_KEY", "test-openai-key") + }, + fn: async () => { + const providers = await Provider.list() + expect(Object.keys(providers).length).toBe(0) + }, + }) +}) + +test("whitelist and blacklist can be combined", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + anthropic: { + whitelist: ["claude-sonnet-4-20250514", "claude-opus-4-20250514"], + blacklist: ["claude-opus-4-20250514"], + 
}, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const providers = await Provider.list() + expect(providers["anthropic"]).toBeDefined() + const models = Object.keys(providers["anthropic"].info.models) + expect(models).toContain("claude-sonnet-4-20250514") + expect(models).not.toContain("claude-opus-4-20250514") + expect(models.length).toBe(1) + }, + }) +}) + +test("model modalities default correctly", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "test-provider": { + name: "Test", + npm: "@ai-sdk/openai-compatible", + env: [], + models: { + "test-model": { + name: "Test Model", + tool_call: true, + limit: { context: 8000, output: 2000 }, + }, + }, + options: { apiKey: "test" }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + const model = providers["test-provider"].info.models["test-model"] + expect(model.modalities).toEqual({ + input: ["text"], + output: ["text"], + }) + }, + }) +}) + +test("model with custom cost values", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "test-provider": { + name: "Test", + npm: "@ai-sdk/openai-compatible", + env: [], + models: { + "test-model": { + name: "Test Model", + tool_call: true, + limit: { context: 8000, output: 2000 }, + cost: { + input: 5, + output: 15, + cache_read: 2.5, + cache_write: 7.5, + }, + }, + }, + options: { apiKey: "test" }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + const model = providers["test-provider"].info.models["test-model"] + expect(model.cost.input).toBe(5) + expect(model.cost.output).toBe(15) + expect(model.cost.cache_read).toBe(2.5) + expect(model.cost.cache_write).toBe(7.5) + }, + }) +}) + +test("getSmallModel returns appropriate small model", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const model = await Provider.getSmallModel("anthropic") + expect(model).toBeDefined() + expect(model?.modelID).toContain("haiku") + }, + }) +}) + +test("getSmallModel respects config small_model override", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + small_model: "anthropic/claude-sonnet-4-20250514", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const model = await Provider.getSmallModel("anthropic") + expect(model).toBeDefined() + expect(model?.providerID).toBe("anthropic") + expect(model?.modelID).toBe("claude-sonnet-4-20250514") + }, + }) +}) + +test("provider.sort prioritizes preferred models", () => { + const models = [ + { 
id: "random-model", name: "Random" }, + { id: "claude-sonnet-4-latest", name: "Claude Sonnet 4" }, + { id: "gpt-5-turbo", name: "GPT-5 Turbo" }, + { id: "other-model", name: "Other" }, + ] as any[] + + const sorted = Provider.sort(models) + expect(sorted[0].id).toContain("sonnet-4") + expect(sorted[0].id).toContain("latest") + expect(sorted[sorted.length - 1].id).not.toContain("gpt-5") + expect(sorted[sorted.length - 1].id).not.toContain("sonnet-4") +}) + +test("multiple providers can be configured simultaneously", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + anthropic: { + options: { timeout: 30000 }, + }, + openai: { + options: { timeout: 60000 }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-anthropic-key") + Env.set("OPENAI_API_KEY", "test-openai-key") + }, + fn: async () => { + const providers = await Provider.list() + expect(providers["anthropic"]).toBeDefined() + expect(providers["openai"]).toBeDefined() + expect(providers["anthropic"].options.timeout).toBe(30000) + expect(providers["openai"].options.timeout).toBe(60000) + }, + }) +}) + +test("provider with custom npm package", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "local-llm": { + name: "Local LLM", + npm: "@ai-sdk/openai-compatible", + env: [], + models: { + "llama-3": { + name: "Llama 3", + tool_call: true, + limit: { context: 8192, output: 2048 }, + }, + }, + options: { + apiKey: "not-needed", + baseURL: "http://localhost:11434/v1", + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + expect(providers["local-llm"]).toBeDefined() + expect(providers["local-llm"].info.npm).toBe("@ai-sdk/openai-compatible") + expect(providers["local-llm"].options.baseURL).toBe("http://localhost:11434/v1") + }, + }) +}) + +// Edge cases for model configuration + +test("model alias name defaults to alias key when id differs", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + anthropic: { + models: { + sonnet: { + id: "claude-sonnet-4-20250514", + // no name specified - should default to "sonnet" (the key) + }, + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const providers = await Provider.list() + expect(providers["anthropic"].info.models["sonnet"].name).toBe("sonnet") + }, + }) +}) + +test("provider with multiple env var options only includes apiKey when single env", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "multi-env": { + name: "Multi Env Provider", + npm: "@ai-sdk/openai-compatible", + env: ["MULTI_ENV_KEY_1", "MULTI_ENV_KEY_2"], + models: { + "model-1": { + name: "Model 1", + tool_call: true, + limit: { context: 8000, output: 2000 }, + }, + }, + options: { + baseURL: 
"https://api.example.com/v1", + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("MULTI_ENV_KEY_1", "test-key") + }, + fn: async () => { + const providers = await Provider.list() + expect(providers["multi-env"]).toBeDefined() + // When multiple env options exist, apiKey should NOT be auto-set + expect(providers["multi-env"].options.apiKey).toBeUndefined() + }, + }) +}) + +test("provider with single env var includes apiKey automatically", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "single-env": { + name: "Single Env Provider", + npm: "@ai-sdk/openai-compatible", + env: ["SINGLE_ENV_KEY"], + models: { + "model-1": { + name: "Model 1", + tool_call: true, + limit: { context: 8000, output: 2000 }, + }, + }, + options: { + baseURL: "https://api.example.com/v1", + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("SINGLE_ENV_KEY", "my-api-key") + }, + fn: async () => { + const providers = await Provider.list() + expect(providers["single-env"]).toBeDefined() + // Single env option should auto-set apiKey + expect(providers["single-env"].options.apiKey).toBe("my-api-key") + }, + }) +}) + +test("model cost overrides existing cost values", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + anthropic: { + models: { + "claude-sonnet-4-20250514": { + cost: { + input: 999, + output: 888, + }, + }, + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const providers = await Provider.list() + const model = providers["anthropic"].info.models["claude-sonnet-4-20250514"] + expect(model.cost.input).toBe(999) + expect(model.cost.output).toBe(888) + }, + }) +}) + +test("completely new provider not in database can be configured", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "brand-new-provider": { + name: "Brand New", + npm: "@ai-sdk/openai-compatible", + env: [], + api: "https://new-api.com/v1", + models: { + "new-model": { + name: "New Model", + tool_call: true, + reasoning: true, + attachment: true, + temperature: true, + limit: { context: 32000, output: 8000 }, + modalities: { + input: ["text", "image"], + output: ["text"], + }, + }, + }, + options: { + apiKey: "new-key", + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + expect(providers["brand-new-provider"]).toBeDefined() + expect(providers["brand-new-provider"].info.name).toBe("Brand New") + const model = providers["brand-new-provider"].info.models["new-model"] + expect(model.reasoning).toBe(true) + expect(model.attachment).toBe(true) + expect(model.modalities?.input).toContain("image") + }, + }) +}) + +test("disabled_providers and enabled_providers interaction", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: 
"https://opencode.ai/config.json", + // enabled_providers takes precedence - only these are considered + enabled_providers: ["anthropic", "openai"], + // Then disabled_providers filters from the enabled set + disabled_providers: ["openai"], + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-anthropic") + Env.set("OPENAI_API_KEY", "test-openai") + Env.set("GOOGLE_GENERATIVE_AI_API_KEY", "test-google") + }, + fn: async () => { + const providers = await Provider.list() + // anthropic: in enabled, not in disabled = allowed + expect(providers["anthropic"]).toBeDefined() + // openai: in enabled, but also in disabled = NOT allowed + expect(providers["openai"]).toBeUndefined() + // google: not in enabled = NOT allowed (even though not disabled) + expect(providers["google"]).toBeUndefined() + }, + }) +}) + +test("model with tool_call false", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "no-tools": { + name: "No Tools Provider", + npm: "@ai-sdk/openai-compatible", + env: [], + models: { + "basic-model": { + name: "Basic Model", + tool_call: false, + limit: { context: 4000, output: 1000 }, + }, + }, + options: { apiKey: "test" }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + expect(providers["no-tools"].info.models["basic-model"].tool_call).toBe(false) + }, + }) +}) + +test("model defaults tool_call to true when not specified", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "default-tools": { + name: "Default Tools Provider", + npm: "@ai-sdk/openai-compatible", + env: [], + models: { + model: { + name: "Model", + // tool_call not specified + limit: { context: 4000, output: 1000 }, + }, + }, + options: { apiKey: "test" }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + expect(providers["default-tools"].info.models["model"].tool_call).toBe(true) + }, + }) +}) + +test("model headers are preserved", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "headers-provider": { + name: "Headers Provider", + npm: "@ai-sdk/openai-compatible", + env: [], + models: { + model: { + name: "Model", + tool_call: true, + limit: { context: 4000, output: 1000 }, + headers: { + "X-Custom-Header": "custom-value", + Authorization: "Bearer special-token", + }, + }, + }, + options: { apiKey: "test" }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + const model = providers["headers-provider"].info.models["model"] + expect(model.headers).toEqual({ + "X-Custom-Header": "custom-value", + Authorization: "Bearer special-token", + }) + }, + }) +}) + +test("provider env fallback - second env var used if first missing", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: 
"https://opencode.ai/config.json", + provider: { + "fallback-env": { + name: "Fallback Env Provider", + npm: "@ai-sdk/openai-compatible", + env: ["PRIMARY_KEY", "FALLBACK_KEY"], + models: { + model: { + name: "Model", + tool_call: true, + limit: { context: 4000, output: 1000 }, + }, + }, + options: { baseURL: "https://api.example.com" }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + // Only set fallback, not primary + Env.set("FALLBACK_KEY", "fallback-api-key") + }, + fn: async () => { + const providers = await Provider.list() + // Provider should load because fallback env var is set + expect(providers["fallback-env"]).toBeDefined() + }, + }) +}) + +test("getModel returns consistent results", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const model1 = await Provider.getModel("anthropic", "claude-sonnet-4-20250514") + const model2 = await Provider.getModel("anthropic", "claude-sonnet-4-20250514") + expect(model1.providerID).toEqual(model2.providerID) + expect(model1.modelID).toEqual(model2.modelID) + expect(model1.info).toEqual(model2.info) + }, + }) +}) + +test("provider name defaults to id when not in database", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "my-custom-id": { + // no name specified + npm: "@ai-sdk/openai-compatible", + env: [], + models: { + model: { + name: "Model", + tool_call: true, + limit: { context: 4000, output: 1000 }, + }, + }, + options: { apiKey: "test" }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + expect(providers["my-custom-id"].info.name).toBe("my-custom-id") + }, + }) +}) + +test("ModelNotFoundError includes suggestions for typos", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + try { + await Provider.getModel("anthropic", "claude-sonet-4") // typo: sonet instead of sonnet + expect(true).toBe(false) // Should not reach here + } catch (e: any) { + expect(e.data.suggestions).toBeDefined() + expect(e.data.suggestions.length).toBeGreaterThan(0) + } + }, + }) +}) + +test("ModelNotFoundError for provider includes suggestions", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + try { + await Provider.getModel("antropic", "claude-sonnet-4") // typo: antropic + expect(true).toBe(false) // Should not reach here + } catch (e: any) { + expect(e.data.suggestions).toBeDefined() + expect(e.data.suggestions).toContain("anthropic") + } + }, + 
}) +}) + +test("getProvider returns undefined for nonexistent provider", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const provider = await Provider.getProvider("nonexistent") + expect(provider).toBeUndefined() + }, + }) +}) + +test("getProvider returns provider info", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const provider = await Provider.getProvider("anthropic") + expect(provider).toBeDefined() + expect(provider?.info.id).toBe("anthropic") + }, + }) +}) + +test("closest returns undefined when no partial match found", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const result = await Provider.closest("anthropic", ["nonexistent-xyz-model"]) + expect(result).toBeUndefined() + }, + }) +}) + +test("closest checks multiple query terms in order", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + // First term won't match, second will + const result = await Provider.closest("anthropic", ["nonexistent", "haiku"]) + expect(result).toBeDefined() + expect(result?.modelID).toContain("haiku") + }, + }) +}) + +test("model limit defaults to zero when not specified", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "no-limit": { + name: "No Limit Provider", + npm: "@ai-sdk/openai-compatible", + env: [], + models: { + model: { + name: "Model", + tool_call: true, + // no limit specified + }, + }, + options: { apiKey: "test" }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + const model = providers["no-limit"].info.models["model"] + expect(model.limit.context).toBe(0) + expect(model.limit.output).toBe(0) + }, + }) +}) + +test("provider options are deeply merged", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + anthropic: { + options: { + headers: { + "X-Custom": "custom-value", + }, + timeout: 30000, + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const providers = await Provider.list() + // Custom options 
should be merged + expect(providers["anthropic"].options.timeout).toBe(30000) + expect(providers["anthropic"].options.headers["X-Custom"]).toBe("custom-value") + // anthropic custom loader adds its own headers, they should coexist + expect(providers["anthropic"].options.headers["anthropic-beta"]).toBeDefined() + }, + }) +}) From 6d3fc63658c0ed37e97c4adaacf76575874d2fb5 Mon Sep 17 00:00:00 2001 From: Dax Date: Wed, 3 Dec 2025 21:09:03 -0500 Subject: [PATCH 17/27] core: refactor provider and model system (#5033) Co-authored-by: opencode-agent[bot] Co-authored-by: thdxr --- .../desktop/src/components/prompt-input.tsx | 4 +- packages/opencode/src/agent/agent.ts | 3 +- packages/opencode/src/cli/cmd/models.ts | 2 +- packages/opencode/src/config/config.ts | 74 +-- packages/opencode/src/provider/models.ts | 78 ++- packages/opencode/src/provider/provider.ts | 509 +++++++++++------- packages/opencode/src/provider/transform.ts | 62 +-- packages/opencode/src/server/server.ts | 17 +- packages/opencode/src/session/compaction.ts | 129 +++-- packages/opencode/src/session/index.ts | 20 +- packages/opencode/src/session/processor.ts | 25 +- packages/opencode/src/session/prompt.ts | 277 +++++----- packages/opencode/src/session/summary.ts | 21 +- packages/opencode/src/session/system.ts | 14 +- packages/opencode/src/share/share-next.ts | 7 +- packages/opencode/src/tool/batch.ts | 2 +- packages/opencode/src/tool/read.ts | 2 +- packages/opencode/src/tool/registry.ts | 8 +- .../opencode/test/provider/provider.test.ts | 102 ++-- packages/sdk/js/src/gen/types.gen.ts | 254 +++++---- 20 files changed, 891 insertions(+), 719 deletions(-) diff --git a/packages/desktop/src/components/prompt-input.tsx b/packages/desktop/src/components/prompt-input.tsx index 976924223702..a311ae763854 100644 --- a/packages/desktop/src/components/prompt-input.tsx +++ b/packages/desktop/src/components/prompt-input.tsx @@ -456,9 +456,9 @@ export const PromptInput: Component = (props) => {
{i.name} - + - {DateTime.fromFormat(i.release_date, "yyyy-MM-dd").toFormat("LLL yyyy")} + {DateTime.fromFormat("unknown", "yyyy-MM-dd").toFormat("LLL yyyy")}
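The hunks that follow (agent.ts here, and compaction.ts and summary.ts further down) switch callers to the refactored two-step lookup: Provider.getModel(providerID, modelID) now returns only the normalized model metadata (capabilities, cost, limits), while Provider.getLanguage(model) resolves the underlying ai-sdk language model. A minimal sketch of that call pattern, assuming the API introduced in this patch (the import path and the provider/model identifiers are illustrative, not taken from any one caller):

    import { Provider } from "../provider/provider"

    async function resolveModel() {
      // Metadata lookup: throws ModelNotFoundError with suggestions on an unknown ID.
      const model = await Provider.getModel("anthropic", "claude-sonnet-4-20250514")
      // SDK lookup: lazily instantiates and caches the ai-sdk language model for this model.
      const language = await Provider.getLanguage(model)
      return { model, language }
    }

Keeping the two lookups separate lets Provider.list() and the pickers work purely with metadata, while SDK instances are only created (and cached by npm package plus options) when a request actually needs a language model.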
diff --git a/packages/opencode/src/agent/agent.ts b/packages/opencode/src/agent/agent.ts index b901b95c2fdb..0e7a7c5d3bfd 100644 --- a/packages/opencode/src/agent/agent.ts +++ b/packages/opencode/src/agent/agent.ts @@ -224,6 +224,7 @@ export namespace Agent { export async function generate(input: { description: string }) { const defaultModel = await Provider.defaultModel() const model = await Provider.getModel(defaultModel.providerID, defaultModel.modelID) + const language = await Provider.getLanguage(model) const system = SystemPrompt.header(defaultModel.providerID) system.push(PROMPT_GENERATE) const existing = await list() @@ -241,7 +242,7 @@ export namespace Agent { content: `Create an agent configuration based on this request: \"${input.description}\".\n\nIMPORTANT: The following identifiers already exist and must NOT be used: ${existing.map((i) => i.name).join(", ")}\n Return ONLY the JSON object, no other text, do not wrap in backticks`, }, ], - model: model.language, + model: language, schema: z.object({ identifier: z.string(), whenToUse: z.string(), diff --git a/packages/opencode/src/cli/cmd/models.ts b/packages/opencode/src/cli/cmd/models.ts index 1ae4ae12ca91..156dae91c676 100644 --- a/packages/opencode/src/cli/cmd/models.ts +++ b/packages/opencode/src/cli/cmd/models.ts @@ -38,7 +38,7 @@ export const ModelsCommand = cmd({ function printModels(providerID: string, verbose?: boolean) { const provider = providers[providerID] - const sortedModels = Object.entries(provider.info.models).sort(([a], [b]) => a.localeCompare(b)) + const sortedModels = Object.entries(provider.models).sort(([a], [b]) => a.localeCompare(b)) for (const [modelID, model] of sortedModels) { process.stdout.write(`${providerID}/${modelID}`) process.stdout.write(EOL) diff --git a/packages/opencode/src/config/config.ts b/packages/opencode/src/config/config.ts index 2bdbbca5b070..2c691cedb5f0 100644 --- a/packages/opencode/src/config/config.ts +++ b/packages/opencode/src/config/config.ts @@ -470,6 +470,42 @@ export namespace Config { }) export type Layout = z.infer + export const Provider = ModelsDev.Provider.partial() + .extend({ + whitelist: z.array(z.string()).optional(), + blacklist: z.array(z.string()).optional(), + models: z.record(z.string(), ModelsDev.Model.partial()).optional(), + options: z + .object({ + apiKey: z.string().optional(), + baseURL: z.string().optional(), + enterpriseUrl: z.string().optional().describe("GitHub Enterprise URL for copilot authentication"), + setCacheKey: z.boolean().optional().describe("Enable promptCacheKey for this provider (default false)"), + timeout: z + .union([ + z + .number() + .int() + .positive() + .describe( + "Timeout in milliseconds for requests to this provider. Default is 300000 (5 minutes). Set to false to disable timeout.", + ), + z.literal(false).describe("Disable timeout for this provider entirely."), + ]) + .optional() + .describe( + "Timeout in milliseconds for requests to this provider. Default is 300000 (5 minutes). 
Set to false to disable timeout.", + ), + }) + .catchall(z.any()) + .optional(), + }) + .strict() + .meta({ + ref: "ProviderConfig", + }) + export type Provider = z.infer + export const Info = z .object({ $schema: z.string().optional().describe("JSON schema reference for configuration validation"), @@ -536,43 +572,7 @@ export namespace Config { .optional() .describe("Agent configuration, see https://opencode.ai/docs/agent"), provider: z - .record( - z.string(), - ModelsDev.Provider.partial() - .extend({ - whitelist: z.array(z.string()).optional(), - blacklist: z.array(z.string()).optional(), - models: z.record(z.string(), ModelsDev.Model.partial()).optional(), - options: z - .object({ - apiKey: z.string().optional(), - baseURL: z.string().optional(), - enterpriseUrl: z.string().optional().describe("GitHub Enterprise URL for copilot authentication"), - setCacheKey: z - .boolean() - .optional() - .describe("Enable promptCacheKey for this provider (default false)"), - timeout: z - .union([ - z - .number() - .int() - .positive() - .describe( - "Timeout in milliseconds for requests to this provider. Default is 300000 (5 minutes). Set to false to disable timeout.", - ), - z.literal(false).describe("Disable timeout for this provider entirely."), - ]) - .optional() - .describe( - "Timeout in milliseconds for requests to this provider. Default is 300000 (5 minutes). Set to false to disable timeout.", - ), - }) - .catchall(z.any()) - .optional(), - }) - .strict(), - ) + .record(z.string(), Provider) .optional() .describe("Custom provider configurations and model overrides"), mcp: z.record(z.string(), Mcp).optional().describe("MCP (Model Context Protocol) server configurations"), diff --git a/packages/opencode/src/provider/models.ts b/packages/opencode/src/provider/models.ts index 676837e1521f..3d28787c88ff 100644 --- a/packages/opencode/src/provider/models.ts +++ b/packages/opencode/src/provider/models.ts @@ -9,16 +9,16 @@ export namespace ModelsDev { const log = Log.create({ service: "models.dev" }) const filepath = path.join(Global.Path.cache, "models.json") - export const Model = z - .object({ - id: z.string(), - name: z.string(), - release_date: z.string(), - attachment: z.boolean(), - reasoning: z.boolean(), - temperature: z.boolean(), - tool_call: z.boolean(), - cost: z.object({ + export const Model = z.object({ + id: z.string(), + name: z.string(), + release_date: z.string(), + attachment: z.boolean(), + reasoning: z.boolean(), + temperature: z.boolean(), + tool_call: z.boolean(), + cost: z + .object({ input: z.number(), output: z.number(), cache_read: z.number().optional(), @@ -31,40 +31,34 @@ export namespace ModelsDev { cache_write: z.number().optional(), }) .optional(), - }), - limit: z.object({ - context: z.number(), - output: z.number(), - }), - modalities: z - .object({ - input: z.array(z.enum(["text", "audio", "image", "video", "pdf"])), - output: z.array(z.enum(["text", "audio", "image", "video", "pdf"])), - }) - .optional(), - experimental: z.boolean().optional(), - status: z.enum(["alpha", "beta", "deprecated"]).optional(), - options: z.record(z.string(), z.any()), - headers: z.record(z.string(), z.string()).optional(), - provider: z.object({ npm: z.string() }).optional(), - }) - .meta({ - ref: "Model", - }) + }) + .optional(), + limit: z.object({ + context: z.number(), + output: z.number(), + }), + modalities: z + .object({ + input: z.array(z.enum(["text", "audio", "image", "video", "pdf"])), + output: z.array(z.enum(["text", "audio", "image", "video", "pdf"])), + }) + 
.optional(), + experimental: z.boolean().optional(), + status: z.enum(["alpha", "beta", "deprecated"]).optional(), + options: z.record(z.string(), z.any()), + headers: z.record(z.string(), z.string()).optional(), + provider: z.object({ npm: z.string() }).optional(), + }) export type Model = z.infer - export const Provider = z - .object({ - api: z.string().optional(), - name: z.string(), - env: z.array(z.string()), - id: z.string(), - npm: z.string().optional(), - models: z.record(z.string(), Model), - }) - .meta({ - ref: "Provider", - }) + export const Provider = z.object({ + api: z.string().optional(), + name: z.string(), + env: z.array(z.string()), + id: z.string(), + npm: z.string().optional(), + models: z.record(z.string(), Model), + }) export type Provider = z.infer diff --git a/packages/opencode/src/provider/provider.ts b/packages/opencode/src/provider/provider.ts index 1123e6bbed46..2df4bc96b99f 100644 --- a/packages/opencode/src/provider/provider.ts +++ b/packages/opencode/src/provider/provider.ts @@ -1,8 +1,8 @@ import z from "zod" import fuzzysort from "fuzzysort" import { Config } from "../config/config" -import { mergeDeep, sortBy } from "remeda" -import { NoSuchModelError, type LanguageModel, type Provider as SDK } from "ai" +import { mapValues, mergeDeep, sortBy } from "remeda" +import { NoSuchModelError, type Provider as SDK } from "ai" import { Log } from "../util/log" import { BunProc } from "../bun" import { Plugin } from "../plugin" @@ -23,7 +23,7 @@ import { createVertex } from "@ai-sdk/google-vertex" import { createVertexAnthropic } from "@ai-sdk/google-vertex/anthropic" import { createOpenAI } from "@ai-sdk/openai" import { createOpenAICompatible } from "@ai-sdk/openai-compatible" -import { createOpenRouter } from "@openrouter/ai-sdk-provider" +import { createOpenRouter, type LanguageModelV2 } from "@openrouter/ai-sdk-provider" import { createOpenaiCompatible as createGitHubCopilotOpenAICompatible } from "./sdk/openai-compatible/src" export namespace Provider { @@ -43,14 +43,13 @@ export namespace Provider { "@ai-sdk/github-copilot": createGitHubCopilotOpenAICompatible, } - type CustomLoader = (provider: ModelsDev.Provider) => Promise<{ + type CustomModelLoader = (sdk: any, modelID: string, options?: Record) => Promise + type CustomLoader = (provider: Info) => Promise<{ autoload: boolean - getModel?: (sdk: any, modelID: string, options?: Record) => Promise + getModel?: CustomModelLoader options?: Record }> - type Source = "env" | "config" | "custom" | "api" - const CUSTOM_LOADERS: Record = { async anthropic() { return { @@ -280,7 +279,7 @@ export namespace Provider { project, location, }, - async getModel(sdk: any, modelID: string) { + async getModel(sdk, modelID) { const id = String(modelID).trim() return sdk.languageModel(id) }, @@ -299,10 +298,155 @@ export namespace Provider { }, } + export const Model = z + .object({ + id: z.string(), + providerID: z.string(), + api: z.object({ + id: z.string(), + url: z.string(), + npm: z.string(), + }), + name: z.string(), + capabilities: z.object({ + temperature: z.boolean(), + reasoning: z.boolean(), + attachment: z.boolean(), + toolcall: z.boolean(), + input: z.object({ + text: z.boolean(), + audio: z.boolean(), + image: z.boolean(), + video: z.boolean(), + pdf: z.boolean(), + }), + output: z.object({ + text: z.boolean(), + audio: z.boolean(), + image: z.boolean(), + video: z.boolean(), + pdf: z.boolean(), + }), + }), + cost: z.object({ + input: z.number(), + output: z.number(), + cache: z.object({ + read: z.number(), + 
write: z.number(), + }), + experimentalOver200K: z + .object({ + input: z.number(), + output: z.number(), + cache: z.object({ + read: z.number(), + write: z.number(), + }), + }) + .optional(), + }), + limit: z.object({ + context: z.number(), + output: z.number(), + }), + status: z.enum(["alpha", "beta", "deprecated", "active"]), + options: z.record(z.string(), z.any()), + headers: z.record(z.string(), z.string()), + }) + .meta({ + ref: "Model", + }) + export type Model = z.infer + + export const Info = z + .object({ + id: z.string(), + name: z.string(), + source: z.enum(["env", "config", "custom", "api"]), + env: z.string().array(), + key: z.string().optional(), + options: z.record(z.string(), z.any()), + models: z.record(z.string(), Model), + }) + .meta({ + ref: "Provider", + }) + export type Info = z.infer + + function fromModelsDevModel(provider: ModelsDev.Provider, model: ModelsDev.Model): Model { + return { + id: model.id, + providerID: provider.id, + name: model.name, + api: { + id: model.id, + url: provider.api!, + npm: model.provider?.npm ?? provider.npm ?? provider.id, + }, + status: model.status ?? "active", + headers: model.headers ?? {}, + options: model.options ?? {}, + cost: { + input: model.cost?.input ?? 0, + output: model.cost?.output ?? 0, + cache: { + read: model.cost?.cache_read ?? 0, + write: model.cost?.cache_write ?? 0, + }, + experimentalOver200K: model.cost?.context_over_200k + ? { + cache: { + read: model.cost.context_over_200k.cache_read ?? 0, + write: model.cost.context_over_200k.cache_write ?? 0, + }, + input: model.cost.context_over_200k.input, + output: model.cost.context_over_200k.output, + } + : undefined, + }, + limit: { + context: model.limit.context, + output: model.limit.output, + }, + capabilities: { + temperature: model.temperature, + reasoning: model.reasoning, + attachment: model.attachment, + toolcall: model.tool_call, + input: { + text: model.modalities?.input?.includes("text") ?? false, + audio: model.modalities?.input?.includes("audio") ?? false, + image: model.modalities?.input?.includes("image") ?? false, + video: model.modalities?.input?.includes("video") ?? false, + pdf: model.modalities?.input?.includes("pdf") ?? false, + }, + output: { + text: model.modalities?.output?.includes("text") ?? false, + audio: model.modalities?.output?.includes("audio") ?? false, + image: model.modalities?.output?.includes("image") ?? false, + video: model.modalities?.output?.includes("video") ?? false, + pdf: model.modalities?.output?.includes("pdf") ?? false, + }, + }, + } + } + + export function fromModelsDevProvider(provider: ModelsDev.Provider): Info { + return { + id: provider.id, + source: "custom", + name: provider.name, + env: provider.env ?? [], + options: {}, + models: mapValues(provider.models, (model) => fromModelsDevModel(provider, model)), + } + } + const state = Instance.state(async () => { using _ = log.time("state") const config = await Config.get() - const database = await ModelsDev.get() + const database = mapValues(await ModelsDev.get(), fromModelsDevProvider) const disabled = new Set(config.disabled_providers ?? []) const enabled = config.enabled_providers ? 
new Set(config.enabled_providers) : null @@ -313,54 +457,15 @@ export namespace Provider { return true } - const providers: { - [providerID: string]: { - source: Source - info: ModelsDev.Provider - getModel?: (sdk: any, modelID: string, options?: Record) => Promise - options: Record - } + const providers: { [providerID: string]: Info } = {} + const languages = new Map() + const modelLoaders: { + [providerID: string]: CustomModelLoader } = {} - const models = new Map< - string, - { - providerID: string - modelID: string - info: ModelsDev.Model - language: LanguageModel - npm?: string - } - >() const sdk = new Map() - // Maps `${provider}/${key}` to the provider’s actual model ID for custom aliases. - const realIdByKey = new Map() log.info("init") - function mergeProvider( - id: string, - options: Record, - source: Source, - getModel?: (sdk: any, modelID: string, options?: Record) => Promise, - ) { - const provider = providers[id] - if (!provider) { - const info = database[id] - if (!info) return - if (info.api && !options["baseURL"]) options["baseURL"] = info.api - providers[id] = { - source, - info, - options, - getModel, - } - return - } - provider.options = mergeDeep(provider.options, options) - provider.source = source - provider.getModel = getModel ?? provider.getModel - } - const configProviders = Object.entries(config.provider ?? {}) // Add GitHub Copilot Enterprise provider that inherits from GitHub Copilot @@ -370,19 +475,31 @@ export namespace Provider { ...githubCopilot, id: "github-copilot-enterprise", name: "GitHub Copilot Enterprise", - // Enterprise uses a different API endpoint - will be set dynamically based on auth - api: undefined, } } + function mergeProvider(providerID: string, provider: Partial) { + const existing = providers[providerID] + if (existing) { + // @ts-expect-error + providers[providerID] = mergeDeep(existing, provider) + return + } + const match = database[providerID] + if (!match) return + // @ts-expect-error + providers[providerID] = mergeDeep(match, provider) + } + + // extend database from config for (const [providerID, provider] of configProviders) { const existing = database[providerID] - const parsed: ModelsDev.Provider = { + const parsed: Info = { id: providerID, - npm: provider.npm ?? existing?.npm, name: provider.name ?? existing?.name ?? providerID, env: provider.env ?? existing?.env ?? [], - api: provider.api ?? existing?.api, + options: mergeDeep(existing?.options ?? {}, provider.options ?? {}), + source: "config", models: existing?.models ?? {}, } @@ -393,51 +510,53 @@ export namespace Provider { if (model.id && model.id !== modelID) return modelID return existing?.name ?? modelID }) - const parsedModel: ModelsDev.Model = { + const parsedModel: Model = { id: modelID, - name, - release_date: model.release_date ?? existing?.release_date, - attachment: model.attachment ?? existing?.attachment ?? false, - reasoning: model.reasoning ?? existing?.reasoning ?? false, - temperature: model.temperature ?? existing?.temperature ?? false, - tool_call: model.tool_call ?? existing?.tool_call ?? true, - cost: - !model.cost && !existing?.cost - ? { - input: 0, - output: 0, - cache_read: 0, - cache_write: 0, - } - : { - cache_read: 0, - cache_write: 0, - ...existing?.cost, - ...model.cost, - }, - options: { - ...existing?.options, - ...model.options, + api: { + id: model.id ?? existing?.api.id ?? modelID, + npm: model.provider?.npm ?? provider.npm ?? existing?.api.npm ?? providerID, + url: provider?.api ?? existing?.api.url, }, - limit: model.limit ?? 
- existing?.limit ?? { - context: 0, - output: 0, + status: model.status ?? existing?.status ?? "active", + name, + providerID, + capabilities: { + temperature: model.temperature ?? existing?.capabilities.temperature ?? false, + reasoning: model.reasoning ?? existing?.capabilities.reasoning ?? false, + attachment: model.attachment ?? existing?.capabilities.attachment ?? false, + toolcall: model.tool_call ?? existing?.capabilities.toolcall ?? true, + input: { + text: model.modalities?.input?.includes("text") ?? existing?.capabilities.input.text ?? true, + audio: model.modalities?.input?.includes("audio") ?? existing?.capabilities.input.audio ?? false, + image: model.modalities?.input?.includes("image") ?? existing?.capabilities.input.image ?? false, + video: model.modalities?.input?.includes("video") ?? existing?.capabilities.input.video ?? false, + pdf: model.modalities?.input?.includes("pdf") ?? existing?.capabilities.input.pdf ?? false, }, - modalities: model.modalities ?? - existing?.modalities ?? { - input: ["text"], - output: ["text"], + output: { + text: model.modalities?.output?.includes("text") ?? existing?.capabilities.output.text ?? true, + audio: model.modalities?.output?.includes("audio") ?? existing?.capabilities.output.audio ?? false, + image: model.modalities?.output?.includes("image") ?? existing?.capabilities.output.image ?? false, + video: model.modalities?.output?.includes("video") ?? existing?.capabilities.output.video ?? false, + pdf: model.modalities?.output?.includes("pdf") ?? existing?.capabilities.output.pdf ?? false, }, - headers: model.headers, - provider: model.provider ?? existing?.provider, - } - if (model.id && model.id !== modelID) { - realIdByKey.set(`${providerID}/${modelID}`, model.id) + }, + cost: { + input: model?.cost?.input ?? existing?.cost?.input ?? 0, + output: model?.cost?.output ?? existing?.cost?.output ?? 0, + cache: { + read: model?.cost?.cache_read ?? existing?.cost?.cache.read ?? 0, + write: model?.cost?.cache_write ?? existing?.cost?.cache.write ?? 0, + }, + }, + options: mergeDeep(existing?.options ?? {}, model.options ?? {}), + limit: { + context: model.limit?.context ?? existing?.limit?.context ?? 0, + output: model.limit?.output ?? existing?.limit?.output ?? 0, + }, + headers: mergeDeep(existing?.headers ?? {}, model.headers ?? {}), } parsed.models[modelID] = parsedModel } - database[providerID] = parsed } @@ -447,19 +566,20 @@ export namespace Provider { if (disabled.has(providerID)) continue const apiKey = provider.env.map((item) => env[item]).find(Boolean) if (!apiKey) continue - mergeProvider( - providerID, - // only include apiKey if there's only one potential option - provider.env.length === 1 ? { apiKey } : {}, - "env", - ) + mergeProvider(providerID, { + source: "env", + key: provider.env.length === 1 ? apiKey : undefined, + }) } // load apikeys for (const [providerID, provider] of Object.entries(await Auth.all())) { if (disabled.has(providerID)) continue if (provider.type === "api") { - mergeProvider(providerID, { apiKey: provider.key }, "api") + mergeProvider(providerID, { + source: "api", + key: provider.key, + }) } } @@ -485,7 +605,10 @@ export namespace Provider { // Load for the main provider if auth exists if (auth) { const options = await plugin.auth.loader(() => Auth.get(providerID) as any, database[plugin.auth.provider]) - mergeProvider(plugin.auth.provider, options ?? 
{}, "custom") + mergeProvider(plugin.auth.provider, { + source: "custom", + options: options, + }) } // If this is github-copilot plugin, also register for github-copilot-enterprise if auth exists @@ -498,7 +621,10 @@ export namespace Provider { () => Auth.get(enterpriseProviderID) as any, database[enterpriseProviderID], ) - mergeProvider(enterpriseProviderID, enterpriseOptions ?? {}, "custom") + mergeProvider(enterpriseProviderID, { + source: "custom", + options: enterpriseOptions, + }) } } } @@ -508,13 +634,21 @@ export namespace Provider { if (disabled.has(providerID)) continue const result = await fn(database[providerID]) if (result && (result.autoload || providers[providerID])) { - mergeProvider(providerID, result.options ?? {}, "custom", result.getModel) + if (result.getModel) modelLoaders[providerID] = result.getModel + mergeProvider(providerID, { + source: "custom", + options: result.options, + }) } } // load config for (const [providerID, provider] of configProviders) { - mergeProvider(providerID, provider.options ?? {}, "config") + const partial: Partial = { source: "config" } + if (provider.env) partial.env = provider.env + if (provider.name) partial.name = provider.name + if (provider.options) partial.options = provider.options + mergeProvider(providerID, partial) } for (const [providerID, provider] of Object.entries(providers)) { @@ -524,49 +658,43 @@ export namespace Provider { } if (providerID === "github-copilot" || providerID === "github-copilot-enterprise") { - provider.info.npm = "@ai-sdk/github-copilot" + provider.models = mapValues(provider.models, (model) => ({ + ...model, + api: { + ...model.api, + npm: "@ai-sdk/github-copilot", + }, + })) } const configProvider = config.provider?.[providerID] - const filteredModels = Object.fromEntries( - Object.entries(provider.info.models) - // Filter out blacklisted models - .filter( - ([modelID]) => - modelID !== "gpt-5-chat-latest" && !(providerID === "openrouter" && modelID === "openai/gpt-5-chat"), - ) - // Filter out experimental models - .filter( - ([, model]) => - ((!model.experimental && model.status !== "alpha") || Flag.OPENCODE_ENABLE_EXPERIMENTAL_MODELS) && - model.status !== "deprecated", - ) - // Filter by provider's whitelist/blacklist from config - .filter(([modelID]) => { - if (!configProvider) return true - - return ( - (!configProvider.blacklist || !configProvider.blacklist.includes(modelID)) && - (!configProvider.whitelist || configProvider.whitelist.includes(modelID)) - ) - }), - ) - provider.info.models = filteredModels + for (const [modelID, model] of Object.entries(provider.models)) { + model.api.id = model.api.id ?? model.id ?? 
modelID + if (modelID === "gpt-5-chat-latest" || (providerID === "openrouter" && modelID === "openai/gpt-5-chat")) + delete provider.models[modelID] + if ((model.status === "alpha" && !Flag.OPENCODE_ENABLE_EXPERIMENTAL_MODELS) || model.status === "deprecated") + delete provider.models[modelID] + if ( + (configProvider?.blacklist && configProvider.blacklist.includes(modelID)) || + (configProvider?.whitelist && !configProvider.whitelist.includes(modelID)) + ) + delete provider.models[modelID] + } - if (Object.keys(provider.info.models).length === 0) { + if (Object.keys(provider.models).length === 0) { delete providers[providerID] continue } - log.info("found", { providerID, npm: provider.info.npm }) + log.info("found", { providerID }) } return { - models, + models: languages, providers, sdk, - realIdByKey, + modelLoaders, } }) @@ -574,19 +702,28 @@ export namespace Provider { return state().then((state) => state.providers) } - async function getSDK(provider: ModelsDev.Provider, model: ModelsDev.Model) { - return (async () => { + async function getSDK(model: Model) { + try { using _ = log.time("getSDK", { - providerID: provider.id, + providerID: model.providerID, }) const s = await state() - const pkg = model.provider?.npm ?? provider.npm ?? provider.id - const options = { ...s.providers[provider.id]?.options } - if (pkg.includes("@ai-sdk/openai-compatible") && options["includeUsage"] === undefined) { + const provider = s.providers[model.providerID] + const options = { ...provider.options } + + if (model.api.npm.includes("@ai-sdk/openai-compatible") && options["includeUsage"] !== false) { options["includeUsage"] = true } - const key = Bun.hash.xxHash32(JSON.stringify({ pkg, options })) + if (!options["baseURL"]) options["baseURL"] = model.api.url + if (!options["apiKey"]) options["apiKey"] = provider.key + if (model.headers) + options["headers"] = { + ...options["headers"], + ...model.headers, + } + + const key = Bun.hash.xxHash32(JSON.stringify({ npm: model.api.npm, options })) const existing = s.sdk.get(key) if (existing) return existing @@ -615,12 +752,13 @@ export namespace Provider { } // Special case: google-vertex-anthropic uses a subpath import - const bundledKey = provider.id === "google-vertex-anthropic" ? "@ai-sdk/google-vertex/anthropic" : pkg + const bundledKey = + model.providerID === "google-vertex-anthropic" ? "@ai-sdk/google-vertex/anthropic" : model.api.npm const bundledFn = BUNDLED_PROVIDERS[bundledKey] if (bundledFn) { - log.info("using bundled provider", { providerID: provider.id, pkg: bundledKey }) + log.info("using bundled provider", { providerID: model.providerID, pkg: bundledKey }) const loaded = bundledFn({ - name: provider.id, + name: model.providerID, ...options, }) s.sdk.set(key, loaded) @@ -628,25 +766,25 @@ export namespace Provider { } let installedPath: string - if (!pkg.startsWith("file://")) { - installedPath = await BunProc.install(pkg, "latest") + if (!model.api.npm.startsWith("file://")) { + installedPath = await BunProc.install(model.api.npm, "latest") } else { - log.info("loading local provider", { pkg }) - installedPath = pkg + log.info("loading local provider", { pkg: model.api.npm }) + installedPath = model.api.npm } const mod = await import(installedPath) const fn = mod[Object.keys(mod).find((key) => key.startsWith("create"))!] 
const loaded = fn({ - name: provider.id, + name: model.providerID, ...options, }) s.sdk.set(key, loaded) return loaded as SDK - })().catch((e) => { - throw new InitError({ providerID: provider.id }, { cause: e }) - }) + } catch (e) { + throw new InitError({ providerID: model.providerID }, { cause: e }) + } } export async function getProvider(providerID: string) { @@ -654,15 +792,7 @@ export namespace Provider { } export async function getModel(providerID: string, modelID: string) { - const key = `${providerID}/${modelID}` const s = await state() - if (s.models.has(key)) return s.models.get(key)! - - log.info("getModel", { - providerID, - modelID, - }) - const provider = s.providers[providerID] if (!provider) { const availableProviders = Object.keys(s.providers) @@ -671,43 +801,36 @@ export namespace Provider { throw new ModelNotFoundError({ providerID, modelID, suggestions }) } - const info = provider.info.models[modelID] + const info = provider.models[modelID] if (!info) { - const availableModels = Object.keys(provider.info.models) + const availableModels = Object.keys(provider.models) const matches = fuzzysort.go(modelID, availableModels, { limit: 3, threshold: -10000 }) const suggestions = matches.map((m) => m.target) throw new ModelNotFoundError({ providerID, modelID, suggestions }) } + return info + } + + export async function getLanguage(model: Model) { + const s = await state() + const key = `${model.providerID}/${model.id}` + if (s.models.has(key)) return s.models.get(key)! - const sdk = await getSDK(provider.info, info) + const provider = s.providers[model.providerID] + const sdk = await getSDK(model) try { - const keyReal = `${providerID}/${modelID}` - const realID = s.realIdByKey.get(keyReal) ?? info.id - const language = provider.getModel - ? await provider.getModel(sdk, realID, provider.options) - : sdk.languageModel(realID) - log.info("found", { providerID, modelID }) - s.models.set(key, { - providerID, - modelID, - info, - language, - npm: info.provider?.npm ?? provider.info.npm, - }) - return { - modelID, - providerID, - info, - language, - npm: info.provider?.npm ?? provider.info.npm, - } + const language = s.modelLoaders[model.providerID] + ? 
await s.modelLoaders[model.providerID](sdk, model.api.id, provider.options) + : sdk.languageModel(model.api.id) + s.models.set(key, language) + return language } catch (e) { if (e instanceof NoSuchModelError) throw new ModelNotFoundError( { - modelID: modelID, - providerID, + modelID: model.id, + providerID: model.providerID, }, { cause: e }, ) @@ -720,7 +843,7 @@ export namespace Provider { const provider = s.providers[providerID] if (!provider) return undefined for (const item of query) { - for (const modelID of Object.keys(provider.info.models)) { + for (const modelID of Object.keys(provider.models)) { if (modelID.includes(item)) return { providerID, @@ -756,7 +879,7 @@ export namespace Provider { priority = ["gpt-5-nano"] } for (const item of priority) { - for (const model of Object.keys(provider.info.models)) { + for (const model of Object.keys(provider.models)) { if (model.includes(item)) return getModel(providerID, model) } } @@ -764,7 +887,7 @@ export namespace Provider { // Check if opencode provider is available before using it const opencodeProvider = await state().then((state) => state.providers["opencode"]) - if (opencodeProvider && opencodeProvider.info.models["gpt-5-nano"]) { + if (opencodeProvider && opencodeProvider.models["gpt-5-nano"]) { return getModel("opencode", "gpt-5-nano") } @@ -772,7 +895,7 @@ export namespace Provider { } const priority = ["gpt-5", "claude-sonnet-4", "big-pickle", "gemini-3-pro"] - export function sort(models: ModelsDev.Model[]) { + export function sort(models: Model[]) { return sortBy( models, [(model) => priority.findIndex((filter) => model.id.includes(filter)), "desc"], @@ -787,12 +910,12 @@ export namespace Provider { const provider = await list() .then((val) => Object.values(val)) - .then((x) => x.find((p) => !cfg.provider || Object.keys(cfg.provider).includes(p.info.id))) + .then((x) => x.find((p) => !cfg.provider || Object.keys(cfg.provider).includes(p.id))) if (!provider) throw new Error("no providers found") - const [model] = sort(Object.values(provider.info.models)) + const [model] = sort(Object.values(provider.models)) if (!model) throw new Error("no models found") return { - providerID: provider.info.id, + providerID: provider.id, modelID: model.id, } } diff --git a/packages/opencode/src/provider/transform.ts b/packages/opencode/src/provider/transform.ts index abe269d5d020..8afac3a65eb3 100644 --- a/packages/opencode/src/provider/transform.ts +++ b/packages/opencode/src/provider/transform.ts @@ -1,10 +1,11 @@ import type { APICallError, ModelMessage } from "ai" import { unique } from "remeda" import type { JSONSchema } from "zod/v4/core" +import type { Provider } from "./provider" export namespace ProviderTransform { - function normalizeMessages(msgs: ModelMessage[], providerID: string, modelID: string): ModelMessage[] { - if (modelID.includes("claude")) { + function normalizeMessages(msgs: ModelMessage[], model: Provider.Model): ModelMessage[] { + if (model.api.id.includes("claude")) { return msgs.map((msg) => { if ((msg.role === "assistant" || msg.role === "tool") && Array.isArray(msg.content)) { msg.content = msg.content.map((part) => { @@ -20,7 +21,7 @@ export namespace ProviderTransform { return msg }) } - if (providerID === "mistral" || modelID.toLowerCase().includes("mistral")) { + if (model.providerID === "mistral" || model.api.id.toLowerCase().includes("mistral")) { const result: ModelMessage[] = [] for (let i = 0; i < msgs.length; i++) { const msg = msgs[i] @@ -107,67 +108,68 @@ export namespace ProviderTransform { return 
msgs } - export function message(msgs: ModelMessage[], providerID: string, modelID: string) { - msgs = normalizeMessages(msgs, providerID, modelID) - if (providerID === "anthropic" || modelID.includes("anthropic") || modelID.includes("claude")) { - msgs = applyCaching(msgs, providerID) + export function message(msgs: ModelMessage[], model: Provider.Model) { + msgs = normalizeMessages(msgs, model) + if (model.providerID === "anthropic" || model.api.id.includes("anthropic") || model.api.id.includes("claude")) { + msgs = applyCaching(msgs, model.providerID) } return msgs } - export function temperature(_providerID: string, modelID: string) { - if (modelID.toLowerCase().includes("qwen")) return 0.55 - if (modelID.toLowerCase().includes("claude")) return undefined - if (modelID.toLowerCase().includes("gemini-3-pro")) return 1.0 + export function temperature(model: Provider.Model) { + if (model.api.id.toLowerCase().includes("qwen")) return 0.55 + if (model.api.id.toLowerCase().includes("claude")) return undefined + if (model.api.id.toLowerCase().includes("gemini-3-pro")) return 1.0 return 0 } - export function topP(_providerID: string, modelID: string) { - if (modelID.toLowerCase().includes("qwen")) return 1 + export function topP(model: Provider.Model) { + if (model.api.id.toLowerCase().includes("qwen")) return 1 return undefined } export function options( - providerID: string, - modelID: string, - npm: string, + model: Provider.Model, sessionID: string, providerOptions?: Record, ): Record { const result: Record = {} // switch to providerID later, for now use this - if (npm === "@openrouter/ai-sdk-provider") { + if (model.api.npm === "@openrouter/ai-sdk-provider") { result["usage"] = { include: true, } } - if (providerID === "openai" || providerOptions?.setCacheKey) { + if (model.providerID === "openai" || providerOptions?.setCacheKey) { result["promptCacheKey"] = sessionID } - if (providerID === "google" || (providerID.startsWith("opencode") && modelID.includes("gemini-3"))) { + if ( + model.providerID === "google" || + (model.providerID.startsWith("opencode") && model.api.id.includes("gemini-3")) + ) { result["thinkingConfig"] = { includeThoughts: true, } } - if (modelID.includes("gpt-5") && !modelID.includes("gpt-5-chat")) { + if (model.api.id.includes("gpt-5") && !model.api.id.includes("gpt-5-chat")) { - if (modelID.includes("codex")) { + if (model.api.id.includes("codex")) { result["store"] = false } - if (!modelID.includes("codex") && !modelID.includes("gpt-5-pro")) { + if (!model.api.id.includes("codex") && !model.api.id.includes("gpt-5-pro")) { result["reasoningEffort"] = "medium" } - if (modelID.endsWith("gpt-5.1") && providerID !== "azure") { + if (model.api.id.endsWith("gpt-5.1") && model.providerID !== "azure") { result["textVerbosity"] = "low" } - if (providerID.startsWith("opencode")) { + if (model.providerID.startsWith("opencode")) { result["promptCacheKey"] = sessionID result["include"] = ["reasoning.encrypted_content"] result["reasoningSummary"] = "auto" @@ -176,17 +178,17 @@ export namespace ProviderTransform { return result } - export function smallOptions(input: { providerID: string; modelID: string }) { + export function smallOptions(model: Provider.Model) { const options: Record = {} - if (input.providerID === "openai" || input.modelID.includes("gpt-5")) { - if (input.modelID.includes("5.1")) { + if (model.providerID === "openai" || model.api.id.includes("gpt-5")) { + if (model.api.id.includes("5.1")) { options["reasoningEffort"] = "low" } else {
options["reasoningEffort"] = "minimal" } } - if (input.providerID === "google") { + if (model.providerID === "google") { options["thinkingConfig"] = { thinkingBudget: 0, } @@ -254,7 +256,7 @@ export namespace ProviderTransform { return standardLimit } - export function schema(providerID: string, modelID: string, schema: JSONSchema.BaseSchema) { + export function schema(model: Provider.Model, schema: JSONSchema.BaseSchema) { /* if (["openai", "azure"].includes(providerID)) { if (schema.type === "object" && schema.properties) { @@ -274,7 +276,7 @@ export namespace ProviderTransform { */ // Convert integer enums to string enums for Google/Gemini - if (providerID === "google" || modelID.includes("gemini")) { + if (model.providerID === "google" || model.api.id.includes("gemini")) { const sanitizeGemini = (obj: any): any => { if (obj === null || typeof obj !== "object") { return obj diff --git a/packages/opencode/src/server/server.ts b/packages/opencode/src/server/server.ts index fe4ad195aab2..31d0822762b6 100644 --- a/packages/opencode/src/server/server.ts +++ b/packages/opencode/src/server/server.ts @@ -8,7 +8,7 @@ import { proxy } from "hono/proxy" import { Session } from "../session" import z from "zod" import { Provider } from "../provider/provider" -import { mapValues } from "remeda" +import { mapValues, pipe } from "remeda" import { NamedError } from "@opencode-ai/util/error" import { ModelsDev } from "../provider/models" import { Ripgrep } from "../file/ripgrep" @@ -296,8 +296,8 @@ export namespace Server { }), ), async (c) => { - const { provider, model } = c.req.valid("query") - const tools = await ToolRegistry.tools(provider, model) + const { provider } = c.req.valid("query") + const tools = await ToolRegistry.tools(provider) return c.json( tools.map((t) => ({ id: t.id, @@ -1025,7 +1025,7 @@ export namespace Server { async (c) => { c.status(204) c.header("Content-Type", "application/json") - return stream(c, async (stream) => { + return stream(c, async () => { const sessionID = c.req.valid("param").id const body = c.req.valid("json") SessionPrompt.prompt({ ...body, sessionID }) @@ -1231,7 +1231,7 @@ export namespace Server { "application/json": { schema: resolver( z.object({ - providers: ModelsDev.Provider.array(), + providers: Provider.Info.array(), default: z.record(z.string(), z.string()), }), ), @@ -1242,7 +1242,7 @@ export namespace Server { }), async (c) => { using _ = log.time("providers") - const providers = await Provider.list().then((x) => mapValues(x, (item) => item.info)) + const providers = await Provider.list().then((x) => mapValues(x, (item) => item)) return c.json({ providers: Object.values(providers), default: mapValues(providers, (item) => Provider.sort(Object.values(item.models))[0].id), @@ -1272,7 +1272,10 @@ export namespace Server { }, }), async (c) => { - const providers = await ModelsDev.get() + const providers = pipe( + await ModelsDev.get(), + mapValues((x) => Provider.fromModelsDevProvider(x)), + ) const connected = await Provider.list().then((x) => Object.keys(x)) return c.json({ all: Object.values(providers), diff --git a/packages/opencode/src/session/compaction.ts b/packages/opencode/src/session/compaction.ts index a6b71edcef26..b83adafbe3c3 100644 --- a/packages/opencode/src/session/compaction.ts +++ b/packages/opencode/src/session/compaction.ts @@ -1,4 +1,4 @@ -import { streamText, wrapLanguageModel, type ModelMessage } from "ai" +import { wrapLanguageModel, type ModelMessage } from "ai" import { Session } from "." 
import { Identifier } from "../id/id" import { Instance } from "../project/instance" @@ -7,7 +7,6 @@ import { MessageV2 } from "./message-v2" import { SystemPrompt } from "./system" import { Bus } from "../bus" import z from "zod" -import type { ModelsDev } from "../provider/models" import { SessionPrompt } from "./prompt" import { Flag } from "../flag/flag" import { Token } from "../util/token" @@ -29,7 +28,7 @@ export namespace SessionCompaction { ), } - export function isOverflow(input: { tokens: MessageV2.Assistant["tokens"]; model: ModelsDev.Model }) { + export function isOverflow(input: { tokens: MessageV2.Assistant["tokens"]; model: Provider.Model }) { if (Flag.OPENCODE_DISABLE_AUTOCOMPACT) return false const context = input.model.limit.context if (context === 0) return false @@ -98,6 +97,7 @@ export namespace SessionCompaction { auto: boolean }) { const model = await Provider.getModel(input.model.providerID, input.model.modelID) + const language = await Provider.getLanguage(model) const system = [...SystemPrompt.compaction(model.providerID)] const msg = (await Session.updateMessage({ id: Identifier.ascending("message"), @@ -126,79 +126,72 @@ export namespace SessionCompaction { const processor = SessionProcessor.create({ assistantMessage: msg, sessionID: input.sessionID, - providerID: input.model.providerID, - model: model.info, + model: model, abort: input.abort, }) - const result = await processor.process(() => - streamText({ - onError(error) { - log.error("stream error", { - error, - }) - }, - // set to 0, we handle loop - maxRetries: 0, - providerOptions: ProviderTransform.providerOptions( - model.npm, - model.providerID, - pipe( - {}, - mergeDeep(ProviderTransform.options(model.providerID, model.modelID, model.npm ?? "", input.sessionID)), - mergeDeep(model.info.options), - ), + const result = await processor.process({ + onError(error) { + log.error("stream error", { + error, + }) + }, + // set to 0, we handle loop + maxRetries: 0, + providerOptions: ProviderTransform.providerOptions( + model.api.npm, + model.providerID, + pipe({}, mergeDeep(ProviderTransform.options(model, input.sessionID)), mergeDeep(model.options)), + ), + headers: model.headers, + abortSignal: input.abort, + tools: model.capabilities.toolcall ? {} : undefined, + messages: [ + ...system.map( + (x): ModelMessage => ({ + role: "system", + content: x, + }), ), - headers: model.info.headers, - abortSignal: input.abort, - tools: model.info.tool_call ? {} : undefined, - messages: [ - ...system.map( - (x): ModelMessage => ({ - role: "system", - content: x, - }), - ), - ...MessageV2.toModelMessage( - input.messages.filter((m) => { - if (m.info.role !== "assistant" || m.info.error === undefined) { - return true - } - if ( - MessageV2.AbortedError.isInstance(m.info.error) && - m.parts.some((part) => part.type !== "step-start" && part.type !== "reasoning") - ) { - return true - } + ...MessageV2.toModelMessage( + input.messages.filter((m) => { + if (m.info.role !== "assistant" || m.info.error === undefined) { + return true + } + if ( + MessageV2.AbortedError.isInstance(m.info.error) && + m.parts.some((part) => part.type !== "step-start" && part.type !== "reasoning") + ) { + return true + } - return false - }), - ), - { - role: "user", - content: [ - { - type: "text", - text: "Summarize our conversation above. 
This summary will be the only context available when the conversation continues, so preserve critical information including: what was accomplished, current work in progress, files involved, next steps, and any key user requests or constraints. Be concise but detailed enough that work can continue seamlessly.", - }, - ], - }, - ], - model: wrapLanguageModel({ - model: model.language, - middleware: [ + return false + }), + ), + { + role: "user", + content: [ { - async transformParams(args) { - if (args.type === "stream") { - // @ts-expect-error - args.params.prompt = ProviderTransform.message(args.params.prompt, model.providerID, model.modelID) - } - return args.params - }, + type: "text", + text: "Summarize our conversation above. This summary will be the only context available when the conversation continues, so preserve critical information including: what was accomplished, current work in progress, files involved, next steps, and any key user requests or constraints. Be concise but detailed enough that work can continue seamlessly.", }, ], - }), + }, + ], + model: wrapLanguageModel({ + model: language, + middleware: [ + { + async transformParams(args) { + if (args.type === "stream") { + // @ts-expect-error + args.params.prompt = ProviderTransform.message(args.params.prompt, model.providerID, model.modelID) + } + return args.params + }, + }, + ], }), - ) + }) if (result === "continue" && input.auto) { const continueMsg = await Session.updateMessage({ id: Identifier.ascending("message"), diff --git a/packages/opencode/src/session/index.ts b/packages/opencode/src/session/index.ts index f09818caa2e6..6a148e973abf 100644 --- a/packages/opencode/src/session/index.ts +++ b/packages/opencode/src/session/index.ts @@ -6,8 +6,7 @@ import { Config } from "../config/config" import { Flag } from "../flag/flag" import { Identifier } from "../id/id" import { Installation } from "../installation" -import type { ModelsDev } from "../provider/models" -import { Share } from "../share/share" + import { Storage } from "../storage/storage" import { Log } from "../util/log" import { MessageV2 } from "./message-v2" @@ -16,7 +15,8 @@ import { SessionPrompt } from "./prompt" import { fn } from "@/util/fn" import { Command } from "../command" import { Snapshot } from "@/snapshot" -import { ShareNext } from "@/share/share-next" + +import type { Provider } from "@/provider/provider" export namespace Session { const log = Log.create({ service: "session" }) @@ -223,6 +223,7 @@ export namespace Session { } if (cfg.enterprise?.url) { + const { ShareNext } = await import("@/share/share-next") const share = await ShareNext.create(id) await update(id, (draft) => { draft.share = { @@ -233,6 +234,7 @@ export namespace Session { const session = await get(id) if (session.share) return session.share + const { Share } = await import("../share/share") const share = await Share.create(id) await update(id, (draft) => { draft.share = { @@ -253,6 +255,7 @@ export namespace Session { export const unshare = fn(Identifier.schema("session"), async (id) => { const cfg = await Config.get() if (cfg.enterprise?.url) { + const { ShareNext } = await import("@/share/share-next") await ShareNext.remove(id) await update(id, (draft) => { draft.share = undefined @@ -264,6 +267,7 @@ export namespace Session { await update(id, (draft) => { draft.share = undefined }) + const { Share } = await import("../share/share") await Share.remove(id, share.secret) }) @@ -389,7 +393,7 @@ export namespace Session { export const getUsage = fn( z.object({ - model: 
z.custom(), + model: z.custom(), usage: z.custom(), metadata: z.custom().optional(), }), @@ -420,16 +424,16 @@ export namespace Session { } const costInfo = - input.model.cost?.context_over_200k && tokens.input + tokens.cache.read > 200_000 - ? input.model.cost.context_over_200k + input.model.cost?.experimentalOver200K && tokens.input + tokens.cache.read > 200_000 + ? input.model.cost.experimentalOver200K : input.model.cost return { cost: safe( new Decimal(0) .add(new Decimal(tokens.input).mul(costInfo?.input ?? 0).div(1_000_000)) .add(new Decimal(tokens.output).mul(costInfo?.output ?? 0).div(1_000_000)) - .add(new Decimal(tokens.cache.read).mul(costInfo?.cache_read ?? 0).div(1_000_000)) - .add(new Decimal(tokens.cache.write).mul(costInfo?.cache_write ?? 0).div(1_000_000)) + .add(new Decimal(tokens.cache.read).mul(costInfo?.cache.read ?? 0).div(1_000_000)) + .add(new Decimal(tokens.cache.write).mul(costInfo?.cache.write ?? 0).div(1_000_000)) // TODO: update models.dev to have better pricing model, for now: // charge reasoning tokens at the same rate as output tokens .add(new Decimal(tokens.reasoning).mul(costInfo?.output ?? 0).div(1_000_000)) diff --git a/packages/opencode/src/session/processor.ts b/packages/opencode/src/session/processor.ts index 8655781d5ede..8b4faf02653b 100644 --- a/packages/opencode/src/session/processor.ts +++ b/packages/opencode/src/session/processor.ts @@ -1,6 +1,5 @@ -import type { ModelsDev } from "@/provider/models" import { MessageV2 } from "./message-v2" -import { type StreamTextResult, type Tool as AITool, APICallError } from "ai" +import { streamText } from "ai" import { Log } from "@/util/log" import { Identifier } from "@/id/id" import { Session } from "." @@ -11,6 +10,7 @@ import { SessionSummary } from "./summary" import { Bus } from "@/bus" import { SessionRetry } from "./retry" import { SessionStatus } from "./status" +import type { Provider } from "@/provider/provider" export namespace SessionProcessor { const DOOM_LOOP_THRESHOLD = 3 @@ -19,11 +19,19 @@ export namespace SessionProcessor { export type Info = Awaited> export type Result = Awaited> + export type StreamInput = Parameters[0] + + export type TBD = { + model: { + modelID: string + providerID: string + } + } + export function create(input: { assistantMessage: MessageV2.Assistant sessionID: string - providerID: string - model: ModelsDev.Model + model: Provider.Model abort: AbortSignal }) { const toolcalls: Record = {} @@ -38,13 +46,13 @@ export namespace SessionProcessor { partFromToolCall(toolCallID: string) { return toolcalls[toolCallID] }, - async process(fn: () => StreamTextResult, never>) { + async process(streamInput: StreamInput) { log.info("process") while (true) { try { let currentText: MessageV2.TextPart | undefined let reasoningMap: Record = {} - const stream = fn() + const stream = streamText(streamInput) for await (const value of stream.fullStream) { input.abort.throwIfAborted() @@ -328,11 +336,12 @@ export namespace SessionProcessor { continue } } - } catch (e) { + } catch (e: any) { log.error("process", { error: e, + stack: JSON.stringify(e.stack), }) - const error = MessageV2.fromError(e, { providerID: input.providerID }) + const error = MessageV2.fromError(e, { providerID: input.sessionID }) const retry = SessionRetry.retryable(error) if (retry !== undefined) { attempt++ diff --git a/packages/opencode/src/session/prompt.ts b/packages/opencode/src/session/prompt.ts index ee58bb33809e..d82cbd718c33 100644 --- a/packages/opencode/src/session/prompt.ts +++ 
b/packages/opencode/src/session/prompt.ts @@ -11,7 +11,6 @@ import { Agent } from "../agent/agent" import { Provider } from "../provider/provider" import { generateText, - streamText, type ModelMessage, type Tool as AITool, tool, @@ -288,6 +287,7 @@ export namespace SessionPrompt { }) const model = await Provider.getModel(lastUser.model.providerID, lastUser.model.modelID) + const language = await Provider.getLanguage(model) const task = tasks.pop() // pending subtask @@ -311,7 +311,7 @@ export namespace SessionPrompt { reasoning: 0, cache: { read: 0, write: 0 }, }, - modelID: model.modelID, + modelID: model.id, providerID: model.providerID, time: { created: Date.now(), @@ -408,7 +408,7 @@ export namespace SessionPrompt { agent: lastUser.agent, model: { providerID: model.providerID, - modelID: model.modelID, + modelID: model.id, }, sessionID, auto: task.auto, @@ -421,7 +421,7 @@ export namespace SessionPrompt { if ( lastFinished && lastFinished.summary !== true && - SessionCompaction.isOverflow({ tokens: lastFinished.tokens, model: model.info }) + SessionCompaction.isOverflow({ tokens: lastFinished.tokens, model }) ) { await SessionCompaction.create({ sessionID, @@ -455,7 +455,7 @@ export namespace SessionPrompt { reasoning: 0, cache: { read: 0, write: 0 }, }, - modelID: model.modelID, + modelID: model.id, providerID: model.providerID, time: { created: Date.now(), @@ -463,20 +463,18 @@ export namespace SessionPrompt { sessionID, })) as MessageV2.Assistant, sessionID: sessionID, - model: model.info, - providerID: model.providerID, + model, abort, }) const system = await resolveSystemPrompt({ - providerID: model.providerID, - modelID: model.info.id, + model, agent, system: lastUser.system, }) const tools = await resolveTools({ agent, sessionID, - model: lastUser.model, + model, tools: lastUser.tools, processor, }) @@ -486,21 +484,19 @@ export namespace SessionPrompt { { sessionID: sessionID, agent: lastUser.agent, - model: model.info, + model: model, provider, message: lastUser, }, { - temperature: model.info.temperature - ? (agent.temperature ?? ProviderTransform.temperature(model.providerID, model.modelID)) + temperature: model.capabilities.temperature + ? (agent.temperature ?? ProviderTransform.temperature(model)) : undefined, - topP: agent.topP ?? ProviderTransform.topP(model.providerID, model.modelID), + topP: agent.topP ?? ProviderTransform.topP(model), options: pipe( {}, - mergeDeep( - ProviderTransform.options(model.providerID, model.modelID, model.npm ?? 
"", sessionID, provider?.options), - ), - mergeDeep(model.info.options), + mergeDeep(ProviderTransform.options(model, sessionID, provider?.options)), + mergeDeep(model.options), mergeDeep(agent.options), ), }, @@ -513,113 +509,111 @@ export namespace SessionPrompt { }) } - const result = await processor.process(() => - streamText({ - onError(error) { - log.error("stream error", { - error, + const result = await processor.process({ + onError(error) { + log.error("stream error", { + error, + }) + }, + async experimental_repairToolCall(input) { + const lower = input.toolCall.toolName.toLowerCase() + if (lower !== input.toolCall.toolName && tools[lower]) { + log.info("repairing tool call", { + tool: input.toolCall.toolName, + repaired: lower, }) - }, - async experimental_repairToolCall(input) { - const lower = input.toolCall.toolName.toLowerCase() - if (lower !== input.toolCall.toolName && tools[lower]) { - log.info("repairing tool call", { - tool: input.toolCall.toolName, - repaired: lower, - }) - return { - ...input.toolCall, - toolName: lower, - } - } return { ...input.toolCall, - input: JSON.stringify({ - tool: input.toolCall.toolName, - error: input.error.message, - }), - toolName: "invalid", + toolName: lower, } - }, - headers: { - ...(model.providerID.startsWith("opencode") - ? { - "x-opencode-project": Instance.project.id, - "x-opencode-session": sessionID, - "x-opencode-request": lastUser.id, - } - : undefined), - ...model.info.headers, - }, - // set to 0, we handle loop - maxRetries: 0, - activeTools: Object.keys(tools).filter((x) => x !== "invalid"), - maxOutputTokens: ProviderTransform.maxOutputTokens( - model.providerID, - params.options, - model.info.limit.output, - OUTPUT_TOKEN_MAX, + } + return { + ...input.toolCall, + input: JSON.stringify({ + tool: input.toolCall.toolName, + error: input.error.message, + }), + toolName: "invalid", + } + }, + headers: { + ...(model.providerID.startsWith("opencode") + ? 
{ + "x-opencode-project": Instance.project.id, + "x-opencode-session": sessionID, + "x-opencode-request": lastUser.id, + } + : undefined), + ...model.headers, + }, + // set to 0, we handle loop + maxRetries: 0, + activeTools: Object.keys(tools).filter((x) => x !== "invalid"), + maxOutputTokens: ProviderTransform.maxOutputTokens( + model.api.npm, + params.options, + model.limit.output, + OUTPUT_TOKEN_MAX, + ), + abortSignal: abort, + providerOptions: ProviderTransform.providerOptions(model.api.npm, model.providerID, params.options), + stopWhen: stepCountIs(1), + temperature: params.temperature, + topP: params.topP, + messages: [ + ...system.map( + (x): ModelMessage => ({ + role: "system", + content: x, + }), ), - abortSignal: abort, - providerOptions: ProviderTransform.providerOptions(model.npm, model.providerID, params.options), - stopWhen: stepCountIs(1), - temperature: params.temperature, - topP: params.topP, - messages: [ - ...system.map( - (x): ModelMessage => ({ - role: "system", - content: x, - }), - ), - ...MessageV2.toModelMessage( - msgs.filter((m) => { - if (m.info.role !== "assistant" || m.info.error === undefined) { - return true - } - if ( - MessageV2.AbortedError.isInstance(m.info.error) && - m.parts.some((part) => part.type !== "step-start" && part.type !== "reasoning") - ) { - return true - } + ...MessageV2.toModelMessage( + msgs.filter((m) => { + if (m.info.role !== "assistant" || m.info.error === undefined) { + return true + } + if ( + MessageV2.AbortedError.isInstance(m.info.error) && + m.parts.some((part) => part.type !== "step-start" && part.type !== "reasoning") + ) { + return true + } - return false - }), - ), - ], - tools: model.info.tool_call === false ? undefined : tools, - model: wrapLanguageModel({ - model: model.language, - middleware: [ - { - async transformParams(args) { - if (args.type === "stream") { - // @ts-expect-error - args.params.prompt = ProviderTransform.message(args.params.prompt, model.providerID, model.modelID) - } - // Transform tool schemas for provider compatibility - if (args.params.tools && Array.isArray(args.params.tools)) { - args.params.tools = args.params.tools.map((tool: any) => { - // Tools at middleware level have inputSchema, not parameters - if (tool.inputSchema && typeof tool.inputSchema === "object") { - // Transform the inputSchema for provider compatibility - return { - ...tool, - inputSchema: ProviderTransform.schema(model.providerID, model.modelID, tool.inputSchema), - } + return false + }), + ), + ], + tools: model.capabilities.toolcall === false ? 
undefined : tools, + model: wrapLanguageModel({ + model: language, + middleware: [ + { + async transformParams(args) { + if (args.type === "stream") { + // @ts-expect-error - prompt types are compatible at runtime + args.params.prompt = ProviderTransform.message(args.params.prompt, model) + } + // Transform tool schemas for provider compatibility + if (args.params.tools && Array.isArray(args.params.tools)) { + args.params.tools = args.params.tools.map((tool: any) => { + // Tools at middleware level have inputSchema, not parameters + if (tool.inputSchema && typeof tool.inputSchema === "object") { + // Transform the inputSchema for provider compatibility + return { + ...tool, + inputSchema: ProviderTransform.schema(model, tool.inputSchema), } - // If no inputSchema, return tool unchanged - return tool - }) - } - return args.params - }, + } + // If no inputSchema, return tool unchanged + return tool + }) + } + return args.params }, - ], - }), + }, + ], }), - ) + }) if (result === "stop") break continue } @@ -642,18 +636,13 @@ export namespace SessionPrompt { return Provider.defaultModel() } - async function resolveSystemPrompt(input: { - system?: string - agent: Agent.Info - providerID: string - modelID: string - }) { - let system = SystemPrompt.header(input.providerID) + async function resolveSystemPrompt(input: { system?: string; agent: Agent.Info; model: Provider.Model }) { + let system = SystemPrompt.header(input.model.providerID) system.push( ...(() => { if (input.system) return [input.system] if (input.agent.prompt) return [input.agent.prompt] - return SystemPrompt.provider(input.modelID) + return SystemPrompt.provider(input.model) })(), ) system.push(...(await SystemPrompt.environment())) @@ -666,10 +655,7 @@ export namespace SessionPrompt { async function resolveTools(input: { agent: Agent.Info - model: { - providerID: string - modelID: string - } + model: Provider.Model sessionID: string tools?: Record processor: SessionProcessor.Info @@ -677,16 +663,12 @@ export namespace SessionPrompt { const tools: Record = {} const enabledTools = pipe( input.agent.tools, - mergeDeep(await ToolRegistry.enabled(input.model.providerID, input.model.modelID, input.agent)), + mergeDeep(await ToolRegistry.enabled(input.agent)), mergeDeep(input.tools ?? {}), ) - for (const item of await ToolRegistry.tools(input.model.providerID, input.model.modelID)) { + for (const item of await ToolRegistry.tools(input.model.providerID)) { if (Wildcard.all(item.id, enabledTools) === false) continue - const schema = ProviderTransform.schema( - input.model.providerID, - input.model.modelID, - z.toJSONSchema(item.parameters), - ) + const schema = ProviderTransform.schema(input.model, z.toJSONSchema(item.parameters)) tools[item.id] = tool({ id: item.id as any, description: item.description, @@ -1437,25 +1419,18 @@ export namespace SessionPrompt { if (!isFirst) return const small = (await Provider.getSmallModel(input.providerID)) ?? (await Provider.getModel(input.providerID, input.modelID)) + const language = await Provider.getLanguage(small) const provider = await Provider.getProvider(small.providerID) const options = pipe( {}, - mergeDeep( - ProviderTransform.options( - small.providerID, - small.modelID, - small.npm ?? 
"", - input.session.id, - provider?.options, - ), - ), - mergeDeep(ProviderTransform.smallOptions({ providerID: small.providerID, modelID: small.modelID })), - mergeDeep(small.info.options), + mergeDeep(ProviderTransform.options(small, input.session.id, provider?.options)), + mergeDeep(ProviderTransform.smallOptions(small)), + mergeDeep(small.options), ) await generateText({ // use higher # for reasoning models since reasoning tokens eat up a lot of the budget - maxOutputTokens: small.info.reasoning ? 3000 : 20, - providerOptions: ProviderTransform.providerOptions(small.npm, small.providerID, options), + maxOutputTokens: small.capabilities.reasoning ? 3000 : 20, + providerOptions: ProviderTransform.providerOptions(small.api.npm, small.providerID, options), messages: [ ...SystemPrompt.title(small.providerID).map( (x): ModelMessage => ({ @@ -1486,8 +1461,8 @@ export namespace SessionPrompt { }, ]), ], - headers: small.info.headers, - model: small.language, + headers: small.headers, + model: language, }) .then((result) => { if (result.text) @@ -1504,7 +1479,7 @@ export namespace SessionPrompt { }) }) .catch((error) => { - log.error("failed to generate title", { error, model: small.info.id }) + log.error("failed to generate title", { error, model: small.id }) }) } } diff --git a/packages/opencode/src/session/summary.ts b/packages/opencode/src/session/summary.ts index d9247f182dc4..8d366e4991c1 100644 --- a/packages/opencode/src/session/summary.ts +++ b/packages/opencode/src/session/summary.ts @@ -76,19 +76,20 @@ export namespace SessionSummary { const small = (await Provider.getSmallModel(assistantMsg.providerID)) ?? (await Provider.getModel(assistantMsg.providerID, assistantMsg.modelID)) + const language = await Provider.getLanguage(small) const options = pipe( {}, - mergeDeep(ProviderTransform.options(small.providerID, small.modelID, small.npm ?? "", assistantMsg.sessionID)), - mergeDeep(ProviderTransform.smallOptions({ providerID: small.providerID, modelID: small.modelID })), - mergeDeep(small.info.options), + mergeDeep(ProviderTransform.options(small, assistantMsg.sessionID)), + mergeDeep(ProviderTransform.smallOptions(small)), + mergeDeep(small.options), ) const textPart = msgWithParts.parts.find((p) => p.type === "text" && !p.synthetic) as MessageV2.TextPart if (textPart && !userMsg.summary?.title) { const result = await generateText({ - maxOutputTokens: small.info.reasoning ? 1500 : 20, - providerOptions: ProviderTransform.providerOptions(small.npm, small.providerID, options), + maxOutputTokens: small.capabilities.reasoning ? 
1500 : 20, + providerOptions: ProviderTransform.providerOptions(small.api.npm, small.providerID, options), messages: [ ...SystemPrompt.title(small.providerID).map( (x): ModelMessage => ({ @@ -106,8 +107,8 @@ export namespace SessionSummary { `, }, ], - headers: small.info.headers, - model: small.language, + headers: small.headers, + model: language, }) log.info("title", { title: result.text }) userMsg.summary.title = result.text @@ -132,9 +133,9 @@ export namespace SessionSummary { } } const result = await generateText({ - model: small.language, + model: language, maxOutputTokens: 100, - providerOptions: ProviderTransform.providerOptions(small.npm, small.providerID, options), + providerOptions: ProviderTransform.providerOptions(small.api.npm, small.providerID, options), messages: [ ...SystemPrompt.summarize(small.providerID).map( (x): ModelMessage => ({ @@ -148,7 +149,7 @@ export namespace SessionSummary { content: `Summarize the above conversation according to your system prompts.`, }, ], - headers: small.info.headers, + headers: small.headers, }).catch(() => {}) if (result) summary = result.text } diff --git a/packages/opencode/src/session/system.ts b/packages/opencode/src/session/system.ts index 399cad8cde55..3146110cf3fc 100644 --- a/packages/opencode/src/session/system.ts +++ b/packages/opencode/src/session/system.ts @@ -17,6 +17,7 @@ import PROMPT_COMPACTION from "./prompt/compaction.txt" import PROMPT_SUMMARIZE from "./prompt/summarize.txt" import PROMPT_TITLE from "./prompt/title.txt" import PROMPT_CODEX from "./prompt/codex.txt" +import type { Provider } from "@/provider/provider" export namespace SystemPrompt { export function header(providerID: string) { @@ -24,12 +25,13 @@ export namespace SystemPrompt { return [] } - export function provider(modelID: string) { - if (modelID.includes("gpt-5")) return [PROMPT_CODEX] - if (modelID.includes("gpt-") || modelID.includes("o1") || modelID.includes("o3")) return [PROMPT_BEAST] - if (modelID.includes("gemini-")) return [PROMPT_GEMINI] - if (modelID.includes("claude")) return [PROMPT_ANTHROPIC] - if (modelID.includes("polaris-alpha")) return [PROMPT_POLARIS] + export function provider(model: Provider.Model) { + if (model.api.id.includes("gpt-5")) return [PROMPT_CODEX] + if (model.api.id.includes("gpt-") || model.api.id.includes("o1") || model.api.id.includes("o3")) + return [PROMPT_BEAST] + if (model.api.id.includes("gemini-")) return [PROMPT_GEMINI] + if (model.api.id.includes("claude")) return [PROMPT_ANTHROPIC] + if (model.api.id.includes("polaris-alpha")) return [PROMPT_POLARIS] return [PROMPT_ANTHROPIC_WITHOUT_TODO] } diff --git a/packages/opencode/src/share/share-next.ts b/packages/opencode/src/share/share-next.ts index 9543149a8135..996400280d1f 100644 --- a/packages/opencode/src/share/share-next.ts +++ b/packages/opencode/src/share/share-next.ts @@ -1,7 +1,6 @@ import { Bus } from "@/bus" import { Config } from "@/config/config" import { ulid } from "ulid" -import type { ModelsDev } from "@/provider/models" import { Provider } from "@/provider/provider" import { Session } from "@/session" import { MessageV2 } from "@/session/message-v2" @@ -36,7 +35,7 @@ export namespace ShareNext { type: "model", data: [ await Provider.getModel(evt.properties.info.model.providerID, evt.properties.info.model.modelID).then( - (m) => m.info, + (m) => m, ), ], }, @@ -105,7 +104,7 @@ export namespace ShareNext { } | { type: "model" - data: ModelsDev.Model[] + data: SDK.Model[] } const queue = new Map }>() @@ -171,7 +170,7 @@ export namespace 
ShareNext { messages .filter((m) => m.info.role === "user") .map((m) => (m.info as SDK.UserMessage).model) - .map((m) => Provider.getModel(m.providerID, m.modelID).then((m) => m.info)), + .map((m) => Provider.getModel(m.providerID, m.modelID).then((m) => m)), ) await sync(sessionID, [ { diff --git a/packages/opencode/src/tool/batch.ts b/packages/opencode/src/tool/batch.ts index 7d6449e7dcb0..cc61b090aa35 100644 --- a/packages/opencode/src/tool/batch.ts +++ b/packages/opencode/src/tool/batch.ts @@ -37,7 +37,7 @@ export const BatchTool = Tool.define("batch", async () => { const discardedCalls = params.tool_calls.slice(10) const { ToolRegistry } = await import("./registry") - const availableTools = await ToolRegistry.tools("", "") + const availableTools = await ToolRegistry.tools("") const toolMap = new Map(availableTools.map((t) => [t.id, t])) const executeCall = async (call: (typeof toolCalls)[0]) => { diff --git a/packages/opencode/src/tool/read.ts b/packages/opencode/src/tool/read.ts index cf7b20e8b307..7e01246b5392 100644 --- a/packages/opencode/src/tool/read.ts +++ b/packages/opencode/src/tool/read.ts @@ -101,7 +101,7 @@ export const ReadTool = Tool.define("read", { const modelID = ctx.extra["modelID"] as string const model = await Provider.getModel(providerID, modelID).catch(() => undefined) if (!model) return false - return model.info.modalities?.input?.includes("image") ?? false + return model.capabilities.input.image })() if (isImage) { if (!supportsImages) { diff --git a/packages/opencode/src/tool/registry.ts b/packages/opencode/src/tool/registry.ts index 26b6ea9fcf27..33a54675ffa8 100644 --- a/packages/opencode/src/tool/registry.ts +++ b/packages/opencode/src/tool/registry.ts @@ -108,7 +108,7 @@ export namespace ToolRegistry { return all().then((x) => x.map((t) => t.id)) } - export async function tools(providerID: string, _modelID: string) { + export async function tools(providerID: string) { const tools = await all() const result = await Promise.all( tools @@ -124,11 +124,7 @@ export namespace ToolRegistry { return result } - export async function enabled( - _providerID: string, - _modelID: string, - agent: Agent.Info, - ): Promise> { + export async function enabled(agent: Agent.Info): Promise> { const result: Record = {} if (agent.permission.edit === "deny") { diff --git a/packages/opencode/test/provider/provider.test.ts b/packages/opencode/test/provider/provider.test.ts index fa31d9d4f13c..698fdddfb423 100644 --- a/packages/opencode/test/provider/provider.test.ts +++ b/packages/opencode/test/provider/provider.test.ts @@ -132,7 +132,7 @@ test("model whitelist filters models for provider", async () => { fn: async () => { const providers = await Provider.list() expect(providers["anthropic"]).toBeDefined() - const models = Object.keys(providers["anthropic"].info.models) + const models = Object.keys(providers["anthropic"].models) expect(models).toContain("claude-sonnet-4-20250514") expect(models.length).toBe(1) }, @@ -163,7 +163,7 @@ test("model blacklist excludes specific models", async () => { fn: async () => { const providers = await Provider.list() expect(providers["anthropic"]).toBeDefined() - const models = Object.keys(providers["anthropic"].info.models) + const models = Object.keys(providers["anthropic"].models) expect(models).not.toContain("claude-sonnet-4-20250514") }, }) @@ -198,8 +198,8 @@ test("custom model alias via config", async () => { fn: async () => { const providers = await Provider.list() expect(providers["anthropic"]).toBeDefined() - 
expect(providers["anthropic"].info.models["my-alias"]).toBeDefined() - expect(providers["anthropic"].info.models["my-alias"].name).toBe("My Custom Alias") + expect(providers["anthropic"].models["my-alias"]).toBeDefined() + expect(providers["anthropic"].models["my-alias"].name).toBe("My Custom Alias") }, }) }) @@ -241,8 +241,8 @@ test("custom provider with npm package", async () => { fn: async () => { const providers = await Provider.list() expect(providers["custom-provider"]).toBeDefined() - expect(providers["custom-provider"].info.name).toBe("Custom Provider") - expect(providers["custom-provider"].info.models["custom-model"]).toBeDefined() + expect(providers["custom-provider"].name).toBe("Custom Provider") + expect(providers["custom-provider"].models["custom-model"]).toBeDefined() }, }) }) @@ -299,8 +299,9 @@ test("getModel returns model for valid provider/model", async () => { const model = await Provider.getModel("anthropic", "claude-sonnet-4-20250514") expect(model).toBeDefined() expect(model.providerID).toBe("anthropic") - expect(model.modelID).toBe("claude-sonnet-4-20250514") - expect(model.language).toBeDefined() + expect(model.id).toBe("claude-sonnet-4-20250514") + const language = await Provider.getLanguage(model) + expect(language).toBeDefined() }, }) }) @@ -478,11 +479,11 @@ test("model cost defaults to zero when not specified", async () => { directory: tmp.path, fn: async () => { const providers = await Provider.list() - const model = providers["test-provider"].info.models["test-model"] + const model = providers["test-provider"].models["test-model"] expect(model.cost.input).toBe(0) expect(model.cost.output).toBe(0) - expect(model.cost.cache_read).toBe(0) - expect(model.cost.cache_write).toBe(0) + expect(model.cost.cache.read).toBe(0) + expect(model.cost.cache.write).toBe(0) }, }) }) @@ -516,7 +517,7 @@ test("model options are merged from existing model", async () => { }, fn: async () => { const providers = await Provider.list() - const model = providers["anthropic"].info.models["claude-sonnet-4-20250514"] + const model = providers["anthropic"].models["claude-sonnet-4-20250514"] expect(model.options.customOption).toBe("custom-value") }, }) @@ -623,17 +624,17 @@ test("getModel uses realIdByKey for aliased models", async () => { }, fn: async () => { const providers = await Provider.list() - expect(providers["anthropic"].info.models["my-sonnet"]).toBeDefined() + expect(providers["anthropic"].models["my-sonnet"]).toBeDefined() const model = await Provider.getModel("anthropic", "my-sonnet") expect(model).toBeDefined() - expect(model.modelID).toBe("my-sonnet") - expect(model.info.name).toBe("My Sonnet Alias") + expect(model.id).toBe("my-sonnet") + expect(model.name).toBe("My Sonnet Alias") }, }) }) -test("provider api field sets default baseURL", async () => { +test("provider api field sets model api.url", async () => { await using tmp = await tmpdir({ init: async (dir) => { await Bun.write( @@ -666,7 +667,8 @@ test("provider api field sets default baseURL", async () => { directory: tmp.path, fn: async () => { const providers = await Provider.list() - expect(providers["custom-api"].options.baseURL).toBe("https://api.example.com/v1") + // api field is stored on model.api.url, used by getSDK to set baseURL + expect(providers["custom-api"].models["model-1"].api.url).toBe("https://api.example.com/v1") }, }) }) @@ -737,10 +739,10 @@ test("model inherits properties from existing database model", async () => { }, fn: async () => { const providers = await Provider.list() - const model = 
providers["anthropic"].info.models["claude-sonnet-4-20250514"] + const model = providers["anthropic"].models["claude-sonnet-4-20250514"] expect(model.name).toBe("Custom Name for Sonnet") - expect(model.tool_call).toBe(true) - expect(model.attachment).toBe(true) + expect(model.capabilities.toolcall).toBe(true) + expect(model.capabilities.attachment).toBe(true) expect(model.limit.context).toBeGreaterThan(0) }, }) @@ -820,7 +822,7 @@ test("whitelist and blacklist can be combined", async () => { fn: async () => { const providers = await Provider.list() expect(providers["anthropic"]).toBeDefined() - const models = Object.keys(providers["anthropic"].info.models) + const models = Object.keys(providers["anthropic"].models) expect(models).toContain("claude-sonnet-4-20250514") expect(models).not.toContain("claude-opus-4-20250514") expect(models.length).toBe(1) @@ -858,11 +860,9 @@ test("model modalities default correctly", async () => { directory: tmp.path, fn: async () => { const providers = await Provider.list() - const model = providers["test-provider"].info.models["test-model"] - expect(model.modalities).toEqual({ - input: ["text"], - output: ["text"], - }) + const model = providers["test-provider"].models["test-model"] + expect(model.capabilities.input.text).toBe(true) + expect(model.capabilities.output.text).toBe(true) }, }) }) @@ -903,11 +903,11 @@ test("model with custom cost values", async () => { directory: tmp.path, fn: async () => { const providers = await Provider.list() - const model = providers["test-provider"].info.models["test-model"] + const model = providers["test-provider"].models["test-model"] expect(model.cost.input).toBe(5) expect(model.cost.output).toBe(15) - expect(model.cost.cache_read).toBe(2.5) - expect(model.cost.cache_write).toBe(7.5) + expect(model.cost.cache.read).toBe(2.5) + expect(model.cost.cache.write).toBe(7.5) }, }) }) @@ -931,7 +931,7 @@ test("getSmallModel returns appropriate small model", async () => { fn: async () => { const model = await Provider.getSmallModel("anthropic") expect(model).toBeDefined() - expect(model?.modelID).toContain("haiku") + expect(model?.id).toContain("haiku") }, }) }) @@ -957,7 +957,7 @@ test("getSmallModel respects config small_model override", async () => { const model = await Provider.getSmallModel("anthropic") expect(model).toBeDefined() expect(model?.providerID).toBe("anthropic") - expect(model?.modelID).toBe("claude-sonnet-4-20250514") + expect(model?.id).toBe("claude-sonnet-4-20250514") }, }) }) @@ -1046,7 +1046,7 @@ test("provider with custom npm package", async () => { fn: async () => { const providers = await Provider.list() expect(providers["local-llm"]).toBeDefined() - expect(providers["local-llm"].info.npm).toBe("@ai-sdk/openai-compatible") + expect(providers["local-llm"].models["llama-3"].api.npm).toBe("@ai-sdk/openai-compatible") expect(providers["local-llm"].options.baseURL).toBe("http://localhost:11434/v1") }, }) @@ -1082,7 +1082,7 @@ test("model alias name defaults to alias key when id differs", async () => { }, fn: async () => { const providers = await Provider.list() - expect(providers["anthropic"].info.models["sonnet"].name).toBe("sonnet") + expect(providers["anthropic"].models["sonnet"].name).toBe("sonnet") }, }) }) @@ -1123,8 +1123,8 @@ test("provider with multiple env var options only includes apiKey when single en fn: async () => { const providers = await Provider.list() expect(providers["multi-env"]).toBeDefined() - // When multiple env options exist, apiKey should NOT be auto-set - 
expect(providers["multi-env"].options.apiKey).toBeUndefined() + // When multiple env options exist, key should NOT be auto-set + expect(providers["multi-env"].key).toBeUndefined() }, }) }) @@ -1165,8 +1165,8 @@ test("provider with single env var includes apiKey automatically", async () => { fn: async () => { const providers = await Provider.list() expect(providers["single-env"]).toBeDefined() - // Single env option should auto-set apiKey - expect(providers["single-env"].options.apiKey).toBe("my-api-key") + // Single env option should auto-set key + expect(providers["single-env"].key).toBe("my-api-key") }, }) }) @@ -1201,7 +1201,7 @@ test("model cost overrides existing cost values", async () => { }, fn: async () => { const providers = await Provider.list() - const model = providers["anthropic"].info.models["claude-sonnet-4-20250514"] + const model = providers["anthropic"].models["claude-sonnet-4-20250514"] expect(model.cost.input).toBe(999) expect(model.cost.output).toBe(888) }, @@ -1249,11 +1249,11 @@ test("completely new provider not in database can be configured", async () => { fn: async () => { const providers = await Provider.list() expect(providers["brand-new-provider"]).toBeDefined() - expect(providers["brand-new-provider"].info.name).toBe("Brand New") - const model = providers["brand-new-provider"].info.models["new-model"] - expect(model.reasoning).toBe(true) - expect(model.attachment).toBe(true) - expect(model.modalities?.input).toContain("image") + expect(providers["brand-new-provider"].name).toBe("Brand New") + const model = providers["brand-new-provider"].models["new-model"] + expect(model.capabilities.reasoning).toBe(true) + expect(model.capabilities.attachment).toBe(true) + expect(model.capabilities.input.image).toBe(true) }, }) }) @@ -1322,7 +1322,7 @@ test("model with tool_call false", async () => { directory: tmp.path, fn: async () => { const providers = await Provider.list() - expect(providers["no-tools"].info.models["basic-model"].tool_call).toBe(false) + expect(providers["no-tools"].models["basic-model"].capabilities.toolcall).toBe(false) }, }) }) @@ -1357,7 +1357,7 @@ test("model defaults tool_call to true when not specified", async () => { directory: tmp.path, fn: async () => { const providers = await Provider.list() - expect(providers["default-tools"].info.models["model"].tool_call).toBe(true) + expect(providers["default-tools"].models["model"].capabilities.toolcall).toBe(true) }, }) }) @@ -1396,7 +1396,7 @@ test("model headers are preserved", async () => { directory: tmp.path, fn: async () => { const providers = await Provider.list() - const model = providers["headers-provider"].info.models["model"] + const model = providers["headers-provider"].models["model"] expect(model.headers).toEqual({ "X-Custom-Header": "custom-value", Authorization: "Bearer special-token", @@ -1465,8 +1465,8 @@ test("getModel returns consistent results", async () => { const model1 = await Provider.getModel("anthropic", "claude-sonnet-4-20250514") const model2 = await Provider.getModel("anthropic", "claude-sonnet-4-20250514") expect(model1.providerID).toEqual(model2.providerID) - expect(model1.modelID).toEqual(model2.modelID) - expect(model1.info).toEqual(model2.info) + expect(model1.id).toEqual(model2.id) + expect(model1).toEqual(model2) }, }) }) @@ -1501,7 +1501,7 @@ test("provider name defaults to id when not in database", async () => { directory: tmp.path, fn: async () => { const providers = await Provider.list() - expect(providers["my-custom-id"].info.name).toBe("my-custom-id") + 
expect(providers["my-custom-id"].name).toBe("my-custom-id") }, }) }) @@ -1601,7 +1601,7 @@ test("getProvider returns provider info", async () => { fn: async () => { const provider = await Provider.getProvider("anthropic") expect(provider).toBeDefined() - expect(provider?.info.id).toBe("anthropic") + expect(provider?.id).toBe("anthropic") }, }) }) @@ -1684,7 +1684,7 @@ test("model limit defaults to zero when not specified", async () => { directory: tmp.path, fn: async () => { const providers = await Provider.list() - const model = providers["no-limit"].info.models["model"] + const model = providers["no-limit"].models["model"] expect(model.limit.context).toBe(0) expect(model.limit.output).toBe(0) }, diff --git a/packages/sdk/js/src/gen/types.gen.ts b/packages/sdk/js/src/gen/types.gen.ts index 80348fb9ad40..6c80f0b7c52f 100644 --- a/packages/sdk/js/src/gen/types.gen.ts +++ b/packages/sdk/js/src/gen/types.gen.ts @@ -942,6 +942,75 @@ export type AgentConfig = { | undefined } +export type ProviderConfig = { + api?: string + name?: string + env?: Array + id?: string + npm?: string + models?: { + [key: string]: { + id?: string + name?: string + release_date?: string + attachment?: boolean + reasoning?: boolean + temperature?: boolean + tool_call?: boolean + cost?: { + input: number + output: number + cache_read?: number + cache_write?: number + context_over_200k?: { + input: number + output: number + cache_read?: number + cache_write?: number + } + } + limit?: { + context: number + output: number + } + modalities?: { + input: Array<"text" | "audio" | "image" | "video" | "pdf"> + output: Array<"text" | "audio" | "image" | "video" | "pdf"> + } + experimental?: boolean + status?: "alpha" | "beta" | "deprecated" + options?: { + [key: string]: unknown + } + headers?: { + [key: string]: string + } + provider?: { + npm: string + } + } + } + whitelist?: Array + blacklist?: Array + options?: { + apiKey?: string + baseURL?: string + /** + * GitHub Enterprise URL for copilot authentication + */ + enterpriseUrl?: string + /** + * Enable promptCacheKey for this provider (default false) + */ + setCacheKey?: boolean + /** + * Timeout in milliseconds for requests to this provider. Default is 300000 (5 minutes). Set to false to disable timeout. 
+ */ + timeout?: number | false + [key: string]: unknown | string | boolean | (number | false) | undefined + } +} + export type McpLocalConfig = { /** * Type of MCP server connection @@ -1100,74 +1169,7 @@ export type Config = { * Custom provider configurations and model overrides */ provider?: { - [key: string]: { - api?: string - name?: string - env?: Array - id?: string - npm?: string - models?: { - [key: string]: { - id?: string - name?: string - release_date?: string - attachment?: boolean - reasoning?: boolean - temperature?: boolean - tool_call?: boolean - cost?: { - input: number - output: number - cache_read?: number - cache_write?: number - context_over_200k?: { - input: number - output: number - cache_read?: number - cache_write?: number - } - } - limit?: { - context: number - output: number - } - modalities?: { - input: Array<"text" | "audio" | "image" | "video" | "pdf"> - output: Array<"text" | "audio" | "image" | "video" | "pdf"> - } - experimental?: boolean - status?: "alpha" | "beta" | "deprecated" - options?: { - [key: string]: unknown - } - headers?: { - [key: string]: string - } - provider?: { - npm: string - } - } - } - whitelist?: Array - blacklist?: Array - options?: { - apiKey?: string - baseURL?: string - /** - * GitHub Enterprise URL for copilot authentication - */ - enterpriseUrl?: string - /** - * Enable promptCacheKey for this provider (default false) - */ - setCacheKey?: boolean - /** - * Timeout in milliseconds for requests to this provider. Default is 300000 (5 minutes). Set to false to disable timeout. - */ - timeout?: number | false - [key: string]: unknown | string | boolean | (number | false) | undefined - } - } + [key: string]: ProviderConfig } /** * MCP (Model Context Protocol) server configurations @@ -1354,51 +1356,71 @@ export type Command = { export type Model = { id: string + providerID: string + api: { + id: string + url: string + npm: string + } name: string - release_date: string - attachment: boolean - reasoning: boolean - temperature: boolean - tool_call: boolean + capabilities: { + temperature: boolean + reasoning: boolean + attachment: boolean + toolcall: boolean + input: { + text: boolean + audio: boolean + image: boolean + video: boolean + pdf: boolean + } + output: { + text: boolean + audio: boolean + image: boolean + video: boolean + pdf: boolean + } + } cost: { input: number output: number - cache_read?: number - cache_write?: number - context_over_200k?: { + cache: { + read: number + write: number + } + experimentalOver200K?: { input: number output: number - cache_read?: number - cache_write?: number + cache: { + read: number + write: number + } } } limit: { context: number output: number } - modalities?: { - input: Array<"text" | "audio" | "image" | "video" | "pdf"> - output: Array<"text" | "audio" | "image" | "video" | "pdf"> - } - experimental?: boolean - status?: "alpha" | "beta" | "deprecated" + status: "alpha" | "beta" | "deprecated" | "active" options: { [key: string]: unknown } - headers?: { + headers: { [key: string]: string } - provider?: { - npm: string - } } export type Provider = { - api?: string + id: string name: string + source: "env" | "config" | "custom" | "api" env: Array - id: string - npm?: string + key?: string + options: { + [key: string]: unknown + } models: { [key: string]: Model } @@ -2665,7 +2687,55 @@ export type ProviderListResponses = { * List of providers */ 200: { - all: Array + all: Array<{ + api?: string + name: string + env: Array + id: string + npm?: string + models: { + [key: string]: { + id: string 
+ name: string + release_date: string + attachment: boolean + reasoning: boolean + temperature: boolean + tool_call: boolean + cost?: { + input: number + output: number + cache_read?: number + cache_write?: number + context_over_200k?: { + input: number + output: number + cache_read?: number + cache_write?: number + } + } + limit: { + context: number + output: number + } + modalities?: { + input: Array<"text" | "audio" | "image" | "video" | "pdf"> + output: Array<"text" | "audio" | "image" | "video" | "pdf"> + } + experimental?: boolean + status?: "alpha" | "beta" | "deprecated" + options: { + [key: string]: unknown + } + headers?: { + [key: string]: string + } + provider?: { + npm: string + } + } + } + }> default: { [key: string]: string } From e8c9b21f20195e5fb4a5990e2d039ba2b352c7be Mon Sep 17 00:00:00 2001 From: Aiden Cline Date: Wed, 3 Dec 2025 20:33:08 -0600 Subject: [PATCH 18/27] bump opentui --- bun.lock | 20 ++++++++++---------- packages/opencode/package.json | 4 ++-- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/bun.lock b/bun.lock index 714384ceb172..0b60b312305a 100644 --- a/bun.lock +++ b/bun.lock @@ -238,8 +238,8 @@ "@opencode-ai/sdk": "workspace:*", "@opencode-ai/util": "workspace:*", "@openrouter/ai-sdk-provider": "1.2.8", - "@opentui/core": "0.1.55", - "@opentui/solid": "0.1.55", + "@opentui/core": "0.1.56", + "@opentui/solid": "0.1.56", "@parcel/watcher": "2.5.1", "@pierre/precision-diffs": "catalog:", "@solid-primitives/event-bus": "1.1.2", @@ -1088,21 +1088,21 @@ "@opentelemetry/api": ["@opentelemetry/api@1.9.0", "", {}, "sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg=="], - "@opentui/core": ["@opentui/core@0.1.55", "", { "dependencies": { "bun-ffi-structs": "0.1.2", "diff": "8.0.2", "jimp": "1.6.0", "yoga-layout": "3.2.1" }, "optionalDependencies": { "@dimforge/rapier2d-simd-compat": "^0.17.3", "@opentui/core-darwin-arm64": "0.1.55", "@opentui/core-darwin-x64": "0.1.55", "@opentui/core-linux-arm64": "0.1.55", "@opentui/core-linux-x64": "0.1.55", "@opentui/core-win32-arm64": "0.1.55", "@opentui/core-win32-x64": "0.1.55", "bun-webgpu": "0.1.4", "planck": "^1.4.2", "three": "0.177.0" }, "peerDependencies": { "web-tree-sitter": "0.25.10" } }, "sha512-WafOO8eMf1/fmlFUfgooWyWJclQG5X/72VUebH+jN6/kSoSb91XJxHQgaKL9CQYFBNBIApQhAZn/sF9Qt60+lQ=="], + "@opentui/core": ["@opentui/core@0.1.56", "", { "dependencies": { "bun-ffi-structs": "0.1.2", "diff": "8.0.2", "jimp": "1.6.0", "yoga-layout": "3.2.1" }, "optionalDependencies": { "@dimforge/rapier2d-simd-compat": "^0.17.3", "@opentui/core-darwin-arm64": "0.1.56", "@opentui/core-darwin-x64": "0.1.56", "@opentui/core-linux-arm64": "0.1.56", "@opentui/core-linux-x64": "0.1.56", "@opentui/core-win32-arm64": "0.1.56", "@opentui/core-win32-x64": "0.1.56", "bun-webgpu": "0.1.4", "planck": "^1.4.2", "three": "0.177.0" }, "peerDependencies": { "web-tree-sitter": "0.25.10" } }, "sha512-TI5cSCPYythHIQYpAEdXyZhewGACn2TfnfC1qZmrSyEq33zFo4W7zpQ4EZNpy9xZJFCI+elAUVJFARwhudp9EQ=="], - "@opentui/core-darwin-arm64": ["@opentui/core-darwin-arm64@0.1.55", "", { "os": "darwin", "cpu": "arm64" }, "sha512-z2Prd/KKUbhPaSGBFv2q0nDtiLB/5oI3sGFDgf+YAfs6M6UfP9n0XkPUupbE1dx4lMyvwA9X8/QUnsQApd3E2g=="], + "@opentui/core-darwin-arm64": ["@opentui/core-darwin-arm64@0.1.56", "", { "os": "darwin", "cpu": "arm64" }, "sha512-x5U9J2k1Fmbb9Mdh1nOd/yZVpg4ARCrV5pFngpaeKrIWDhs8RLpQW3ap+r7uyFLGFkSn4h5wBR0jj6Dg+Tyw+A=="], - "@opentui/core-darwin-x64": ["@opentui/core-darwin-x64@0.1.55", "", { "os": "darwin", 
"cpu": "x64" }, "sha512-zjgGmIaTCWUvvQ9vIHJ0ypTkuFIA4ykKiZ16QxpG930bPr9fJ1xZ8MYj+6WSyuiao7tm6iWQfuYKT3tzA8+ItQ=="], + "@opentui/core-darwin-x64": ["@opentui/core-darwin-x64@0.1.56", "", { "os": "darwin", "cpu": "x64" }, "sha512-7swq9rV/SaNVBWoUbC7mlP1VNyKBl7SSwmyVMkcaBP71lkm95zWuh4pgGj82fLgZ9gITRBD95TJVDmTovOyW0A=="], - "@opentui/core-linux-arm64": ["@opentui/core-linux-arm64@0.1.55", "", { "os": "linux", "cpu": "arm64" }, "sha512-77EZtLxH0VW/Kw+6kTs9FrFWfhjaIjsK/o39DAWM1ZNdFDTXAa/MQNOFDlBXbNHiNqPOyxd0tol1nUFLr8ZtZg=="], + "@opentui/core-linux-arm64": ["@opentui/core-linux-arm64@0.1.56", "", { "os": "linux", "cpu": "arm64" }, "sha512-v8b+kiTlynAJzR0hFeVpGFzVi5PGqXAe3Zql9iTiQqTExkm/sR34sfC/P6rBOUhuAnos8ovPDKWtDb6eCTSm9g=="], - "@opentui/core-linux-x64": ["@opentui/core-linux-x64@0.1.55", "", { "os": "linux", "cpu": "x64" }, "sha512-o4RB1jqKWx4TM9v2trGUij6H2ymJCxID8BK3HWvRIjd71tpKkaMY4SxaMWGzvK89X40u8v9qKE04dileKNa10w=="], + "@opentui/core-linux-x64": ["@opentui/core-linux-x64@0.1.56", "", { "os": "linux", "cpu": "x64" }, "sha512-lbxgvAi5SBswK/2hoMPtLhPvJxASgquPUwvGTRHqzDkCvrOChP/loTjBQpL09/nAFc3jbM3SAbZtnEgA2SGYVw=="], - "@opentui/core-win32-arm64": ["@opentui/core-win32-arm64@0.1.55", "", { "os": "win32", "cpu": "arm64" }, "sha512-SYna371ZcQme6XjGI2ESHM2uMUZQRi9kgtJj5E22uH4wgBpPWFwf83EGWv78v+irvsypR+ZJgVfkwkz6JjgVTQ=="], + "@opentui/core-win32-arm64": ["@opentui/core-win32-arm64@0.1.56", "", { "os": "win32", "cpu": "arm64" }, "sha512-RoCAbvDo+59OevX+6GrEGbaueERiBVnTaWJkrS41hRAD2fFS3CZpW7UuS5jIg7zn5clHmOGyfvCiBkTRXmgkhw=="], - "@opentui/core-win32-x64": ["@opentui/core-win32-x64@0.1.55", "", { "os": "win32", "cpu": "x64" }, "sha512-ViPBCm+EtZ/4NmLqLAxcz31lVYGCe1ily+YmfAkoq1K/iuiXGhtD3mDrQuc3ayfTT8w5UwiYKyrUibJVJ/noVQ=="], + "@opentui/core-win32-x64": ["@opentui/core-win32-x64@0.1.56", "", { "os": "win32", "cpu": "x64" }, "sha512-i6N5TjZU5gRkJsKmH8e/qY9vwSk0rh6A5t37mHDGlzN4E5yO/MbBrYH4ppLp5stps9Zfi1Re51ofJX1s2hZY/Q=="], - "@opentui/solid": ["@opentui/solid@0.1.55", "", { "dependencies": { "@babel/core": "7.28.0", "@babel/preset-typescript": "7.27.1", "@opentui/core": "0.1.55", "babel-plugin-module-resolver": "5.0.2", "babel-preset-solid": "1.9.9", "s-js": "^0.4.9" }, "peerDependencies": { "solid-js": "1.9.9" } }, "sha512-6NWOt0k32tu5KCiddLkPjVNWd++vW3QNbEccuGOdSiotO5TuwK4g0rcLAG6haPOB7Mf/l6aC06FQNeexpBqvtQ=="], + "@opentui/solid": ["@opentui/solid@0.1.56", "", { "dependencies": { "@babel/core": "7.28.0", "@babel/preset-typescript": "7.27.1", "@opentui/core": "0.1.56", "babel-plugin-module-resolver": "5.0.2", "babel-preset-solid": "1.9.9", "s-js": "^0.4.9" }, "peerDependencies": { "solid-js": "1.9.9" } }, "sha512-3R7AfxsYHUyehwJK98rt5dI9u2WCT/uH/CYvddZIgXPHyfFm1SHJekMdy3DUoiQTCUllt68eFGKMv9zRi6Laww=="], "@oslojs/asn1": ["@oslojs/asn1@1.0.0", "", { "dependencies": { "@oslojs/binary": "1.0.0" } }, "sha512-zw/wn0sj0j0QKbIXfIlnEcTviaCzYOY3V5rAyjR6YtOByFtJiT574+8p9Wlach0lZH9fddD4yb9laEAIl4vXQA=="], diff --git a/packages/opencode/package.json b/packages/opencode/package.json index c3312e187d48..687c0d385b72 100644 --- a/packages/opencode/package.json +++ b/packages/opencode/package.json @@ -64,8 +64,8 @@ "@opencode-ai/sdk": "workspace:*", "@opencode-ai/util": "workspace:*", "@openrouter/ai-sdk-provider": "1.2.8", - "@opentui/core": "0.1.55", - "@opentui/solid": "0.1.55", + "@opentui/core": "0.1.56", + "@opentui/solid": "0.1.56", "@parcel/watcher": "2.5.1", "@pierre/precision-diffs": "catalog:", "@solid-primitives/event-bus": "1.1.2", From 38bff1b372339af068dfc02d25f124a7b4911ee2 Mon Sep 17 00:00:00 
2001 From: Github Action Date: Thu, 4 Dec 2025 02:34:26 +0000 Subject: [PATCH 19/27] Update Nix flake.lock and hashes --- flake.lock | 6 +++--- nix/hashes.json | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/flake.lock b/flake.lock index 45c31d9ccf28..4e7cf41e1b73 100644 --- a/flake.lock +++ b/flake.lock @@ -2,11 +2,11 @@ "nodes": { "nixpkgs": { "locked": { - "lastModified": 1764642553, - "narHash": "sha256-mvbFFzVBhVK1FjyPHZGMAKpNiqkr7k++xIwy+p/NQvA=", + "lastModified": 1764733908, + "narHash": "sha256-QJiih52NU+nm7XQWCj+K8SwUdIEayDQ1FQgjkYISt4I=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "f720de59066162ee879adcc8c79e15c51fe6bfb4", + "rev": "cadcc8de247676e4751c9d4a935acb2c0b059113", "type": "github" }, "original": { diff --git a/nix/hashes.json b/nix/hashes.json index 7c7fc45f63ee..47634e2ed82e 100644 --- a/nix/hashes.json +++ b/nix/hashes.json @@ -1,3 +1,3 @@ { - "nodeModules": "sha256-QhqAa47P3Y2aoMGnr8l1nLq0EQb4qEm75dGfNjyzbpU=" + "nodeModules": "sha256-ZGKC7h4ScHDzVYj8qb1lN/weZhyZivPS8kpNAZvgO0I=" } From 598d63db6349f9ff4c05bfe798f51e8881211e4f Mon Sep 17 00:00:00 2001 From: Aiden Cline Date: Wed, 3 Dec 2025 20:38:55 -0600 Subject: [PATCH 20/27] fix: dax typo --- packages/opencode/src/provider/transform.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/opencode/src/provider/transform.ts b/packages/opencode/src/provider/transform.ts index 8afac3a65eb3..c703a57e1cc5 100644 --- a/packages/opencode/src/provider/transform.ts +++ b/packages/opencode/src/provider/transform.ts @@ -156,7 +156,7 @@ export namespace ProviderTransform { } } - if (model.providerID.includes("gpt-5") && !model.api.id.includes("gpt-5-chat")) { + if (model.api.id.includes("gpt-5") && !model.api.id.includes("gpt-5-chat")) { if (model.providerID.includes("codex")) { result["store"] = false } From f33f8ca1098f1261a80ed67d74a9fef8e478264c Mon Sep 17 00:00:00 2001 From: Aiden Cline Date: Wed, 3 Dec 2025 20:43:47 -0600 Subject: [PATCH 21/27] fix: compaction type issue --- packages/opencode/src/session/compaction.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/opencode/src/session/compaction.ts b/packages/opencode/src/session/compaction.ts index b83adafbe3c3..07468995b670 100644 --- a/packages/opencode/src/session/compaction.ts +++ b/packages/opencode/src/session/compaction.ts @@ -184,7 +184,7 @@ export namespace SessionCompaction { async transformParams(args) { if (args.type === "stream") { // @ts-expect-error - args.params.prompt = ProviderTransform.message(args.params.prompt, model.providerID, model.modelID) + args.params.prompt = ProviderTransform.message(args.params.prompt, model) } return args.params }, From 32b5db754e948e010db8b010ba26d3331a747d77 Mon Sep 17 00:00:00 2001 From: Aiden Cline Date: Wed, 3 Dec 2025 20:45:55 -0600 Subject: [PATCH 22/27] fix: provider id issue --- packages/opencode/src/session/processor.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/opencode/src/session/processor.ts b/packages/opencode/src/session/processor.ts index 8b4faf02653b..bf994755c1f5 100644 --- a/packages/opencode/src/session/processor.ts +++ b/packages/opencode/src/session/processor.ts @@ -341,7 +341,7 @@ export namespace SessionProcessor { error: e, stack: JSON.stringify(e.stack), }) - const error = MessageV2.fromError(e, { providerID: input.sessionID }) + const error = MessageV2.fromError(e, { providerID: input.model.providerID }) const retry = SessionRetry.retryable(error) if (retry !== undefined) { attempt++ From 
88cfb979bec05d31ad01be9ccc3fd998a4429987 Mon Sep 17 00:00:00 2001 From: Aiden Cline Date: Wed, 3 Dec 2025 21:08:12 -0600 Subject: [PATCH 23/27] ci: add note about iife --- .github/workflows/review.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/review.yml b/.github/workflows/review.yml index c726081fc8b7..32c7c7b1144c 100644 --- a/.github/workflows/review.yml +++ b/.github/workflows/review.yml @@ -74,7 +74,9 @@ jobs: Please check all the code changes in this pull request against the style guide, also look for any bugs if they exist. Diffs are important but make sure you read the entire file to get proper context. Make it clear the suggestions are merely suggestions and the human can decide what to do + When critiquing code against the style guide, be sure that the code is ACTUALLY in violation, don't complain about else statements if they already use early returns there. You may complain about excessive nesting though, regardless of else statement usage. + When critiquing code style don't be a zealot, we don't like "let" statements but sometimes they are the simplest option; if someone does a bunch of nesting with let, they should consider using iife (see packages/opencode/src/util.iife.ts) Use the gh cli to create comments on the files for the violations. Try to leave the comment on the exact line number. If you have a suggested fix include it in a suggestion code block. From 4bc3fa08261c1e778c08b5b7bc75bbad95fb2323 Mon Sep 17 00:00:00 2001 From: Jack Bisceglia <57922742+jackbisceglia@users.noreply.github.com> Date: Wed, 3 Dec 2025 22:10:26 -0500 Subject: [PATCH 24/27] docs: remove outdated theme section as system theme is now added back (#5041) --- packages/web/src/content/docs/1-0.mdx | 4 ---- 1 file changed, 4 deletions(-) diff --git a/packages/web/src/content/docs/1-0.mdx b/packages/web/src/content/docs/1-0.mdx index 6737482a7cf3..11d3b629f43d 100644 --- a/packages/web/src/content/docs/1-0.mdx +++ b/packages/web/src/content/docs/1-0.mdx @@ -44,10 +44,6 @@ We removed some functionality that we weren't sure anyone actually used. If some ## Breaking changes -### Theme - -The `system` theme has not yet been ported and custom themes aren't loaded yet but both of these will be fixed this week. 
- ### Keybinds renamed - messages_revert -> messages_undo From 46790e57e9abfa363c3030d57d033f4b13b5c91e Mon Sep 17 00:00:00 2001 From: Jakub Matjanowski Date: Thu, 4 Dec 2025 04:31:36 +0100 Subject: [PATCH 25/27] feat: Enhance DeepSeek reasoning content handling (#4975) Co-authored-by: Aiden Cline --- packages/opencode/src/provider/transform.ts | 40 ++++ .../opencode/test/provider/transform.test.ts | 207 ++++++++++++++++++ 2 files changed, 247 insertions(+) diff --git a/packages/opencode/src/provider/transform.ts b/packages/opencode/src/provider/transform.ts index c703a57e1cc5..09dfd69a317e 100644 --- a/packages/opencode/src/provider/transform.ts +++ b/packages/opencode/src/provider/transform.ts @@ -63,6 +63,46 @@ export namespace ProviderTransform { return result } + // DeepSeek: Handle reasoning_content for tool call continuations + // - With tool calls: Include reasoning_content in providerOptions so model can continue reasoning + // - Without tool calls: Strip reasoning (new turn doesn't need previous reasoning) + // See: https://api-docs.deepseek.com/guides/thinking_mode + if (model.providerID === "deepseek" || model.api.id.toLowerCase().includes("deepseek")) { + return msgs.map((msg) => { + if (msg.role === "assistant" && Array.isArray(msg.content)) { + const reasoningParts = msg.content.filter((part: any) => part.type === "reasoning") + const hasToolCalls = msg.content.some((part: any) => part.type === "tool-call") + const reasoningText = reasoningParts.map((part: any) => part.text).join("") + + // Filter out reasoning parts from content + const filteredContent = msg.content.filter((part: any) => part.type !== "reasoning") + + // If this message has tool calls and reasoning, include reasoning_content + // so DeepSeek can continue reasoning after tool execution + if (hasToolCalls && reasoningText) { + return { + ...msg, + content: filteredContent, + providerOptions: { + ...msg.providerOptions, + openaiCompatible: { + ...(msg.providerOptions as any)?.openaiCompatible, + reasoning_content: reasoningText, + }, + }, + } + } + + // For final answers (no tool calls), just strip reasoning + return { + ...msg, + content: filteredContent, + } + } + return msg + }) + } + return msgs } diff --git a/packages/opencode/test/provider/transform.test.ts b/packages/opencode/test/provider/transform.test.ts index e6080d54c68d..648f108bd66f 100644 --- a/packages/opencode/test/provider/transform.test.ts +++ b/packages/opencode/test/provider/transform.test.ts @@ -96,3 +96,210 @@ describe("ProviderTransform.maxOutputTokens", () => { }) }) }) + +describe("ProviderTransform.message - DeepSeek reasoning content", () => { + test("DeepSeek with tool calls includes reasoning_content in providerOptions", () => { + const msgs = [ + { + role: "assistant", + content: [ + { type: "reasoning", text: "Let me think about this..." 
}, + { + type: "tool-call", + toolCallId: "test", + toolName: "bash", + input: { command: "echo hello" }, + }, + ], + }, + ] as any[] + + const result = ProviderTransform.message(msgs, { + id: "deepseek/deepseek-chat", + providerID: "deepseek", + api: { + id: "deepseek-chat", + url: "https://api.deepseek.com", + npm: "@ai-sdk/openai-compatible", + }, + name: "DeepSeek Chat", + capabilities: { + temperature: true, + reasoning: true, + attachment: false, + toolcall: true, + input: { text: true, audio: false, image: false, video: false, pdf: false }, + output: { text: true, audio: false, image: false, video: false, pdf: false }, + }, + cost: { + input: 0.001, + output: 0.002, + cache: { read: 0.0001, write: 0.0002 }, + }, + limit: { + context: 128000, + output: 8192, + }, + status: "active", + options: {}, + headers: {}, + }) + + expect(result).toHaveLength(1) + expect(result[0].content).toEqual([ + { + type: "tool-call", + toolCallId: "test", + toolName: "bash", + input: { command: "echo hello" }, + }, + ]) + expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBe("Let me think about this...") + }) + + test("DeepSeek without tool calls strips reasoning from content", () => { + const msgs = [ + { + role: "assistant", + content: [ + { type: "reasoning", text: "Let me think about this..." }, + { type: "text", text: "Final answer" }, + ], + }, + ] as any[] + + const result = ProviderTransform.message(msgs, { + id: "deepseek/deepseek-chat", + providerID: "deepseek", + api: { + id: "deepseek-chat", + url: "https://api.deepseek.com", + npm: "@ai-sdk/openai-compatible", + }, + name: "DeepSeek Chat", + capabilities: { + temperature: true, + reasoning: true, + attachment: false, + toolcall: true, + input: { text: true, audio: false, image: false, video: false, pdf: false }, + output: { text: true, audio: false, image: false, video: false, pdf: false }, + }, + cost: { + input: 0.001, + output: 0.002, + cache: { read: 0.0001, write: 0.0002 }, + }, + limit: { + context: 128000, + output: 8192, + }, + status: "active", + options: {}, + headers: {}, + }) + + expect(result).toHaveLength(1) + expect(result[0].content).toEqual([{ type: "text", text: "Final answer" }]) + expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBeUndefined() + }) + + test("DeepSeek model ID containing 'deepseek' matches (case insensitive)", () => { + const msgs = [ + { + role: "assistant", + content: [ + { type: "reasoning", text: "Thinking..." 
}, + { + type: "tool-call", + toolCallId: "test", + toolName: "get_weather", + input: { location: "Hangzhou" }, + }, + ], + }, + ] as any[] + + const result = ProviderTransform.message(msgs, { + id: "someprovider/deepseek-reasoner", + providerID: "someprovider", + api: { + id: "deepseek-reasoner", + url: "https://api.someprovider.com", + npm: "@ai-sdk/openai-compatible", + }, + name: "SomeProvider DeepSeek Reasoner", + capabilities: { + temperature: true, + reasoning: true, + attachment: false, + toolcall: true, + input: { text: true, audio: false, image: false, video: false, pdf: false }, + output: { text: true, audio: false, image: false, video: false, pdf: false }, + }, + cost: { + input: 0.001, + output: 0.002, + cache: { read: 0.0001, write: 0.0002 }, + }, + limit: { + context: 128000, + output: 8192, + }, + status: "active", + options: {}, + headers: {}, + }) + + expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBe("Thinking...") + }) + + test("Non-DeepSeek providers leave reasoning content unchanged", () => { + const msgs = [ + { + role: "assistant", + content: [ + { type: "reasoning", text: "Should not be processed" }, + { type: "text", text: "Answer" }, + ], + }, + ] as any[] + + const result = ProviderTransform.message(msgs, { + id: "openai/gpt-4", + providerID: "openai", + api: { + id: "gpt-4", + url: "https://api.openai.com", + npm: "@ai-sdk/openai", + }, + name: "GPT-4", + capabilities: { + temperature: true, + reasoning: false, + attachment: true, + toolcall: true, + input: { text: true, audio: false, image: true, video: false, pdf: false }, + output: { text: true, audio: false, image: false, video: false, pdf: false }, + }, + cost: { + input: 0.03, + output: 0.06, + cache: { read: 0.001, write: 0.002 }, + }, + limit: { + context: 128000, + output: 4096, + }, + status: "active", + options: {}, + headers: {}, + }) + + expect(result[0].content).toEqual([ + { type: "reasoning", text: "Should not be processed" }, + { type: "text", text: "Answer" }, + ]) + expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBeUndefined() + }) +}) From dcfeb5298323473e18f87be3c99a01e7ab29e7a7 Mon Sep 17 00:00:00 2001 From: opencode Date: Thu, 4 Dec 2025 03:38:18 +0000 Subject: [PATCH 26/27] release: v1.0.130 --- bun.lock | 30 +++++++++++++------------- packages/console/app/package.json | 2 +- packages/console/core/package.json | 2 +- packages/console/function/package.json | 2 +- packages/console/mail/package.json | 2 +- packages/desktop/package.json | 2 +- packages/enterprise/package.json | 2 +- packages/extensions/zed/extension.toml | 12 +++++------ packages/function/package.json | 2 +- packages/opencode/package.json | 2 +- packages/plugin/package.json | 4 ++-- packages/sdk/js/package.json | 4 ++-- packages/slack/package.json | 2 +- packages/tauri/package.json | 2 +- packages/ui/package.json | 2 +- packages/util/package.json | 2 +- packages/web/package.json | 2 +- sdks/vscode/package.json | 2 +- 18 files changed, 39 insertions(+), 39 deletions(-) diff --git a/bun.lock b/bun.lock index 0b60b312305a..93ec0ebf17ad 100644 --- a/bun.lock +++ b/bun.lock @@ -20,7 +20,7 @@ }, "packages/console/app": { "name": "@opencode-ai/console-app", - "version": "1.0.129", + "version": "1.0.130", "dependencies": { "@cloudflare/vite-plugin": "1.15.2", "@ibm/plex": "6.4.1", @@ -48,7 +48,7 @@ }, "packages/console/core": { "name": "@opencode-ai/console-core", - "version": "1.0.129", + "version": "1.0.130", "dependencies": { "@aws-sdk/client-sts": "3.782.0", "@jsx-email/render": 
"1.1.1", @@ -75,7 +75,7 @@ }, "packages/console/function": { "name": "@opencode-ai/console-function", - "version": "1.0.129", + "version": "1.0.130", "dependencies": { "@ai-sdk/anthropic": "2.0.0", "@ai-sdk/openai": "2.0.2", @@ -99,7 +99,7 @@ }, "packages/console/mail": { "name": "@opencode-ai/console-mail", - "version": "1.0.129", + "version": "1.0.130", "dependencies": { "@jsx-email/all": "2.2.3", "@jsx-email/cli": "1.4.3", @@ -123,7 +123,7 @@ }, "packages/desktop": { "name": "@opencode-ai/desktop", - "version": "1.0.129", + "version": "1.0.130", "dependencies": { "@kobalte/core": "catalog:", "@opencode-ai/sdk": "workspace:*", @@ -164,7 +164,7 @@ }, "packages/enterprise": { "name": "@opencode-ai/enterprise", - "version": "1.0.129", + "version": "1.0.130", "dependencies": { "@opencode-ai/ui": "workspace:*", "@opencode-ai/util": "workspace:*", @@ -192,7 +192,7 @@ }, "packages/function": { "name": "@opencode-ai/function", - "version": "1.0.129", + "version": "1.0.130", "dependencies": { "@octokit/auth-app": "8.0.1", "@octokit/rest": "22.0.0", @@ -208,7 +208,7 @@ }, "packages/opencode": { "name": "opencode", - "version": "1.0.129", + "version": "1.0.130", "bin": { "opencode": "./bin/opencode", }, @@ -297,7 +297,7 @@ }, "packages/plugin": { "name": "@opencode-ai/plugin", - "version": "1.0.129", + "version": "1.0.130", "dependencies": { "@opencode-ai/sdk": "workspace:*", "zod": "catalog:", @@ -317,7 +317,7 @@ }, "packages/sdk/js": { "name": "@opencode-ai/sdk", - "version": "1.0.129", + "version": "1.0.130", "devDependencies": { "@hey-api/openapi-ts": "0.81.0", "@tsconfig/node22": "catalog:", @@ -328,7 +328,7 @@ }, "packages/slack": { "name": "@opencode-ai/slack", - "version": "1.0.129", + "version": "1.0.130", "dependencies": { "@opencode-ai/sdk": "workspace:*", "@slack/bolt": "^3.17.1", @@ -341,7 +341,7 @@ }, "packages/tauri": { "name": "@opencode-ai/tauri", - "version": "1.0.129", + "version": "1.0.130", "dependencies": { "@tauri-apps/api": "^2", "@tauri-apps/plugin-opener": "^2", @@ -354,7 +354,7 @@ }, "packages/ui": { "name": "@opencode-ai/ui", - "version": "1.0.129", + "version": "1.0.130", "dependencies": { "@kobalte/core": "catalog:", "@opencode-ai/sdk": "workspace:*", @@ -386,7 +386,7 @@ }, "packages/util": { "name": "@opencode-ai/util", - "version": "1.0.129", + "version": "1.0.130", "dependencies": { "zod": "catalog:", }, @@ -397,7 +397,7 @@ }, "packages/web": { "name": "@opencode-ai/web", - "version": "1.0.129", + "version": "1.0.130", "dependencies": { "@astrojs/cloudflare": "12.6.3", "@astrojs/markdown-remark": "6.3.1", diff --git a/packages/console/app/package.json b/packages/console/app/package.json index 6db271e24b1e..663b1e160dab 100644 --- a/packages/console/app/package.json +++ b/packages/console/app/package.json @@ -1,6 +1,6 @@ { "name": "@opencode-ai/console-app", - "version": "1.0.129", + "version": "1.0.130", "type": "module", "scripts": { "typecheck": "tsgo --noEmit", diff --git a/packages/console/core/package.json b/packages/console/core/package.json index 69f4d14c3523..c0125fd7d832 100644 --- a/packages/console/core/package.json +++ b/packages/console/core/package.json @@ -1,7 +1,7 @@ { "$schema": "https://json.schemastore.org/package.json", "name": "@opencode-ai/console-core", - "version": "1.0.129", + "version": "1.0.130", "private": true, "type": "module", "dependencies": { diff --git a/packages/console/function/package.json b/packages/console/function/package.json index f175147e601a..df3c78bfcb17 100644 --- a/packages/console/function/package.json +++ 
b/packages/console/function/package.json @@ -1,6 +1,6 @@ { "name": "@opencode-ai/console-function", - "version": "1.0.129", + "version": "1.0.130", "$schema": "https://json.schemastore.org/package.json", "private": true, "type": "module", diff --git a/packages/console/mail/package.json b/packages/console/mail/package.json index 5c9505cc9031..bd28176e3233 100644 --- a/packages/console/mail/package.json +++ b/packages/console/mail/package.json @@ -1,6 +1,6 @@ { "name": "@opencode-ai/console-mail", - "version": "1.0.129", + "version": "1.0.130", "dependencies": { "@jsx-email/all": "2.2.3", "@jsx-email/cli": "1.4.3", diff --git a/packages/desktop/package.json b/packages/desktop/package.json index 4b797f62af64..367e92b6616a 100644 --- a/packages/desktop/package.json +++ b/packages/desktop/package.json @@ -1,6 +1,6 @@ { "name": "@opencode-ai/desktop", - "version": "1.0.129", + "version": "1.0.130", "description": "", "type": "module", "scripts": { diff --git a/packages/enterprise/package.json b/packages/enterprise/package.json index 96c973b66f9f..dd422ca892fb 100644 --- a/packages/enterprise/package.json +++ b/packages/enterprise/package.json @@ -1,6 +1,6 @@ { "name": "@opencode-ai/enterprise", - "version": "1.0.129", + "version": "1.0.130", "private": true, "type": "module", "scripts": { diff --git a/packages/extensions/zed/extension.toml b/packages/extensions/zed/extension.toml index 1a9788adb30f..7e3b9056848c 100644 --- a/packages/extensions/zed/extension.toml +++ b/packages/extensions/zed/extension.toml @@ -1,7 +1,7 @@ id = "opencode" name = "OpenCode" description = "The AI coding agent built for the terminal" -version = "1.0.129" +version = "1.0.130" schema_version = 1 authors = ["Anomaly"] repository = "https://github.com/sst/opencode" @@ -11,26 +11,26 @@ name = "OpenCode" icon = "./icons/opencode.svg" [agent_servers.opencode.targets.darwin-aarch64] -archive = "https://github.com/sst/opencode/releases/download/v1.0.129/opencode-darwin-arm64.zip" +archive = "https://github.com/sst/opencode/releases/download/v1.0.130/opencode-darwin-arm64.zip" cmd = "./opencode" args = ["acp"] [agent_servers.opencode.targets.darwin-x86_64] -archive = "https://github.com/sst/opencode/releases/download/v1.0.129/opencode-darwin-x64.zip" +archive = "https://github.com/sst/opencode/releases/download/v1.0.130/opencode-darwin-x64.zip" cmd = "./opencode" args = ["acp"] [agent_servers.opencode.targets.linux-aarch64] -archive = "https://github.com/sst/opencode/releases/download/v1.0.129/opencode-linux-arm64.zip" +archive = "https://github.com/sst/opencode/releases/download/v1.0.130/opencode-linux-arm64.zip" cmd = "./opencode" args = ["acp"] [agent_servers.opencode.targets.linux-x86_64] -archive = "https://github.com/sst/opencode/releases/download/v1.0.129/opencode-linux-x64.zip" +archive = "https://github.com/sst/opencode/releases/download/v1.0.130/opencode-linux-x64.zip" cmd = "./opencode" args = ["acp"] [agent_servers.opencode.targets.windows-x86_64] -archive = "https://github.com/sst/opencode/releases/download/v1.0.129/opencode-windows-x64.zip" +archive = "https://github.com/sst/opencode/releases/download/v1.0.130/opencode-windows-x64.zip" cmd = "./opencode.exe" args = ["acp"] diff --git a/packages/function/package.json b/packages/function/package.json index 7c886f44d863..e94d8cd3d13c 100644 --- a/packages/function/package.json +++ b/packages/function/package.json @@ -1,6 +1,6 @@ { "name": "@opencode-ai/function", - "version": "1.0.129", + "version": "1.0.130", "$schema": 
"https://json.schemastore.org/package.json", "private": true, "type": "module", diff --git a/packages/opencode/package.json b/packages/opencode/package.json index 687c0d385b72..e7d9153bd985 100644 --- a/packages/opencode/package.json +++ b/packages/opencode/package.json @@ -1,6 +1,6 @@ { "$schema": "https://json.schemastore.org/package.json", - "version": "1.0.129", + "version": "1.0.130", "name": "opencode", "type": "module", "private": true, diff --git a/packages/plugin/package.json b/packages/plugin/package.json index 2871fc7db2e7..9967420b4da2 100644 --- a/packages/plugin/package.json +++ b/packages/plugin/package.json @@ -1,7 +1,7 @@ { "$schema": "https://json.schemastore.org/package.json", "name": "@opencode-ai/plugin", - "version": "1.0.129", + "version": "1.0.130", "type": "module", "scripts": { "typecheck": "tsgo --noEmit", @@ -24,4 +24,4 @@ "typescript": "catalog:", "@typescript/native-preview": "catalog:" } -} +} \ No newline at end of file diff --git a/packages/sdk/js/package.json b/packages/sdk/js/package.json index 9ea1689ae24a..49de47d2d174 100644 --- a/packages/sdk/js/package.json +++ b/packages/sdk/js/package.json @@ -1,7 +1,7 @@ { "$schema": "https://json.schemastore.org/package.json", "name": "@opencode-ai/sdk", - "version": "1.0.129", + "version": "1.0.130", "type": "module", "scripts": { "typecheck": "tsgo --noEmit", @@ -26,4 +26,4 @@ "publishConfig": { "directory": "dist" } -} +} \ No newline at end of file diff --git a/packages/slack/package.json b/packages/slack/package.json index f81f9069f2bd..bcae3b7a4d89 100644 --- a/packages/slack/package.json +++ b/packages/slack/package.json @@ -1,6 +1,6 @@ { "name": "@opencode-ai/slack", - "version": "1.0.129", + "version": "1.0.130", "type": "module", "scripts": { "dev": "bun run src/index.ts", diff --git a/packages/tauri/package.json b/packages/tauri/package.json index 01f42b4bfe11..5e71959f26f8 100644 --- a/packages/tauri/package.json +++ b/packages/tauri/package.json @@ -1,7 +1,7 @@ { "name": "@opencode-ai/tauri", "private": true, - "version": "1.0.129", + "version": "1.0.130", "type": "module", "scripts": { "dev": "vite", diff --git a/packages/ui/package.json b/packages/ui/package.json index 78d7c2265687..b9011d6c860d 100644 --- a/packages/ui/package.json +++ b/packages/ui/package.json @@ -1,6 +1,6 @@ { "name": "@opencode-ai/ui", - "version": "1.0.129", + "version": "1.0.130", "type": "module", "exports": { "./*": "./src/components/*.tsx", diff --git a/packages/util/package.json b/packages/util/package.json index a9f365df5fcd..c891a034666c 100644 --- a/packages/util/package.json +++ b/packages/util/package.json @@ -1,6 +1,6 @@ { "name": "@opencode-ai/util", - "version": "1.0.129", + "version": "1.0.130", "private": true, "type": "module", "exports": { diff --git a/packages/web/package.json b/packages/web/package.json index 1789991fc32b..1277872da7fc 100644 --- a/packages/web/package.json +++ b/packages/web/package.json @@ -1,7 +1,7 @@ { "name": "@opencode-ai/web", "type": "module", - "version": "1.0.129", + "version": "1.0.130", "scripts": { "dev": "astro dev", "dev:remote": "VITE_API_URL=https://api.opencode.ai astro dev", diff --git a/sdks/vscode/package.json b/sdks/vscode/package.json index bc20f1a18b1a..fd0b3e635413 100644 --- a/sdks/vscode/package.json +++ b/sdks/vscode/package.json @@ -2,7 +2,7 @@ "name": "opencode", "displayName": "opencode", "description": "opencode for VS Code", - "version": "1.0.129", + "version": "1.0.130", "publisher": "sst-dev", "repository": { "type": "git", From 
43d2cc4a69c959a2b88a71f134250cb135eaab29 Mon Sep 17 00:00:00 2001 From: "opencode-agent[bot]" Date: Thu, 4 Dec 2025 03:43:39 +0000 Subject: [PATCH 27/27] sync: record last synced tag v1.0.130 Fixed type error in prompt.ts: model.modelID -> model.id --- .github/last-synced-tag | 2 +- packages/opencode/src/session/prompt.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/last-synced-tag b/.github/last-synced-tag index 91dd8d56caa1..93e9cce59931 100644 --- a/.github/last-synced-tag +++ b/.github/last-synced-tag @@ -1 +1 @@ -v1.0.129 +v1.0.130 diff --git a/packages/opencode/src/session/prompt.ts b/packages/opencode/src/session/prompt.ts index 9315838cef4e..b125425814cd 100644 --- a/packages/opencode/src/session/prompt.ts +++ b/packages/opencode/src/session/prompt.ts @@ -356,7 +356,7 @@ export namespace SessionPrompt { agent: lastUser.agent, messageID: assistantMessage.id, callID: part.callID, - extra: { providerID: model.providerID, modelID: model.modelID }, + extra: { providerID: model.providerID, modelID: model.id }, metadata: async () => {}, }, )