4 changes: 3 additions & 1 deletion packages/opencode/src/agent/prompt/compaction.txt
@@ -1,6 +1,6 @@
You are a helpful AI assistant tasked with summarizing conversations.

When asked to summarize, provide a detailed but concise summary of the conversation.
When asked to summarize, provide a detailed but concise summary of the conversation.
Focus on information that would be helpful for continuing the conversation, including:
- What was done
- What is currently being worked on
@@ -10,3 +10,5 @@ Focus on information that would be helpful for continuing the conversation, including:
- Important technical decisions and why they were made

Your summary should be comprehensive enough to provide context but concise enough to be quickly understood.

Do not respond to any questions in the conversation, only output the summary.
6 changes: 6 additions & 0 deletions packages/opencode/src/config/config.ts
@@ -1161,6 +1161,12 @@ export namespace Config {
.object({
auto: z.boolean().optional().describe("Enable automatic compaction when context is full (default: true)"),
prune: z.boolean().optional().describe("Enable pruning of old tool outputs (default: true)"),
reserved: z
.number()
.int()
.min(0)
.optional()
.describe("Token buffer for compaction. Leaves enough window to avoid overflow during compaction."),
})
.optional(),
experimental: z
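For reference, the new `reserved` knob sits alongside the existing compaction options. A hypothetical config object matching the extended schema (shape only; the 30k figure and the explicitly spelled-out defaults are illustrative, not values from this PR):

```ts
// Hypothetical config object accepted by the extended schema above; values are illustrative.
const config = {
  compaction: {
    auto: true, // automatic compaction when context fills up (default: true)
    prune: true, // prune old tool outputs (default: true)
    reserved: 30_000, // token buffer kept free so the compaction pass itself doesn't overflow
  },
}
```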
28 changes: 5 additions & 23 deletions packages/opencode/src/provider/transform.ts
@@ -5,6 +5,7 @@ import type { JSONSchema } from "zod/v4/core"
import type { Provider } from "./provider"
import type { ModelsDev } from "./models"
import { iife } from "@/util/iife"
import { Flag } from "@/flag/flag"

type Modality = NonNullable<ModelsDev.Model["modalities"]>["input"][number]

@@ -17,6 +18,8 @@ function mimeToModality(mime: string): Modality | undefined {
}

export namespace ProviderTransform {
export const OUTPUT_TOKEN_MAX = Flag.OPENCODE_EXPERIMENTAL_OUTPUT_TOKEN_MAX || 32_000

// Maps npm package to the key the AI SDK expects for providerOptions
function sdkKey(npm: string): string | undefined {
switch (npm) {
@@ -723,29 +726,8 @@
return { [key]: options }
}

export function maxOutputTokens(
npm: string,
options: Record<string, any>,
modelLimit: number,
globalLimit: number,
): number {
const modelCap = modelLimit || globalLimit
const standardLimit = Math.min(modelCap, globalLimit)

if (npm === "@ai-sdk/anthropic" || npm === "@ai-sdk/google-vertex/anthropic") {
const thinking = options?.["thinking"]
const budgetTokens = typeof thinking?.["budgetTokens"] === "number" ? thinking["budgetTokens"] : 0
const enabled = thinking?.["type"] === "enabled"
if (enabled && budgetTokens > 0) {
// Return text tokens so that text + thinking <= model cap, preferring 32k text when possible.
if (budgetTokens + standardLimit <= modelCap) {
return standardLimit
}
return modelCap - budgetTokens
}
}

return standardLimit
export function maxOutputTokens(model: Provider.Model): number {
return Math.min(model.limit.output, OUTPUT_TOKEN_MAX) || OUTPUT_TOKEN_MAX
}

export function schema(model: Provider.Model, schema: JSONSchema.BaseSchema | JSONSchema7): JSONSchema7 {
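The reworked helper drops the thinking-budget arithmetic entirely and just clamps to the smaller of the model's declared output limit and the 32k default (or `OPENCODE_EXPERIMENTAL_OUTPUT_TOKEN_MAX`), falling back to the default when the model reports no limit. A standalone sketch of the resulting behavior, with `Provider.Model` reduced to the one field this path reads:

```ts
// Standalone sketch of the new cap; OUTPUT_TOKEN_MAX mirrors the 32k default above.
const OUTPUT_TOKEN_MAX = 32_000

function maxOutputTokens(model: { limit: { output: number } }): number {
  return Math.min(model.limit.output, OUTPUT_TOKEN_MAX) || OUTPUT_TOKEN_MAX
}

maxOutputTokens({ limit: { output: 64_000 } }) // 32_000 — clamped to the global cap
maxOutputTokens({ limit: { output: 8_192 } }) // 8_192 — the model's own limit is lower
maxOutputTokens({ limit: { output: 0 } }) // 32_000 — min() yields 0, so the || falls back
```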
49 changes: 41 additions & 8 deletions packages/opencode/src/session/compaction.ts
@@ -6,14 +6,14 @@ import { Instance } from "../project/instance"
import { Provider } from "../provider/provider"
import { MessageV2 } from "./message-v2"
import z from "zod"
import { SessionPrompt } from "./prompt"
import { Token } from "../util/token"
import { Log } from "../util/log"
import { SessionProcessor } from "./processor"
import { fn } from "@/util/fn"
import { Agent } from "@/agent/agent"
import { Plugin } from "@/plugin"
import { Config } from "@/config/config"
import { ProviderTransform } from "@/provider/transform"

export namespace SessionCompaction {
const log = Log.create({ service: "session.compaction" })
@@ -27,15 +27,22 @@
),
}

const COMPACTION_BUFFER = 20_000

export async function isOverflow(input: { tokens: MessageV2.Assistant["tokens"]; model: Provider.Model }) {
const config = await Config.get()
if (config.compaction?.auto === false) return false
const context = input.model.limit.context
if (context === 0) return false
const count = input.tokens.input + input.tokens.cache.read + input.tokens.output
const output = Math.min(input.model.limit.output, SessionPrompt.OUTPUT_TOKEN_MAX) || SessionPrompt.OUTPUT_TOKEN_MAX
const usable = input.model.limit.input || context - output
return count > usable

const count =
input.tokens.total ||
input.tokens.input + input.tokens.output + input.tokens.cache.read + input.tokens.cache.write

const reserved =
config.compaction?.reserved ?? Math.min(COMPACTION_BUFFER, ProviderTransform.maxOutputTokens(input.model))
const usable = input.model.limit.input ? input.model.limit.input - reserved : context - reserved
return count >= usable
}

export const PRUNE_MINIMUM = 20_000
@@ -139,8 +146,34 @@
{ sessionID: input.sessionID },
{ context: [], prompt: undefined },
)
const defaultPrompt =
"Provide a detailed prompt for continuing our conversation above. Focus on information that would be helpful for continuing the conversation, including what we did, what we're doing, which files we're working on, and what we're going to do next considering new session will not have access to our conversation."
const defaultPrompt = `Provide a detailed prompt for continuing our conversation above.
Focus on information that would be helpful for continuing the conversation, including what we did, what we're doing, which files we're working on, and what we're going to do next.
The summary that you construct will be used so that another agent can read it and continue the work.

When constructing the summary, try to stick to this template:
---
## Goal

[What goal(s) is the user trying to accomplish?]

## Instructions

- [What important instructions did the user give you that are relevant]
- [If there is a plan or spec, include information about it so next agent can continue using it]

## Discoveries

[What notable things were learned during this conversation that would be useful for the next agent to know when continuing the work]

## Accomplished

[What work has been completed, what work is still in progress, and what work is left?]

## Relevant files / directories

[Construct a structured list of relevant files that have been read, edited, or created that pertain to the task at hand. If all the files in a directory are relevant, include the path to the directory.]
---`

const promptText = compacting.prompt ?? [defaultPrompt, ...compacting.context].join("\n\n")
const result = await processor.process({
user: userMessage,
Expand Down Expand Up @@ -181,7 +214,7 @@ export namespace SessionCompaction {
sessionID: input.sessionID,
type: "text",
synthetic: true,
text: "Continue if you have next steps",
text: "Continue if you have next steps, or stop and ask for clarification if you are unsure how to proceed.",
time: {
start: Date.now(),
end: Date.now(),
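A worked example of the new overflow check (model numbers are illustrative, not from this PR): with a 128k input limit, a 32k output cap, and no `compaction.reserved` configured, the reserved buffer is min(20_000, 32_000) = 20_000, so compaction kicks in once the running token count reaches 108_000.

```ts
// Self-contained sketch of the isOverflow arithmetic above; all numbers are illustrative.
const model = { limit: { context: 200_000, input: 128_000, output: 32_000 } }
const tokens = { total: 0, input: 95_000, output: 6_000, cache: { read: 8_000, write: 1_000 } }

const COMPACTION_BUFFER = 20_000
const maxOutput = Math.min(model.limit.output, 32_000) || 32_000 // ProviderTransform.maxOutputTokens

// Prefer a provider-reported total; otherwise sum the components.
const count = tokens.total || tokens.input + tokens.output + tokens.cache.read + tokens.cache.write // 110_000
const reserved = Math.min(COMPACTION_BUFFER, maxOutput) // 20_000 when compaction.reserved is unset
const usable = model.limit.input ? model.limit.input - reserved : model.limit.context - reserved // 108_000
const overflow = count >= usable // true — compaction would trigger
```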
64 changes: 45 additions & 19 deletions packages/opencode/src/session/index.ts
@@ -4,7 +4,7 @@ import { BusEvent } from "@/bus/bus-event"
import { Bus } from "@/bus"
import { Decimal } from "decimal.js"
import z from "zod"
import { type LanguageModelUsage, type ProviderMetadata } from "ai"
import { type ProviderMetadata } from "ai"
import { Config } from "../config/config"
import { Flag } from "../flag/flag"
import { Identifier } from "../id/id"
@@ -22,6 +22,8 @@ import { Snapshot } from "@/snapshot"
import type { Provider } from "@/provider/provider"
import { PermissionNext } from "@/permission/next"
import { Global } from "@/global"
import type { LanguageModelV2Usage } from "@ai-sdk/provider"
import { iife } from "@/util/iife"

export namespace Session {
const log = Log.create({ service: "session" })
@@ -439,34 +441,58 @@
export const getUsage = fn(
z.object({
model: z.custom<Provider.Model>(),
usage: z.custom<LanguageModelUsage>(),
usage: z.custom<LanguageModelV2Usage>(),
metadata: z.custom<ProviderMetadata>().optional(),
}),
(input) => {
const cacheReadInputTokens = input.usage.cachedInputTokens ?? 0
const cacheWriteInputTokens = (input.metadata?.["anthropic"]?.["cacheCreationInputTokens"] ??
// @ts-expect-error
input.metadata?.["bedrock"]?.["usage"]?.["cacheWriteInputTokens"] ??
// @ts-expect-error
input.metadata?.["venice"]?.["usage"]?.["cacheCreationInputTokens"] ??
0) as number

const excludesCachedTokens = !!(input.metadata?.["anthropic"] || input.metadata?.["bedrock"])
const adjustedInputTokens = excludesCachedTokens
? (input.usage.inputTokens ?? 0)
: (input.usage.inputTokens ?? 0) - cacheReadInputTokens - cacheWriteInputTokens
const safe = (value: number) => {
if (!Number.isFinite(value)) return 0
return value
}
const inputTokens = safe(input.usage.inputTokens ?? 0)
const outputTokens = safe(input.usage.outputTokens ?? 0)
const reasoningTokens = safe(input.usage.reasoningTokens ?? 0)

const cacheReadInputTokens = safe(input.usage.cachedInputTokens ?? 0)
const cacheWriteInputTokens = safe(
(input.metadata?.["anthropic"]?.["cacheCreationInputTokens"] ??
// @ts-expect-error
input.metadata?.["bedrock"]?.["usage"]?.["cacheWriteInputTokens"] ??
// @ts-expect-error
input.metadata?.["venice"]?.["usage"]?.["cacheCreationInputTokens"] ??
0) as number,
)

// OpenRouter reports inputTokens as the total input count, including cached tokens.
// Most other providers (OpenAI, Gemini, etc.) do the same, e.g. vercel/ai#8794 (comment).
// Anthropic is the exception: its inputTokens excludes cached tokens.
// The original cost calculation assumed Anthropic-style reporting (getUsage was likely written against Anthropic first),
// which produced incorrect costs for OpenRouter and other providers.
const excludesCachedTokens = !!(input.metadata?.["anthropic"] || input.metadata?.["bedrock"])
const adjustedInputTokens = safe(
excludesCachedTokens ? inputTokens : inputTokens - cacheReadInputTokens - cacheWriteInputTokens,
)

const total = iife(() => {
// Anthropic doesn't report total_tokens, and the AI SDK would vastly undercount the total
// if it weren't recomputed from the individual components.
if (
input.model.api.npm === "@ai-sdk/anthropic" ||
input.model.api.npm === "@ai-sdk/amazon-bedrock" ||
input.model.api.npm === "@ai-sdk/google-vertex/anthropic"
) {
return adjustedInputTokens + outputTokens + cacheReadInputTokens + cacheWriteInputTokens
}
return input.usage.totalTokens
})

const tokens = {
input: safe(adjustedInputTokens),
output: safe(input.usage.outputTokens ?? 0),
reasoning: safe(input.usage?.reasoningTokens ?? 0),
total,
input: adjustedInputTokens,
output: outputTokens,
reasoning: reasoningTokens,
cache: {
write: safe(cacheWriteInputTokens),
read: safe(cacheReadInputTokens),
write: cacheWriteInputTokens,
read: cacheReadInputTokens,
},
}

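To make the usage change concrete, here is a stripped-down sketch of the accounting. Only the fields this path reads are modeled; the helper name `tokensFor` and the literal numbers are illustrative, not part of the PR:

```ts
// Stripped-down sketch of the usage accounting above; tokensFor is a hypothetical helper.
type Usage = {
  inputTokens?: number
  outputTokens?: number
  reasoningTokens?: number
  cachedInputTokens?: number
  totalTokens?: number
}

const safe = (value: number) => (Number.isFinite(value) ? value : 0)

function tokensFor(npm: string, usage: Usage, cacheWrite = 0, excludesCachedTokens = false) {
  const input = safe(usage.inputTokens ?? 0)
  const output = safe(usage.outputTokens ?? 0)
  const cacheRead = safe(usage.cachedInputTokens ?? 0)
  // Anthropic/Bedrock already exclude cached tokens from inputTokens; others include them.
  const adjustedInput = safe(excludesCachedTokens ? input : input - cacheRead - cacheWrite)
  // Anthropic-family SDKs don't report totalTokens, so rebuild it from the components.
  const total =
    npm === "@ai-sdk/anthropic" || npm === "@ai-sdk/amazon-bedrock" || npm === "@ai-sdk/google-vertex/anthropic"
      ? adjustedInput + output + cacheRead + cacheWrite
      : usage.totalTokens
  return {
    total,
    input: adjustedInput,
    output,
    reasoning: safe(usage.reasoningTokens ?? 0),
    cache: { read: cacheRead, write: cacheWrite },
  }
}

// Anthropic-style report: inputTokens excludes cached tokens, total is reconstructed.
tokensFor("@ai-sdk/anthropic", { inputTokens: 1_200, outputTokens: 400, cachedInputTokens: 10_000 }, 2_000, true)
// -> { total: 13_600, input: 1_200, output: 400, reasoning: 0, cache: { read: 10_000, write: 2_000 } }
```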
12 changes: 2 additions & 10 deletions packages/opencode/src/session/llm.ts
@@ -25,8 +25,7 @@ import { Auth } from "@/auth"

export namespace LLM {
const log = Log.create({ service: "llm" })

export const OUTPUT_TOKEN_MAX = Flag.OPENCODE_EXPERIMENTAL_OUTPUT_TOKEN_MAX || 32_000
export const OUTPUT_TOKEN_MAX = ProviderTransform.OUTPUT_TOKEN_MAX

export type StreamInput = {
user: MessageV2.User
@@ -149,14 +148,7 @@
)

const maxOutputTokens =
isCodex || provider.id.includes("github-copilot")
? undefined
: ProviderTransform.maxOutputTokens(
input.model.api.npm,
params.options,
input.model.limit.output,
OUTPUT_TOKEN_MAX,
)
isCodex || provider.id.includes("github-copilot") ? undefined : ProviderTransform.maxOutputTokens(input.model)

const tools = await resolveTools(input)

2 changes: 2 additions & 0 deletions packages/opencode/src/session/message-v2.ts
@@ -210,6 +210,7 @@ export namespace MessageV2 {
snapshot: z.string().optional(),
cost: z.number(),
tokens: z.object({
total: z.number().optional(),
input: z.number(),
output: z.number(),
reasoning: z.number(),
@@ -383,6 +384,7 @@
summary: z.boolean().optional(),
cost: z.number(),
tokens: z.object({
total: z.number().optional(),
input: z.number(),
output: z.number(),
reasoning: z.number(),
3 changes: 3 additions & 0 deletions packages/opencode/src/session/processor.ts
@@ -342,6 +342,9 @@
stack: JSON.stringify(e.stack),
})
const error = MessageV2.fromError(e, { providerID: input.model.providerID })
if (MessageV2.ContextOverflowError.isInstance(error)) {
// TODO: Handle context overflow error
}
const retry = SessionRetry.retryable(error)
if (retry !== undefined) {
attempt++
1 change: 0 additions & 1 deletion packages/opencode/src/session/prompt.ts
@@ -52,7 +52,6 @@ globalThis.AI_SDK_LOG_WARNINGS = false

export namespace SessionPrompt {
const log = Log.create({ service: "session.prompt" })
export const OUTPUT_TOKEN_MAX = Flag.OPENCODE_EXPERIMENTAL_OUTPUT_TOKEN_MAX || 32_000

const state = Instance.state(
() => {
3 changes: 1 addition & 2 deletions packages/opencode/src/session/retry.ts
@@ -59,9 +59,8 @@
}

export function retryable(error: ReturnType<NamedError["toObject"]>) {
// DO NOT retry context overflow errors
// context overflow errors should not be retried
if (MessageV2.ContextOverflowError.isInstance(error)) return undefined

if (MessageV2.APIError.isInstance(error)) {
if (!error.data.isRetryable) return undefined
return error.data.message.includes("Overloaded") ? "Provider is overloaded" : error.data.message