Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion packages/opencode/src/session/processor.ts
Original file line number Diff line number Diff line change
Expand Up @@ -483,7 +483,7 @@ export namespace SessionProcessor {
const halt = Effect.fn("SessionProcessor.halt")(function* (e: unknown) {
log.error("process", { error: e, stack: e instanceof Error ? e.stack : undefined })
const error = parse(e)
if (MessageV2.ContextOverflowError.isInstance(error)) {
if (MessageV2.ContextOverflowError.isInstance(error) || e instanceof RepetitionError) {
ctx.needsCompaction = true
yield* bus.publish(Session.Event.Error, { sessionID: ctx.sessionID, error })
return
Expand Down
22 changes: 17 additions & 5 deletions packages/opencode/src/session/prompt.ts
Original file line number Diff line number Diff line change
Expand Up @@ -126,7 +126,7 @@ export namespace SessionPrompt {
yield* status.set(sessionID, { type: "idle" })
}),
onBusy: status.set(sessionID, { type: "busy" }),
onInterrupt: lastAssistant(sessionID),
onInterrupt: latestAssistant(sessionID),
busy: () => {
throw new Session.BusyError(sessionID)
},
Expand Down Expand Up @@ -250,6 +250,17 @@ export namespace SessionPrompt {
)
})

const sanitize = (msgs: MessageV2.WithParts[]) => {
const drop = msgs.filter((msg) => msg.info.role === "assistant" && msg.info.summary !== true && !msg.info.finish)
if (drop.length === 0) return msgs
const ids = new Set(drop.map((msg) => msg.info.id))
log.warn("ignoring incomplete assistant messages", {
sessionID: drop[0]?.info.sessionID,
ids: [...ids],
})
Comment on lines +253 to +260
Copy link

Copilot AI Apr 5, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

sanitize() logs a warn every time it encounters the same unfinished assistant messages, but it doesn’t mutate/persist anything that would prevent repeated warnings on subsequent turns. If a session has a permanently-stale unfinished assistant message, this can spam logs on every loop() call. Consider de-duping (e.g., cache warned messageIDs per session/instance), lowering to debug, or only logging once per session until the stale message set changes.

Suggested change
const sanitize = (msgs: MessageV2.WithParts[]) => {
const drop = msgs.filter((msg) => msg.info.role === "assistant" && msg.info.summary !== true && !msg.info.finish)
if (drop.length === 0) return msgs
const ids = new Set(drop.map((msg) => msg.info.id))
log.warn("ignoring incomplete assistant messages", {
sessionID: drop[0]?.info.sessionID,
ids: [...ids],
})
const warnedIncompleteAssistantMessageIDsBySession = new Map<SessionID, Set<MessageID>>()
const sanitize = (msgs: MessageV2.WithParts[]) => {
const drop = msgs.filter((msg) => msg.info.role === "assistant" && msg.info.summary !== true && !msg.info.finish)
if (drop.length === 0) return msgs
const sessionID = drop[0]?.info.sessionID
const ids = new Set(drop.map((msg) => msg.info.id))
const warned = warnedIncompleteAssistantMessageIDsBySession.get(sessionID) ?? new Set<MessageID>()
const unwarnedIDs = [...ids].filter((id) => !warned.has(id))
if (unwarnedIDs.length > 0) {
log.warn("ignoring incomplete assistant messages", {
sessionID,
ids: unwarnedIDs,
})
}
warnedIncompleteAssistantMessageIDsBySession.set(sessionID, ids)

Copilot uses AI. Check for mistakes.
return msgs.filter((msg) => !ids.has(msg.info.id))
}

const insertReminders = Effect.fn("SessionPrompt.insertReminders")(function* (input: {
messages: MessageV2.WithParts[]
agent: Agent.Info
Expand Down Expand Up @@ -1324,7 +1335,7 @@ NOTE: At any point in time through this workflow you should feel free to ask the
},
)

const lastAssistant = (sessionID: SessionID) =>
const latestAssistant: (sessionID: SessionID) => Effect.Effect<MessageV2.WithParts> = (sessionID) =>
Effect.promise(async () => {
let latest: MessageV2.WithParts | undefined
for await (const item of MessageV2.stream(sessionID)) {
Expand All @@ -1346,7 +1357,7 @@ NOTE: At any point in time through this workflow you should feel free to ask the
yield* status.set(sessionID, { type: "busy" })
log.info("loop", { step, sessionID })

let msgs = yield* MessageV2.filterCompactedEffect(sessionID)
let msgs = sanitize(yield* MessageV2.filterCompactedEffect(sessionID))

let lastUser: MessageV2.User | undefined
let lastAssistant: MessageV2.Assistant | undefined
Expand Down Expand Up @@ -1378,7 +1389,7 @@ NOTE: At any point in time through this workflow you should feel free to ask the
lastUser.id < lastAssistant.id
) {
log.info("exiting loop", { sessionID })
break
return lastAssistantMsg ?? (yield* latestAssistant(sessionID))
}

step++
Expand Down Expand Up @@ -1563,7 +1574,8 @@ NOTE: At any point in time through this workflow you should feel free to ask the
}

yield* compaction.prune({ sessionID }).pipe(Effect.ignore, Effect.forkIn(scope))
return yield* lastAssistant(sessionID)
const msgs = sanitize(yield* MessageV2.filterCompactedEffect(sessionID))
return msgs.findLast((msg) => msg.info.role === "assistant") ?? (yield* latestAssistant(sessionID))
},
)

Expand Down
2 changes: 0 additions & 2 deletions packages/opencode/test/scenario/harness.ts
Original file line number Diff line number Diff line change
Expand Up @@ -30,8 +30,6 @@ import { Todo } from "../../src/session/todo"
import { ToolRegistry } from "../../src/tool/registry"
import { Truncate } from "../../src/tool/truncate"
import * as CrossSpawnSpawner from "../../src/effect/cross-spawn-spawner"
import { Question } from "../../src/question"
import { Todo } from "../../src/session/todo"
import { Instance } from "../../src/project/instance"
import { provideTmpdirInstance } from "../fixture/fixture"
import { testEffect } from "../lib/effect"
Expand Down
43 changes: 43 additions & 0 deletions packages/opencode/test/session/processor-effect.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -544,6 +544,49 @@ it.live("session.processor effect tests compact on structured context overflow",
),
)

// Regression test: when the model degenerates into a repetition loop in its
// text output (here 1200 copies of "<invoke>"), the processor should resolve
// the turn as "compact" (triggering compaction) rather than recording an
// error on the assistant message — mirrors the RepetitionError branch added
// to SessionProcessor.halt, which sets ctx.needsCompaction.
it.live("session.processor effect tests compact on repetition loops in text output", () =>
provideTmpdirServer(
({ dir, llm }) =>
Effect.gen(function* () {
const { processors, session, provider } = yield* boot()

// Queue a single canned LLM reply whose text is a pathological
// repetition loop, then a normal stop finish.
yield* llm.push(reply().text("<invoke>".repeat(1200)).stop())

// Seed a session with one user turn and a pending assistant message.
const chat = yield* session.create({})
const parent = yield* user(chat.id, "glm loop")
const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
const mdl = yield* provider.getModel(ref.providerID, ref.modelID)
const handle = yield* processors.create({
assistantMessage: msg,
sessionID: chat.id,
model: mdl,
})

// Drive one full processing turn against the repeated-text reply.
const value = yield* handle.process({
user: {
id: parent.id,
sessionID: chat.id,
role: "user",
time: parent.time,
agent: parent.agent,
model: { providerID: ref.providerID, modelID: ref.modelID },
} satisfies MessageV2.User,
sessionID: chat.id,
model: mdl,
agent: agent(),
system: [],
messages: [{ role: "user", content: "glm loop" }],
tools: {},
})

// The repetition loop must resolve to compaction after exactly one
// LLM call, with no error recorded on the assistant message.
expect(value).toBe("compact")
expect(yield* llm.calls).toBe(1)
expect(handle.message.error).toBeUndefined()
}),
{ git: true, config: (url) => providerCfg(url) },
),
)

it.live("session.processor effect tests mark pending tools as aborted on cleanup", () =>
provideTmpdirServer(
({ dir, llm }) =>
Expand Down
76 changes: 76 additions & 0 deletions packages/opencode/test/session/prompt-effect.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -284,6 +284,34 @@ const seed = Effect.fn("test.seed")(function* (sessionID: SessionID, opts?: { fi
return { user: msg, assistant }
})

// Test helper: persists an assistant message that deliberately has NO
// `finish` field (note `time.completed` is set but `finish` is absent),
// modeling an "unfinished"/broken assistant turn left behind by an
// interrupted stream. SessionPrompt.sanitize() drops exactly such messages
// (role === "assistant", summary !== true, !finish), so tests use this to
// pollute a session's history.
// Attaches a single text part carrying `text` so the pollution is visible
// if it leaks into a later LLM request.
// NOTE(review): assumes updatePart accepts a text part without additional
// metadata fields — confirm against the MessageV2 part schema.
const broken = Effect.fn("test.broken")(function* (sessionID: SessionID, text: string, parentID?: MessageID) {
const session = yield* Session.Service
// Reuse the caller's parent user message, or create a throwaway one.
const id = parentID ?? (yield* user(sessionID, "hello")).id
const msg: MessageV2.Assistant = {
id: MessageID.ascending(),
role: "assistant",
parentID: id,
sessionID,
mode: "build",
agent: "build",
cost: 0,
path: { cwd: "/tmp", root: "/tmp" },
tokens: { input: 0, output: 0, reasoning: 0, cache: { read: 0, write: 0 } },
modelID: ref.modelID,
providerID: ref.providerID,
// Completed timestamp is present, but `finish` is intentionally omitted.
time: { created: Date.now(), completed: Date.now() },
}
yield* session.updateMessage(msg)
yield* session.updatePart({
id: PartID.ascending(),
messageID: msg.id,
sessionID,
type: "text",
text,
})
return msg
})

const addSubtask = (sessionID: SessionID, messageID: MessageID, model = ref) =>
Effect.gen(function* () {
const session = yield* Session.Service
Expand Down Expand Up @@ -325,6 +353,54 @@ it.live("loop exits immediately when last assistant has stop finish", () =>
),
)

// Regression test for sanitize(): an unfinished assistant message (seeded
// via the `broken` helper, which omits `finish`) must be excluded from the
// history sent to the LLM on the next turn, so its garbage text cannot
// pollute the prompt.
it.live("loop ignores unfinished assistant pollution when building the next turn", () =>
provideTmpdirServer(
Effect.fnUntraced(function* ({ llm }) {
const prompt = yield* SessionPrompt.Service
const sessions = yield* Session.Service
const chat = yield* sessions.create({
title: "Pinned",
permission: [{ permission: "*", pattern: "*", action: "allow" }],
})
const msg = yield* user(chat.id, "check PR #412")
// Pollute the session: unfinished assistant reply full of repeated
// junk tokens, parented to the real user message.
yield* broken(
chat.id,
"`apps/liff` `StatusPage` `LiffProvider` `Wave 1B` `apps/liff` `StatusPage` `LiffProvider`",
msg.id,
)
yield* llm.text("PR #412 is still open.")

// Run the loop; it should produce a fresh assistant reply.
const result = yield* prompt.loop({ sessionID: chat.id })
expect(result.info.role).toBe("assistant")
expect(result.parts.some((part) => part.type === "text" && part.text === "PR #412 is still open.")).toBe(true)

// The request actually sent to the LLM must not contain any of the
// junk text from the dropped unfinished message.
const [hit] = yield* llm.inputs
expect(JSON.stringify(hit)).not.toContain("apps/liff")
expect(JSON.stringify(hit)).not.toContain("LiffProvider")
}),
{ git: true, config: providerCfg },
),
)

// Regression test for the loop's exit path: when the most recent assistant
// message is a stale unfinished one trailing a properly finished reply, the
// loop must return the finished assistant (seeded with finish: "stop") and
// must not invoke the LLM at all (llm.calls stays 0).
it.live("loop returns the last finished assistant when a stale unfinished assistant trails it", () =>
provideTmpdirServer(
Effect.fnUntraced(function* ({ llm }) {
const prompt = yield* SessionPrompt.Service
const sessions = yield* Session.Service
const chat = yield* sessions.create({ title: "Pinned" })
// Seed a completed user/assistant exchange, then trail it with an
// unfinished assistant message parented to the same user turn.
const seeded = yield* seed(chat.id, { finish: "stop" })
yield* broken(chat.id, "stale broken thinking output", seeded.user.id)

const result = yield* prompt.loop({ sessionID: chat.id })
// The finished assistant — not the stale one — is returned, with its
// original text intact, and no new LLM call was made.
expect(result.info.role).toBe("assistant")
expect(result.info.id).toBe(seeded.assistant.id)
expect(result.parts.some((part) => part.type === "text" && part.text === "hi there")).toBe(true)
expect(yield* llm.calls).toBe(0)
}),
{ git: true, config: providerCfg },
),
)

it.live("loop calls LLM and returns assistant message", () =>
provideTmpdirServer(
Effect.fnUntraced(function* ({ llm }) {
Expand Down
Loading