22 changes: 22 additions & 0 deletions packages/opencode/src/session/llm.ts
@@ -333,6 +333,28 @@ export namespace LLM {
        toolName: lower,
      }
    }
    // When the tool name is valid but args failed to parse, the output
    // was likely truncated by the token limit. Give the model a clear
    // signal so it can retry with smaller input instead of looping.
    if (tools[failed.toolCall.toolName] || tools[lower]) {
      l.warn("truncated tool call detected", {
        tool: failed.toolCall.toolName,
        error: failed.error.message,
      })
      return {
        ...failed.toolCall,
        input: JSON.stringify({
          tool: failed.toolCall.toolName,
          error:
            "Your output was truncated because it exceeded the token limit. " +
            "The tool arguments were cut off and could not be parsed. " +
            "Split your operation into smaller pieces and try again. " +
            "For file writes, use the Edit tool for targeted changes or write smaller sections. " +
            "For bash commands, break long heredocs into multiple shorter commands.",
        }),
        toolName: "invalid",
      }
    }
    return {
      ...failed.toolCall,
      input: JSON.stringify({
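For context, here is a minimal standalone sketch of the repair path this hunk adds. The `FailedCall` type, the `tools` registry shape, and `repairTruncatedCall` are assumptions for illustration only, not opencode's real types or API:

// Hypothetical sketch of the repair step above; shapes are illustrative.
type FailedCall = {
  toolCall: { toolCallId: string; toolName: string; input: string }
  error: { message: string }
}

const TRUNCATION_HINT =
  "Your output was truncated because it exceeded the token limit. " +
  "Split your operation into smaller pieces and try again."

function repairTruncatedCall(failed: FailedCall, tools: Record<string, unknown>) {
  const lower = failed.toolCall.toolName.toLowerCase()
  // A known tool name with unparseable args usually means the streamed
  // JSON was cut off at the token limit, so return a synthetic "invalid"
  // call that carries the hint back to the model instead of failing.
  if (tools[failed.toolCall.toolName] || tools[lower]) {
    return {
      ...failed.toolCall,
      toolName: "invalid",
      input: JSON.stringify({ tool: failed.toolCall.toolName, error: TRUNCATION_HINT }),
    }
  }
  return failed.toolCall
}

The key design choice is that the call is rerouted to the "invalid" tool rather than dropped, so the error text reaches the model as a tool result it can act on.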
29 changes: 29 additions & 0 deletions packages/opencode/src/session/prompt.ts
@@ -1376,6 +1376,35 @@ NOTE: At any point in time through this workflow you should feel free to ask the
    const hasToolCalls =
      lastAssistantMsg?.parts.some((part) => part.type === "tool" && !part.metadata?.providerExecuted) ?? false

    // When the model hit the output token limit (finishReason: "length"),
    // its response was truncated. Instead of silently exiting, inject a
    // continuation message so the model can finish its work.
    if (lastAssistant?.finish === "length" && lastUser.id < lastAssistant.id) {
      log.warn("output truncated by token limit, auto-continuing", { sessionID })
      const continuationMsg: MessageV2.User = {
        id: MessageID.ascending(),
        sessionID,
        role: "user",
        time: { created: Date.now() },
        agent: lastUser.agent,
        model: lastUser.model,
      }
      yield* sessions.updateMessage(continuationMsg)
      yield* sessions.updatePart({
        id: PartID.ascending(),
        messageID: continuationMsg.id,
        sessionID,
        type: "text",
        text:
          "Your previous response was truncated because it exceeded the output token limit. " +
          "If you were writing a file, split it into smaller pieces. " +
          "If you were using a tool, use smaller arguments. " +
          "Continue where you left off.",
        synthetic: true,
      })
      continue
    }

    if (
      lastAssistant?.finish &&
      !["tool-calls"].includes(lastAssistant.finish) &&
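A reduced sketch of the same auto-continue pattern outside the session machinery. `Turn`, `generate`, and the attempt cap are stand-ins for illustration; the real hunk guards re-entry by comparing message IDs (`lastUser.id < lastAssistant.id`) rather than counting attempts:

// Hypothetical sketch of the auto-continue loop above; names are illustrative.
type Turn = { finish?: "length" | "stop" | "tool-calls"; text: string }

async function runUntilDone(generate: (history: string[]) => Promise<Turn>): Promise<string[]> {
  const history: string[] = []
  // Cap retries so a model that keeps overflowing cannot loop forever.
  for (let attempt = 0; attempt < 5; attempt++) {
    const turn = await generate(history)
    history.push(turn.text)
    // finishReason "length" means the provider cut the response at the
    // output token limit; inject a synthetic user message and loop again.
    if (turn.finish === "length") {
      history.push(
        "Your previous response was truncated because it exceeded the output token limit. " +
          "Continue where you left off.",
      )
      continue
    }
    break
  }
  return history
}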