diff --git a/packages/opencode/src/session/prompt.ts b/packages/opencode/src/session/prompt.ts
index dd74b83f50f2..4e660850d1b1 100644
--- a/packages/opencode/src/session/prompt.ts
+++ b/packages/opencode/src/session/prompt.ts
@@ -481,6 +481,13 @@ export namespace SessionPrompt {
},
result,
)
+ if ((result as any)?.inject?.length) {
+ await flushInjectedMessages(
+ (result as any).inject,
+ sessionID,
+ { agent: lastUser.agent, model: lastUser.model },
+ )
+ }
assistantMessage.finish = "tool-calls"
assistantMessage.time.completed = Date.now()
await Session.updateMessage(assistantMessage)
@@ -768,6 +775,42 @@ export namespace SessionPrompt {
return Provider.defaultModel()
}
+ /**
+ * Persist injected messages from a `tool.execute.after` hook so the AI
+ * sees them on the next loop iteration. Each entry becomes a synthetic
+ * user message with a single text part, mirroring the existing pattern
+ * used for subtask summary messages (see loop body).
+ */
+ async function flushInjectedMessages(
+ inject: Array<{ role: "user" | "system"; text: string }> | undefined,
+ sessionID: string,
+ lastUser: { agent: string; model: MessageV2.User["model"] },
+ ) {
+ if (!inject?.length) return
+ for (const msg of inject) {
+ const userMsg: MessageV2.User = {
+ id: MessageID.ascending(),
+ sessionID: SessionID.make(sessionID),
+ role: "user",
+ time: { created: Date.now() },
+ agent: lastUser.agent,
+ model: lastUser.model,
+ }
+ await Session.updateMessage(userMsg)
+ await Session.updatePart({
+ id: PartID.ascending(),
+ messageID: userMsg.id,
+ sessionID: SessionID.make(sessionID),
+ type: "text",
+ text:
+ msg.role === "system"
+ ? `<system-reminder>\n${msg.text}\n</system-reminder>`
+ : msg.text,
+ synthetic: true,
+ } satisfies MessageV2.TextPart)
+ }
+ }
+
/** @internal Exported for testing */
export async function resolveTools(input: {
agent: Agent.Info
@@ -858,6 +901,13 @@ export namespace SessionPrompt {
},
output,
)
+ if ((output as any).inject?.length) {
+ await flushInjectedMessages(
+ (output as any).inject,
+ ctx.sessionID,
+ { agent: input.agent.name, model: { providerID: input.model.providerID, modelID: ModelID.make(input.model.api.id) } },
+ )
+ }
return output
},
})
@@ -905,6 +955,13 @@ export namespace SessionPrompt {
},
result,
)
+ if ((result as any).inject?.length) {
+ await flushInjectedMessages(
+ (result as any).inject,
+ ctx.sessionID,
+ { agent: input.agent.name, model: { providerID: input.model.providerID, modelID: ModelID.make(input.model.api.id) } },
+ )
+ }
const textParts: string[] = []
const attachments: Omit<MessageV2.FilePart, "id" | "messageID" | "sessionID">[] = []
diff --git a/packages/plugin/src/index.ts b/packages/plugin/src/index.ts
index a264cf5aaf94..9208bcfda64a 100644
--- a/packages/plugin/src/index.ts
+++ b/packages/plugin/src/index.ts
@@ -220,12 +220,33 @@ export interface Hooks {
input: { cwd: string; sessionID?: string; callID?: string },
output: { env: Record<string, string> },
) => Promise<void>
+ /**
+ * Called after a tool finishes execution. Plugins can modify the tool
+ * result (title, output, metadata) before the AI sees it.
+ *
+ * Use `inject` to append extra messages that the AI will see on the
+ * **next** loop iteration. This is the primary mechanism for behavioral
+ * enforcement — e.g. reminding the AI to update planning files after
+ * every edit, or warning about security policy violations.
+ *
+ * ```ts
+ * "tool.execute.after": async (input, output) => {
+ * if (input.tool === "edit") {
+ * output.inject = [
+ * { role: "user", text: "Remember to update progress.md." },
+ * ]
+ * }
+ * }
+ * ```
+ */
"tool.execute.after"?: (
input: { tool: string; sessionID: string; callID: string; args: any },
output: {
title: string
output: string
metadata: any
+ /** Messages to inject into the conversation after this tool call. */
+ inject?: Array<{ role: "user" | "system"; text: string }>
},
) => Promise<void>
"experimental.chat.messages.transform"?: (