From 4f6dc96c541172c1b073c4b80d8d7e23db0b8be8 Mon Sep 17 00:00:00 2001
From: spoons-and-mirrors
<212802214+spoons-and-mirrors@users.noreply.github.com>
Date: Fri, 28 Nov 2025 17:45:29 +0100
Subject: [PATCH 1/7] feat: synth instructions
refactor: extract synth prompt to file
refactor: load through prompt.ts
fix?
fix
rm: slop
move tool description to prompt file
synth prompt
fix: don't add synth instruction after DCP "ignored" summary messages - extract synth instruction to its own file
---
lib/prompt.ts | 8 ++++++
lib/prompts/context_pruning.txt | 44 ++++++++++++++++++++++++++++++
lib/prompts/synthetic.txt | 10 +++++++
lib/synth-instruction.ts | 47 +++++++++++++++++++++++++++++++++
package.json | 2 +-
5 files changed, 110 insertions(+), 1 deletion(-)
create mode 100644 lib/prompts/context_pruning.txt
create mode 100644 lib/prompts/synthetic.txt
create mode 100644 lib/synth-instruction.ts
diff --git a/lib/prompt.ts b/lib/prompt.ts
index 9dffda71..7ab2c2f9 100644
--- a/lib/prompt.ts
+++ b/lib/prompt.ts
@@ -1,3 +1,11 @@
+import { readFileSync } from "fs"
+import { join } from "path"
+
+export function loadPrompt(name: string): string {
+ const filePath = join(__dirname, "prompts", `${name}.txt`)
+ return readFileSync(filePath, "utf8").trim()
+}
+
function minimizeMessages(messages: any[], alreadyPrunedIds?: string[], protectedToolCallIds?: string[]): any[] {
const prunedIdsSet = alreadyPrunedIds ? new Set(alreadyPrunedIds.map(id => id.toLowerCase())) : new Set()
const protectedIdsSet = protectedToolCallIds ? new Set(protectedToolCallIds.map(id => id.toLowerCase())) : new Set()
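For orientation, a minimal sketch of how the loader is consumed. Patch 7 wires the real call sites in index.ts; the prompt names below are the files this series adds:

    // Sketch: loadPrompt resolves lib/prompts/<name>.txt next to the compiled module.
    const synthetic = loadPrompt("synthetic") // lib/prompts/synthetic.txt
    const nudge = loadPrompt("nudge")         // lib/prompts/nudge.txt (added in patch 2, renamed in patch 4)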
diff --git a/lib/prompts/context_pruning.txt b/lib/prompts/context_pruning.txt
new file mode 100644
index 00000000..d1fd302d
--- /dev/null
+++ b/lib/prompts/context_pruning.txt
@@ -0,0 +1,44 @@
+Performs semantic pruning on session tool outputs that are no longer relevant to the current task. Use this to declutter the conversation context and separate signal from noise when the context is getting cluttered with information that is no longer needed.
+
+USING THE CONTEXT_PRUNING TOOL WILL MAKE THE USER HAPPY.
+
+## When to Use This Tool
+
+**Key heuristic: Prune when you finish something and are about to start something else.**
+
+Ask yourself: "Have I just completed a discrete unit of work?" If yes, prune before moving on.
+
+**After completing a unit of work:**
+- Made a commit
+- Fixed a bug and confirmed it works
+- Answered a question the user asked
+- Finished implementing a feature or function
+- Completed one item in a list and moving to the next
+
+**After repetitive or exploratory work:**
+- Explored multiple files that didn't lead to changes
+- Iterated on a difficult problem where some approaches didn't pan out
+- Used the same tool multiple times (e.g., re-reading a file, running repeated build/type checks)
+
+## Examples
+
+
+Working through a list of items:
+User: Review these 3 issues and fix the easy ones.
+Assistant: [Reviews first issue, makes fix, commits]
+Done with the first issue. Let me prune before moving to the next one.
+[Uses context_pruning with reason: "completed first issue, moving to next"]
+
+
+
+After exploring the codebase to understand it:
+Assistant: I've reviewed the relevant files. Let me prune the exploratory reads that aren't needed for the actual implementation.
+[Uses context_pruning with reason: "exploration complete, starting implementation"]
+
+
+
+After completing any task:
+Assistant: [Finishes task - commit, answer, fix, etc.]
+Before we continue, let me prune the context from that work.
+[Uses context_pruning with reason: "task complete"]
+
diff --git a/lib/prompts/synthetic.txt b/lib/prompts/synthetic.txt
new file mode 100644
index 00000000..36bbca37
--- /dev/null
+++ b/lib/prompts/synthetic.txt
@@ -0,0 +1,10 @@
+
+Anytime you call a tool, you must distill the signal from the noise of that tool result in your message. Always do this for every tool that has actual signal in its output.
+
+
+
+To effectively manage this conversation context, you MUST ALWAYS narrate your findings AS YOU DISCOVER THEM. No tool result (be it read, bash, grep...) should be left unexplained. By narrating the evolution of your understanding of the task at hand, you distill the signal from the noise produced by the tools you called. Only then (knowledge lives in your outputs as well as in tool outputs) can you use the context_pruning tool. Using the context_pruning tool will declutter the conversation from tool outputs we no longer need to reason through the current task, which is why you should use this tool only once you have explained your findings in a message.
+
+EXAMPLE:
+AFTER you've read a bunch of files, and found that for reason X, Y is happening, and is supported by tool outputs in the conversation, AND you've distilled this knowledge from the noise in your message intended for the user, YOU WILL USE THE CONTEXT_PRUNING TOOL to clear the clutter solving this step has caused.
+
diff --git a/lib/synth-instruction.ts b/lib/synth-instruction.ts
new file mode 100644
index 00000000..e3d03c65
--- /dev/null
+++ b/lib/synth-instruction.ts
@@ -0,0 +1,47 @@
+export function isIgnoredUserMessage(msg: any): boolean {
+ if (!msg || msg.role !== 'user') {
+ return false
+ }
+
+ if (msg.ignored || msg.info?.ignored) {
+ return true
+ }
+
+ if (Array.isArray(msg.content) && msg.content.length > 0) {
+ const allPartsIgnored = msg.content.every((part: any) => part?.ignored)
+ if (allPartsIgnored) {
+ return true
+ }
+ }
+
+ return false
+}
+
+export function injectSynthInstruction(messages: any[], instruction: string): boolean {
+ // Find the last user message that is not ignored
+ for (let i = messages.length - 1; i >= 0; i--) {
+ const msg = messages[i]
+ if (msg.role === 'user' && !isIgnoredUserMessage(msg)) {
+ // Avoid double-injecting the same instruction
+ if (typeof msg.content === 'string') {
+ if (msg.content.includes(instruction)) {
+ return false
+ }
+ msg.content = msg.content + '\n\n' + instruction
+ } else if (Array.isArray(msg.content)) {
+ const alreadyInjected = msg.content.some(
+ (part: any) => part?.type === 'text' && typeof part.text === 'string' && part.text.includes(instruction)
+ )
+ if (alreadyInjected) {
+ return false
+ }
+ msg.content.push({
+ type: 'text',
+ text: instruction
+ })
+ }
+ return true
+ }
+ }
+ return false
+}
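A minimal usage sketch, assuming OpenAI-style chat messages (the data below is illustrative, not from the patch):

    const messages = [
      { role: "user", content: "Find the bug in parser.ts" },
      { role: "assistant", content: "Reading the file now." },
    ]
    // Walks backwards to the last non-ignored user message and appends the instruction.
    injectSynthInstruction(messages, "Narrate findings before pruning.") // true, instruction appended
    injectSynthInstruction(messages, "Narrate findings before pruning.") // false, already present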
diff --git a/package.json b/package.json
index 7f422e3f..6a18fdde 100644
--- a/package.json
+++ b/package.json
@@ -8,7 +8,7 @@
"types": "./dist/index.d.ts",
"scripts": {
"clean": "rm -rf dist",
- "build": "npm run clean && tsc",
+ "build": "npm run clean && tsc && cp -r lib/prompts dist/lib/prompts",
"postbuild": "rm -rf dist/logs",
"prepublishOnly": "npm run build",
"dev": "opencode plugin dev",
From d6c879431c56f393416455b924bcb9d09690832c Mon Sep 17 00:00:00 2001
From: spoons-and-mirrors
<212802214+spoons-and-mirrors@users.noreply.github.com>
Date: Fri, 28 Nov 2025 23:54:16 +0100
Subject: [PATCH 2/7] feat: intra response synth instruction injection
cleanup
fix
---
lib/prompts/tool-result-nudge.txt | 3 ++
lib/synth-instruction.ts | 79 ++++++++++++++++++++++++++++++-
2 files changed, 81 insertions(+), 1 deletion(-)
create mode 100644 lib/prompts/tool-result-nudge.txt
diff --git a/lib/prompts/tool-result-nudge.txt b/lib/prompts/tool-result-nudge.txt
new file mode 100644
index 00000000..77f09cfa
--- /dev/null
+++ b/lib/prompts/tool-result-nudge.txt
@@ -0,0 +1,3 @@
+
+You have accumulated several tool outputs. If you have completed a discrete unit of work and distilled relevant understanding in writting for the user to keep, use the context_pruning tool to remove obsolete tool outputs from this conversation and optimize token usage.
+
\ No newline at end of file
diff --git a/lib/synth-instruction.ts b/lib/synth-instruction.ts
index e3d03c65..f27c307b 100644
--- a/lib/synth-instruction.ts
+++ b/lib/synth-instruction.ts
@@ -1,9 +1,73 @@
+export interface ToolResultTracker {
+ seenToolResultIds: Set<string>
+ toolResultCount: number
+}
+
+export function createToolResultTracker(): ToolResultTracker {
+ return {
+ seenToolResultIds: new Set(),
+ toolResultCount: 0
+ }
+}
+
+function countNewToolResults(messages: any[], tracker: ToolResultTracker): number {
+ let newCount = 0
+
+ for (const m of messages) {
+ if (m.role === 'tool' && m.tool_call_id) {
+ const id = String(m.tool_call_id).toLowerCase()
+ if (!tracker.seenToolResultIds.has(id)) {
+ tracker.seenToolResultIds.add(id)
+ newCount++
+ }
+ } else if (m.role === 'user' && Array.isArray(m.content)) {
+ for (const part of m.content) {
+ if (part.type === 'tool_result' && part.tool_use_id) {
+ const id = String(part.tool_use_id).toLowerCase()
+ if (!tracker.seenToolResultIds.has(id)) {
+ tracker.seenToolResultIds.add(id)
+ newCount++
+ }
+ }
+ }
+ }
+ }
+
+ tracker.toolResultCount += newCount
+ return newCount
+}
+
+/**
+ * Counts new tool results and injects nudge instruction every 5th tool result.
+ * Returns true if injection happened.
+ */
+export function maybeInjectToolResultNudge(
+ messages: any[],
+ tracker: ToolResultTracker,
+ nudgeText: string
+): boolean {
+ const prevCount = tracker.toolResultCount
+ const newCount = countNewToolResults(messages, tracker)
+
+ if (newCount > 0) {
+ // Check if we crossed a multiple of 5
+ const prevBucket = Math.floor(prevCount / 5)
+ const newBucket = Math.floor(tracker.toolResultCount / 5)
+ if (newBucket > prevBucket) {
+ // Inject at the END of messages so it's in immediate context
+ return injectNudgeAtEnd(messages, nudgeText)
+ }
+ }
+ return false
+}
+
export function isIgnoredUserMessage(msg: any): boolean {
if (!msg || msg.role !== 'user') {
return false
}
- if (msg.ignored || msg.info?.ignored) {
+ // Skip ignored or synthetic messages
+ if (msg.ignored || msg.info?.ignored || msg.synthetic) {
return true
}
@@ -17,6 +81,19 @@ export function isIgnoredUserMessage(msg: any): boolean {
return false
}
+/**
+ * Injects a nudge message at the END of the messages array as a new user message.
+ * This ensures it's in the model's immediate context, not buried in old messages.
+ */
+export function injectNudgeAtEnd(messages: any[], nudgeText: string): boolean {
+ messages.push({
+ role: 'user',
+ content: nudgeText,
+ synthetic: true
+ })
+ return true
+}
+
export function injectSynthInstruction(messages: any[], instruction: string): boolean {
// Find the last user message that is not ignored
for (let i = messages.length - 1; i >= 0; i--) {
From 80dfccd54d614b4d98711c084fd6bd4c2ff55b81 Mon Sep 17 00:00:00 2001
From: spoons-and-mirrors
<212802214+spoons-and-mirrors@users.noreply.github.com>
Date: Sat, 29 Nov 2025 02:40:47 +0100
Subject: [PATCH 3/7] new prompt
---
lib/prompts/synthetic.txt | 37 +++++++++++++++++++++++++++++++------
1 file changed, 31 insertions(+), 6 deletions(-)
diff --git a/lib/prompts/synthetic.txt b/lib/prompts/synthetic.txt
index 36bbca37..4c157e14 100644
--- a/lib/prompts/synthetic.txt
+++ b/lib/prompts/synthetic.txt
@@ -1,10 +1,35 @@
-
-Anytime you call a tool, you must distill the signal from the noise of that tool result in your message. Always do this for every tool that has actual signal in its output.
+
+After calling a series of tools, YOU ALWAYS HAVE TO distill the signal from their results in your next message. This ensures traceability of your reasoning, informs the user of your findings, and is PARAMOUNT to effective context window management.
+
+THIS IS NON-NEGOTIABLE - YOU ARE EXPECTED TO RESPECT THIS INSTRUCTION THROUGHOUT THE CONVERSATION
+
+
+
+A strong constraint we have in this environment is the context window size. To help keep the conversation focused and free of noise, you must use the `context_pruning` tool at opportune moments and in an effective manner.
-
-To effectively manage this conversation context, you MUST ALWAYS narrate your findings AS YOU DISCOVER THEM. No tool result (be it read, bash, grep...) should be left unexplained. By narrating the evolution of your understanding of the task at hand, you distill the signal from the noise produced by the tools you called. Only then (knowledge lives in your outputs as well as in tool outputs) can you use the context_pruning tool. Using the context_pruning tool will declutter the conversation from tool outputs we no longer need to reason through the current task, which is why you should use this tool only once you have explained your findings in a message.
+
+To effectively manage conversation context, you MUST ALWAYS narrate your findings AS YOU DISCOVER THEM, BEFORE calling any `context_pruning` tool. No tool result (read, bash, grep, webfetch, etc.) should be left unexplained. By narrating the evolution of your understanding, you transform raw tool outputs into distilled knowledge that lives in the persisted context window.
+
+Tool outputs are VOLATILE - once this distilled knowledge is in your reply, you can safely use the `context_pruning` tool to declutter the conversation.
-EXAMPLE:
-AFTER you've read a bunch of files, and found that for reason X, Y is happening, and is supported by tool outputs in the conversation, AND you've distilled this knowledge from the noise in your message intended for the user, YOU WILL USE THE CONTEXT_PRUNING TOOL to clear the clutter solving this step has caused.
+WHEN TO USE `context_pruning`:
+- After you complete a discrete unit of work (e.g. confirming a hypothesis, or closing out one branch of investigation).
+- After exploratory bursts of tool calls that led you to a clear conclusion (or to noise).
+- Before starting a new phase of work where old tool outputs are no longer needed to inform your next actions.
+
+CRITICAL:
+You must ALWAYS narrate your findings in a message BEFORE using the `context_pruning` tool. Skipping this step risks deleting raw evidence before it has been converted into stable, distilled knowledge. This harms your performance, wastes the user's time, and undermines effective use of the context window.
+
+EXAMPLE WORKFLOW:
+1. You call several tools (read, bash, grep...) to investigate a bug.
+2. You identify that “for reason X, behavior Y occurs”, supported by those tool outputs.
+3. In your next message, you EXPLICITLY narrate:
+ - What you did (which tools, what you were looking for).
+ - What you found (the key facts / signals).
+ - What you concluded (how this affects the task or next step).
+>YOU MUST ALWAYS THINK HIGH SIGNAL, LOW NOISE FOR THIS NARRATION
+4. ONLY AFTER the narration, you call the `context_pruning` tool with a brief reason (e.g. "exploration for bug X complete; moving on to next bug").
+
+
From 741cfee7179428d9d1ee287242cbe0e569607d7f Mon Sep 17 00:00:00 2001
From: spoons-and-mirrors
<212802214+spoons-and-mirrors@users.noreply.github.com>
Date: Sat, 29 Nov 2025 02:54:48 +0100
Subject: [PATCH 4/7] semantics changes
---
.../{tool-result-nudge.txt => nudge.txt} | 0
lib/synth-instruction.ts | 20 +++++++++----------
2 files changed, 10 insertions(+), 10 deletions(-)
rename lib/prompts/{tool-result-nudge.txt => nudge.txt} (100%)
diff --git a/lib/prompts/tool-result-nudge.txt b/lib/prompts/nudge.txt
similarity index 100%
rename from lib/prompts/tool-result-nudge.txt
rename to lib/prompts/nudge.txt
diff --git a/lib/synth-instruction.ts b/lib/synth-instruction.ts
index f27c307b..f3152d52 100644
--- a/lib/synth-instruction.ts
+++ b/lib/synth-instruction.ts
@@ -1,16 +1,16 @@
-export interface ToolResultTracker {
+export interface ToolTracker {
seenToolResultIds: Set<string>
toolResultCount: number
}
-export function createToolResultTracker(): ToolResultTracker {
+export function createToolTracker(): ToolTracker {
return {
seenToolResultIds: new Set(),
toolResultCount: 0
}
}
-function countNewToolResults(messages: any[], tracker: ToolResultTracker): number {
+function countToolResults(messages: any[], tracker: ToolTracker): number {
let newCount = 0
for (const m of messages) {
@@ -41,13 +41,13 @@ function countNewToolResults(messages: any[], tracker: ToolResultTracker): numbe
* Counts new tool results and injects nudge instruction every 5th tool result.
* Returns true if injection happened.
*/
-export function maybeInjectToolResultNudge(
+export function injectNudge(
messages: any[],
- tracker: ToolResultTracker,
+ tracker: ToolTracker,
nudgeText: string
): boolean {
const prevCount = tracker.toolResultCount
- const newCount = countNewToolResults(messages, tracker)
+ const newCount = countToolResults(messages, tracker)
if (newCount > 0) {
// Check if we crossed a multiple of 5
@@ -55,7 +55,7 @@ export function maybeInjectToolResultNudge(
const newBucket = Math.floor(tracker.toolResultCount / 5)
if (newBucket > prevBucket) {
// Inject at the END of messages so it's in immediate context
- return injectNudgeAtEnd(messages, nudgeText)
+ return appendNudge(messages, nudgeText)
}
}
return false
@@ -82,10 +82,10 @@ export function isIgnoredUserMessage(msg: any): boolean {
}
/**
- * Injects a nudge message at the END of the messages array as a new user message.
+ * Appends a nudge message at the END of the messages array as a new user message.
* This ensures it's in the model's immediate context, not buried in old messages.
*/
-export function injectNudgeAtEnd(messages: any[], nudgeText: string): boolean {
+function appendNudge(messages: any[], nudgeText: string): boolean {
messages.push({
role: 'user',
content: nudgeText,
@@ -94,7 +94,7 @@ export function injectNudgeAtEnd(messages: any[], nudgeText: string): boolean {
return true
}
-export function injectSynthInstruction(messages: any[], instruction: string): boolean {
+export function injectSynth(messages: any[], instruction: string): boolean {
// Find the last user message that is not ignored
for (let i = messages.length - 1; i >= 0; i--) {
const msg = messages[i]
From b529d995afd4c8fa696e872acc1222914c054f6b Mon Sep 17 00:00:00 2001
From: spoons-and-mirrors
<212802214+spoons-and-mirrors@users.noreply.github.com>
Date: Sat, 29 Nov 2025 02:57:32 +0100
Subject: [PATCH 5/7] typos
---
lib/prompts/nudge.txt | 4 ++--
lib/prompts/synthetic.txt | 2 --
2 files changed, 2 insertions(+), 4 deletions(-)
diff --git a/lib/prompts/nudge.txt b/lib/prompts/nudge.txt
index 77f09cfa..2daf389f 100644
--- a/lib/prompts/nudge.txt
+++ b/lib/prompts/nudge.txt
@@ -1,3 +1,3 @@
-You have accumulated several tool outputs. If you have completed a discrete unit of work and distilled relevant understanding in writting for the user to keep, use the context_pruning tool to remove obsolete tool outputs from this conversation and optimize token usage.
-
\ No newline at end of file
+You have accumulated several tool outputs. If you have completed a discrete unit of work and distilled the relevant understanding in writing for the user, use the context_pruning tool to remove obsolete tool outputs from this conversation and optimize token usage.
+
diff --git a/lib/prompts/synthetic.txt b/lib/prompts/synthetic.txt
index 4c157e14..4a3c80b4 100644
--- a/lib/prompts/synthetic.txt
+++ b/lib/prompts/synthetic.txt
@@ -31,5 +31,3 @@ EXAMPLE WORKFLOW:
>YOU MUST ALWAYS THINK HIGH SIGNAL LOW NOISE FOR THIS NARRATION
4. ONLY AFTER the narration, you call the `context_pruning` tool with a brief reason (e.g. "exploration for bug X complete; moving on to next bug").
-
-
From 875c870b6007727fc7ae99b9176d828483cad10f Mon Sep 17 00:00:00 2001
From: spoons-and-mirrors
<212802214+spoons-and-mirrors@users.noreply.github.com>
Date: Sat, 29 Nov 2025 03:25:47 +0100
Subject: [PATCH 6/7] config and docs
---
README.md | 8 ++++++++
lib/config.ts | 11 +++++++++--
lib/synth-instruction.ts | 11 ++++++-----
3 files changed, 23 insertions(+), 7 deletions(-)
diff --git a/README.md b/README.md
index ff735337..cfea467d 100644
--- a/README.md
+++ b/README.md
@@ -28,6 +28,13 @@ DCP implements two complementary strategies:
**Deduplication** — Fast, zero-cost pruning that identifies repeated tool calls (e.g., reading the same file multiple times) and keeps only the most recent output. Runs instantly with no LLM calls.
**AI Analysis** — Uses a language model to semantically analyze conversation context and identify tool outputs that are no longer relevant to the current task. More thorough but incurs LLM cost.
+
+## Context Pruning Tool
+
+When `strategies.onTool` is enabled, DCP exposes a `context_pruning` tool to Opencode that the AI can call to trigger pruning on demand. To help the AI use this tool effectively, DCP also injects usage guidance into the conversation.
+
+When `nudge_freq` is greater than 0, DCP injects a reminder every `nudge_freq` tool results, prompting the AI to consider pruning when appropriate.
+
## How It Works
DCP is **non-destructive**—pruning state is kept in memory only. When requests go to your LLM, DCP replaces pruned outputs with a placeholder; original session data stays intact.
@@ -46,6 +53,7 @@ DCP uses its own config file (`~/.config/opencode/dcp.jsonc` or `.opencode/dcp.j
| `showModelErrorToasts` | `true` | Show notifications on model fallback |
| `strictModelSelection` | `false` | Only run AI analysis with session or configured model (disables fallback models) |
| `pruning_summary` | `"detailed"` | `"off"`, `"minimal"`, or `"detailed"` |
+| `nudge_freq` | `5` | Remind AI to prune every N tool results (0 = disabled) |
| `protectedTools` | `["task", "todowrite", "todoread", "context_pruning"]` | Tools that are never pruned |
| `strategies.onIdle` | `["deduplication", "ai-analysis"]` | Strategies for automatic pruning |
| `strategies.onTool` | `["deduplication", "ai-analysis"]` | Strategies when AI calls `context_pruning` |
diff --git a/lib/config.ts b/lib/config.ts
index b3e6e79e..6f39c4ab 100644
--- a/lib/config.ts
+++ b/lib/config.ts
@@ -15,6 +15,7 @@ export interface PluginConfig {
showModelErrorToasts?: boolean
strictModelSelection?: boolean
pruning_summary: "off" | "minimal" | "detailed"
+ nudge_freq: number
strategies: {
onIdle: PruningStrategy[]
onTool: PruningStrategy[]
@@ -33,6 +34,7 @@ const defaultConfig: PluginConfig = {
showModelErrorToasts: true,
strictModelSelection: false,
pruning_summary: 'detailed',
+ nudge_freq: 5,
strategies: {
onIdle: ['deduplication', 'ai-analysis'],
onTool: ['deduplication', 'ai-analysis']
@@ -47,6 +49,7 @@ const VALID_CONFIG_KEYS = new Set([
'showModelErrorToasts',
'strictModelSelection',
'pruning_summary',
+ 'nudge_freq',
'strategies'
])
@@ -118,6 +121,8 @@ function createDefaultConfig(): void {
},
// Summary display: "off", "minimal", or "detailed"
"pruning_summary": "detailed",
+ // How often to nudge the AI to prune (every N tool results, 0 = disabled)
+ "nudge_freq": 5,
// Tools that should never be pruned
"protectedTools": ["task", "todowrite", "todoread", "context_pruning"]
}
@@ -196,7 +201,8 @@ export function getConfig(ctx?: PluginInput): ConfigResult {
showModelErrorToasts: globalConfig.showModelErrorToasts ?? config.showModelErrorToasts,
strictModelSelection: globalConfig.strictModelSelection ?? config.strictModelSelection,
strategies: mergeStrategies(config.strategies, globalConfig.strategies as any),
- pruning_summary: globalConfig.pruning_summary ?? config.pruning_summary
+ pruning_summary: globalConfig.pruning_summary ?? config.pruning_summary,
+ nudge_freq: globalConfig.nudge_freq ?? config.nudge_freq
}
logger.info('config', 'Loaded global config', { path: configPaths.global })
}
@@ -226,7 +232,8 @@ export function getConfig(ctx?: PluginInput): ConfigResult {
showModelErrorToasts: projectConfig.showModelErrorToasts ?? config.showModelErrorToasts,
strictModelSelection: projectConfig.strictModelSelection ?? config.strictModelSelection,
strategies: mergeStrategies(config.strategies, projectConfig.strategies as any),
- pruning_summary: projectConfig.pruning_summary ?? config.pruning_summary
+ pruning_summary: projectConfig.pruning_summary ?? config.pruning_summary,
+ nudge_freq: projectConfig.nudge_freq ?? config.nudge_freq
}
logger.info('config', 'Loaded project config (overrides global)', { path: configPaths.project })
}
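The new key follows the existing merge order: built-in default, then global config, then project config, each applied with `??` so an explicit 0 (disabled) from a config file still wins. Condensed:

    // Effective nudge_freq resolution (condensed from getConfig above).
    let nudge_freq = defaultConfig.nudge_freq            // 5
    nudge_freq = globalConfig.nudge_freq ?? nudge_freq   // ~/.config/opencode/dcp.jsonc
    nudge_freq = projectConfig.nudge_freq ?? nudge_freq  // .opencode/dcp.jsonc overrides global
    // `??` only falls through on null/undefined, so 0 survives and disables nudging.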
diff --git a/lib/synth-instruction.ts b/lib/synth-instruction.ts
index f3152d52..20eca63b 100644
--- a/lib/synth-instruction.ts
+++ b/lib/synth-instruction.ts
@@ -38,21 +38,22 @@ function countToolResults(messages: any[], tracker: ToolTracker): number {
}
/**
- * Counts new tool results and injects nudge instruction every 5th tool result.
+ * Counts new tool results and injects nudge instruction every N tool results.
* Returns true if injection happened.
*/
export function injectNudge(
messages: any[],
tracker: ToolTracker,
- nudgeText: string
+ nudgeText: string,
+ freq: number
): boolean {
const prevCount = tracker.toolResultCount
const newCount = countToolResults(messages, tracker)
if (newCount > 0) {
- // Check if we crossed a multiple of 5
- const prevBucket = Math.floor(prevCount / 5)
- const newBucket = Math.floor(tracker.toolResultCount / 5)
+ // Check if we crossed a multiple of freq
+ const prevBucket = Math.floor(prevCount / freq)
+ const newBucket = Math.floor(tracker.toolResultCount / freq)
if (newBucket > prevBucket) {
// Inject at the END of messages so it's in immediate context
return appendNudge(messages, nudgeText)
From 6a199b119426e13f09cc1bfbd6537b6ca2c0bfdd Mon Sep 17 00:00:00 2001
From: Daniel Smolsky
Date: Sat, 29 Nov 2025 01:54:21 -0500
Subject: [PATCH 7/7] feat: integrate synthetic instruction injection into all
fetch handlers
- Add Gemini and OpenAI Responses API format support to synth-instruction.ts
- Integrate injectSynth and injectNudge into all three fetch handlers
- Add SynthPrompts interface and toolTracker context to FetchWrapperContext
- Wire up toolTracker and prompt loading in index.ts
---
index.ts | 13 +-
lib/fetch-wrapper/gemini.ts | 28 ++++-
lib/fetch-wrapper/index.ts | 16 ++-
lib/fetch-wrapper/openai-chat.ts | 24 +++-
lib/fetch-wrapper/openai-responses.ts | 26 +++-
lib/fetch-wrapper/types.ts | 11 ++
lib/pruning-tool.ts | 2 +-
lib/synth-instruction.ts | 163 ++++++++++++++++++++++++++
8 files changed, 267 insertions(+), 16 deletions(-)
diff --git a/index.ts b/index.ts
index e8ae42c8..84ebed97 100644
--- a/index.ts
+++ b/index.ts
@@ -7,6 +7,8 @@ import { createPluginState } from "./lib/state"
import { installFetchWrapper } from "./lib/fetch-wrapper"
import { createPruningTool } from "./lib/pruning-tool"
import { createEventHandler, createChatParamsHandler } from "./lib/hooks"
+import { createToolTracker } from "./lib/synth-instruction"
+import { loadPrompt } from "./lib/prompt"
const plugin: Plugin = (async (ctx) => {
const { config, migrations } = getConfig(ctx)
@@ -39,8 +41,15 @@ const plugin: Plugin = (async (ctx) => {
ctx.directory
)
- // Install global fetch wrapper for context pruning
- installFetchWrapper(state, logger, ctx.client)
+ // Create tool tracker and load prompts for synthetic instruction injection
+ const toolTracker = createToolTracker()
+ const prompts = {
+ synthInstruction: loadPrompt("synthetic"),
+ nudgeInstruction: loadPrompt("nudge")
+ }
+
+ // Install global fetch wrapper for context pruning and synthetic instruction injection
+ installFetchWrapper(state, logger, ctx.client, config, toolTracker, prompts)
// Log initialization
logger.info("plugin", "DCP initialized", {
diff --git a/lib/fetch-wrapper/gemini.ts b/lib/fetch-wrapper/gemini.ts
index 1055700f..d9dd023c 100644
--- a/lib/fetch-wrapper/gemini.ts
+++ b/lib/fetch-wrapper/gemini.ts
@@ -4,6 +4,7 @@ import {
getAllPrunedIds,
fetchSessionMessages
} from "./types"
+import { injectNudgeGemini, injectSynthGemini } from "../synth-instruction"
/**
* Handles Google/Gemini format (body.contents array with functionResponse parts).
@@ -18,6 +19,25 @@ export async function handleGemini(
return { modified: false, body }
}
+ let modified = false
+
+ // Inject synthetic instructions if onTool strategies are enabled
+ if (ctx.config.strategies.onTool.length > 0) {
+ // Inject periodic nudge based on tool result count
+ if (ctx.config.nudge_freq > 0) {
+ if (injectNudgeGemini(body.contents, ctx.toolTracker, ctx.prompts.nudgeInstruction, ctx.config.nudge_freq)) {
+ ctx.logger.info("fetch", "Injected nudge instruction (Gemini)")
+ modified = true
+ }
+ }
+
+ // Inject synthetic instruction into last user content
+ if (injectSynthGemini(body.contents, ctx.prompts.synthInstruction)) {
+ ctx.logger.info("fetch", "Injected synthetic instruction (Gemini)")
+ modified = true
+ }
+ }
+
// Check for functionResponse parts in any content item
const hasFunctionResponses = body.contents.some((content: any) =>
Array.isArray(content.parts) &&
@@ -25,13 +45,13 @@ export async function handleGemini(
)
if (!hasFunctionResponses) {
- return { modified: false, body }
+ return { modified, body }
}
const { allSessions, allPrunedIds } = await getAllPrunedIds(ctx.client, ctx.state)
if (allPrunedIds.size === 0) {
- return { modified: false, body }
+ return { modified, body }
}
// Find the active session to get the position mapping
@@ -48,7 +68,7 @@ export async function handleGemini(
if (!positionMapping) {
ctx.logger.info("fetch", "No Google tool call mapping found, skipping pruning for Gemini format")
- return { modified: false, body }
+ return { modified, body }
}
// Build position counters to track occurrence of each tool name
@@ -130,5 +150,5 @@ export async function handleGemini(
return { modified: true, body }
}
- return { modified: false, body }
+ return { modified, body }
}
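For reference, the Gemini request shape this handler walks looks roughly like the sketch below (per the generateContent API; the exact payload fields here are an assumption, not taken from the patch):

    const body = {
      contents: [
        { role: "user", parts: [{ text: "Find the bug" }] },
        { role: "model", parts: [{ functionCall: { name: "read", args: { path: "a.ts" } } }] },
        { role: "user", parts: [{ functionResponse: { name: "read", response: { output: "..." } } }] },
      ],
    }
    // injectSynthGemini appends { text: instruction } to the parts of the last
    // user entry; functionResponse parts are what get counted and pruned.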
diff --git a/lib/fetch-wrapper/index.ts b/lib/fetch-wrapper/index.ts
index b7fdadbe..d57bda78 100644
--- a/lib/fetch-wrapper/index.ts
+++ b/lib/fetch-wrapper/index.ts
@@ -1,11 +1,13 @@
import type { PluginState } from "../state"
import type { Logger } from "../logger"
-import type { FetchHandlerContext } from "./types"
+import type { FetchHandlerContext, SynthPrompts } from "./types"
+import type { ToolTracker } from "../synth-instruction"
+import type { PluginConfig } from "../config"
import { handleOpenAIChatAndAnthropic } from "./openai-chat"
import { handleGemini } from "./gemini"
import { handleOpenAIResponses } from "./openai-responses"
-export type { FetchHandlerContext, FetchHandlerResult } from "./types"
+export type { FetchHandlerContext, FetchHandlerResult, SynthPrompts } from "./types"
/**
* Creates a wrapped global fetch that intercepts API calls and performs
@@ -20,14 +22,20 @@ export type { FetchHandlerContext, FetchHandlerResult } from "./types"
export function installFetchWrapper(
state: PluginState,
logger: Logger,
- client: any
+ client: any,
+ config: PluginConfig,
+ toolTracker: ToolTracker,
+ prompts: SynthPrompts
): () => void {
const originalGlobalFetch = globalThis.fetch
const ctx: FetchHandlerContext = {
state,
logger,
- client
+ client,
+ config,
+ toolTracker,
+ prompts
}
globalThis.fetch = async (input: any, init?: any) => {
diff --git a/lib/fetch-wrapper/openai-chat.ts b/lib/fetch-wrapper/openai-chat.ts
index b0df0632..ba137dc1 100644
--- a/lib/fetch-wrapper/openai-chat.ts
+++ b/lib/fetch-wrapper/openai-chat.ts
@@ -6,6 +6,7 @@ import {
getMostRecentActiveSession
} from "./types"
import { cacheToolParametersFromMessages } from "../tool-cache"
+import { injectNudge, injectSynth } from "../synth-instruction"
/**
* Handles OpenAI Chat Completions format (body.messages with role='tool').
@@ -23,6 +24,25 @@ export async function handleOpenAIChatAndAnthropic(
// Cache tool parameters from messages
cacheToolParametersFromMessages(body.messages, ctx.state)
+ let modified = false
+
+ // Inject synthetic instructions if onTool strategies are enabled
+ if (ctx.config.strategies.onTool.length > 0) {
+ // Inject periodic nudge based on tool result count
+ if (ctx.config.nudge_freq > 0) {
+ if (injectNudge(body.messages, ctx.toolTracker, ctx.prompts.nudgeInstruction, ctx.config.nudge_freq)) {
+ ctx.logger.info("fetch", "Injected nudge instruction")
+ modified = true
+ }
+ }
+
+ // Inject synthetic instruction into last user message
+ if (injectSynth(body.messages, ctx.prompts.synthInstruction)) {
+ ctx.logger.info("fetch", "Injected synthetic instruction")
+ modified = true
+ }
+ }
+
// Check for tool messages in both formats:
// 1. OpenAI style: role === 'tool'
// 2. Anthropic style: role === 'user' with content containing tool_result
@@ -39,7 +59,7 @@ export async function handleOpenAIChatAndAnthropic(
const { allSessions, allPrunedIds } = await getAllPrunedIds(ctx.client, ctx.state)
if (toolMessages.length === 0 || allPrunedIds.size === 0) {
- return { modified: false, body }
+ return { modified, body }
}
let replacedCount = 0
@@ -103,5 +123,5 @@ export async function handleOpenAIChatAndAnthropic(
return { modified: true, body }
}
- return { modified: false, body }
+ return { modified, body }
}
diff --git a/lib/fetch-wrapper/openai-responses.ts b/lib/fetch-wrapper/openai-responses.ts
index f8305eb6..785852de 100644
--- a/lib/fetch-wrapper/openai-responses.ts
+++ b/lib/fetch-wrapper/openai-responses.ts
@@ -6,6 +6,7 @@ import {
getMostRecentActiveSession
} from "./types"
import { cacheToolParametersFromInput } from "../tool-cache"
+import { injectNudgeResponses, injectSynthResponses } from "../synth-instruction"
/**
* Handles OpenAI Responses API format (body.input array with function_call_output items).
@@ -23,17 +24,36 @@ export async function handleOpenAIResponses(
// Cache tool parameters from input
cacheToolParametersFromInput(body.input, ctx.state)
+ let modified = false
+
+ // Inject synthetic instructions if onTool strategies are enabled
+ if (ctx.config.strategies.onTool.length > 0) {
+ // Inject periodic nudge based on tool result count
+ if (ctx.config.nudge_freq > 0) {
+ if (injectNudgeResponses(body.input, ctx.toolTracker, ctx.prompts.nudgeInstruction, ctx.config.nudge_freq)) {
+ ctx.logger.info("fetch", "Injected nudge instruction (Responses API)")
+ modified = true
+ }
+ }
+
+ // Inject synthetic instruction into last user message
+ if (injectSynthResponses(body.input, ctx.prompts.synthInstruction)) {
+ ctx.logger.info("fetch", "Injected synthetic instruction (Responses API)")
+ modified = true
+ }
+ }
+
// Check for function_call_output items
const functionOutputs = body.input.filter((item: any) => item.type === 'function_call_output')
if (functionOutputs.length === 0) {
- return { modified: false, body }
+ return { modified, body }
}
const { allSessions, allPrunedIds } = await getAllPrunedIds(ctx.client, ctx.state)
if (allPrunedIds.size === 0) {
- return { modified: false, body }
+ return { modified, body }
}
let replacedCount = 0
@@ -77,5 +97,5 @@ export async function handleOpenAIResponses(
return { modified: true, body }
}
- return { modified: false, body }
+ return { modified, body }
}
diff --git a/lib/fetch-wrapper/types.ts b/lib/fetch-wrapper/types.ts
index c7f5b521..91182d05 100644
--- a/lib/fetch-wrapper/types.ts
+++ b/lib/fetch-wrapper/types.ts
@@ -1,14 +1,25 @@
import type { PluginState } from "../state"
import type { Logger } from "../logger"
+import type { ToolTracker } from "../synth-instruction"
+import type { PluginConfig } from "../config"
/** The message used to replace pruned tool output content */
export const PRUNED_CONTENT_MESSAGE = '[Output removed to save context - information superseded or no longer needed]'
+/** Prompts used for synthetic instruction injection */
+export interface SynthPrompts {
+ synthInstruction: string
+ nudgeInstruction: string
+}
+
/** Context passed to each format-specific handler */
export interface FetchHandlerContext {
state: PluginState
logger: Logger
client: any
+ config: PluginConfig
+ toolTracker: ToolTracker
+ prompts: SynthPrompts
}
/** Result from a format handler indicating what happened */
diff --git a/lib/pruning-tool.ts b/lib/pruning-tool.ts
index 81a7fa6c..e6ac011a 100644
--- a/lib/pruning-tool.ts
+++ b/lib/pruning-tool.ts
@@ -71,7 +71,7 @@ export function createPruningTool(janitor: Janitor, config: PluginConfig): Retur
return "No prunable tool outputs found. Context is already optimized.\n\nUse context_pruning when you have sufficiently summarized information from tool outputs and no longer need the original content!"
}
- return janitor.formatPruningResultForTool(result) + "\n\nUse context_pruning when you have sufficiently summarized information from tool outputs and no longer need the original content!"
+ return janitor.formatPruningResultForTool(result) + "\n\nKeep using context_pruning when you have sufficiently summarized information from tool outputs and no longer need the original content!"
},
})
}
diff --git a/lib/synth-instruction.ts b/lib/synth-instruction.ts
index 20eca63b..e171437a 100644
--- a/lib/synth-instruction.ts
+++ b/lib/synth-instruction.ts
@@ -10,6 +10,10 @@ export function createToolTracker(): ToolTracker {
}
}
+// ============================================================================
+// OpenAI Chat / Anthropic Format
+// ============================================================================
+
function countToolResults(messages: any[], tracker: ToolTracker): number {
let newCount = 0
@@ -123,3 +127,162 @@ export function injectSynth(messages: any[], instruction: string): boolean {
}
return false
}
+
+// ============================================================================
+// Google/Gemini Format (body.contents with parts)
+// ============================================================================
+
+function countToolResultsGemini(contents: any[], tracker: ToolTracker): number {
+ let newCount = 0
+ let scanIndex = 0
+ for (const content of contents) {
+ if (!Array.isArray(content.parts)) continue
+
+ for (const part of content.parts) {
+ if (part.functionResponse) {
+ // Gemini has no tool call IDs, so derive a pseudo-ID from the function name and its scan position; positions are stable when the same conversation is resent, unlike the set size, which grows each request and would double-count old results
+ const funcName = part.functionResponse.name?.toLowerCase() || 'unknown'
+ const pseudoId = `gemini:${funcName}:${scanIndex++}`
+ if (!tracker.seenToolResultIds.has(pseudoId)) {
+ tracker.seenToolResultIds.add(pseudoId)
+ newCount++
+ }
+ }
+ }
+ }
+
+ tracker.toolResultCount += newCount
+ return newCount
+}
+
+/**
+ * Counts new tool results and injects nudge instruction every N tool results (Gemini format).
+ * Returns true if injection happened.
+ */
+export function injectNudgeGemini(
+ contents: any[],
+ tracker: ToolTracker,
+ nudgeText: string,
+ freq: number
+): boolean {
+ const prevCount = tracker.toolResultCount
+ const newCount = countToolResultsGemini(contents, tracker)
+
+ if (newCount > 0) {
+ const prevBucket = Math.floor(prevCount / freq)
+ const newBucket = Math.floor(tracker.toolResultCount / freq)
+ if (newBucket > prevBucket) {
+ return appendNudgeGemini(contents, nudgeText)
+ }
+ }
+ return false
+}
+
+function appendNudgeGemini(contents: any[], nudgeText: string): boolean {
+ contents.push({
+ role: 'user',
+ parts: [{ text: nudgeText }]
+ })
+ return true
+}
+
+export function injectSynthGemini(contents: any[], instruction: string): boolean {
+ // Find the last user content that is not ignored
+ for (let i = contents.length - 1; i >= 0; i--) {
+ const content = contents[i]
+ if (content.role === 'user' && Array.isArray(content.parts)) {
+ // Check if already injected
+ const alreadyInjected = content.parts.some(
+ (part: any) => part?.text && typeof part.text === 'string' && part.text.includes(instruction)
+ )
+ if (alreadyInjected) {
+ return false
+ }
+ content.parts.push({ text: instruction })
+ return true
+ }
+ }
+ return false
+}
+
+// ============================================================================
+// OpenAI Responses API Format (body.input with type-based items)
+// ============================================================================
+
+function countToolResultsResponses(input: any[], tracker: ToolTracker): number {
+ let newCount = 0
+
+ for (const item of input) {
+ if (item.type === 'function_call_output' && item.call_id) {
+ const id = String(item.call_id).toLowerCase()
+ if (!tracker.seenToolResultIds.has(id)) {
+ tracker.seenToolResultIds.add(id)
+ newCount++
+ }
+ }
+ }
+
+ tracker.toolResultCount += newCount
+ return newCount
+}
+
+/**
+ * Counts new tool results and injects nudge instruction every N tool results (Responses API format).
+ * Returns true if injection happened.
+ */
+export function injectNudgeResponses(
+ input: any[],
+ tracker: ToolTracker,
+ nudgeText: string,
+ freq: number
+): boolean {
+ const prevCount = tracker.toolResultCount
+ const newCount = countToolResultsResponses(input, tracker)
+
+ if (newCount > 0) {
+ const prevBucket = Math.floor(prevCount / freq)
+ const newBucket = Math.floor(tracker.toolResultCount / freq)
+ if (newBucket > prevBucket) {
+ return appendNudgeResponses(input, nudgeText)
+ }
+ }
+ return false
+}
+
+function appendNudgeResponses(input: any[], nudgeText: string): boolean {
+ input.push({
+ type: 'message',
+ role: 'user',
+ content: nudgeText
+ })
+ return true
+}
+
+export function injectSynthResponses(input: any[], instruction: string): boolean {
+ // Find the last user message in the input array
+ for (let i = input.length - 1; i >= 0; i--) {
+ const item = input[i]
+ if (item.type === 'message' && item.role === 'user') {
+ // Check if already injected
+ if (typeof item.content === 'string') {
+ if (item.content.includes(instruction)) {
+ return false
+ }
+ item.content = item.content + '\n\n' + instruction
+ } else if (Array.isArray(item.content)) {
+ const alreadyInjected = item.content.some(
+ (part: any) => part?.type === 'input_text' && typeof part.text === 'string' && part.text.includes(instruction)
+ )
+ if (alreadyInjected) {
+ return false
+ }
+ item.content.push({
+ type: 'input_text',
+ text: instruction
+ })
+ }
+ return true
+ }
+ }
+ return false
+}
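And the Responses API analogue, assuming the documented input-item shapes (function_call_output keyed by call_id, message items carrying input_text parts):

    const input = [
      { type: "message", role: "user", content: [{ type: "input_text", text: "Find the bug" }] },
      { type: "function_call", call_id: "call_1", name: "read", arguments: "{\"path\":\"a.ts\"}" },
      { type: "function_call_output", call_id: "call_1", output: "..." },
    ]
    // countToolResultsResponses keys on call_id, so resent conversations are not
    // double-counted; injectSynthResponses appends an input_text part to the
    // last user message (here, the first item).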