From 1000dcc4d228574b72fa637b3396806af449b564 Mon Sep 17 00:00:00 2001
From: daniel-lxs
Date: Thu, 20 Nov 2025 18:00:02 -0500
Subject: [PATCH 1/3] feat: store reasoning in conversation history for all providers

---
 src/core/task-persistence/apiMessages.ts     |  1 +
 src/core/task/Task.ts                        | 75 ++++++++++++++++---
 .../__tests__/reasoning-preservation.test.ts | 58 ++++++++++++++
 3 files changed, 124 insertions(+), 10 deletions(-)

diff --git a/src/core/task-persistence/apiMessages.ts b/src/core/task-persistence/apiMessages.ts
index 5beda00ddc4..1fa1e6df5b8 100644
--- a/src/core/task-persistence/apiMessages.ts
+++ b/src/core/task-persistence/apiMessages.ts
@@ -17,6 +17,7 @@ export type ApiMessage = Anthropic.MessageParam & {
 	type?: "reasoning"
 	summary?: any[]
 	encrypted_content?: string
+	text?: string
 }
 
 export async function readApiMessages({
diff --git a/src/core/task/Task.ts b/src/core/task/Task.ts
index 7c0355e4982..cc8bee008ff 100644
--- a/src/core/task/Task.ts
+++ b/src/core/task/Task.ts
@@ -646,19 +646,21 @@ export class Task extends EventEmitter implements TaskLike {
 		return readApiMessages({ taskId: this.taskId, globalStoragePath: this.globalStoragePath })
 	}
 
-	private async addToApiConversationHistory(message: Anthropic.MessageParam) {
+	private async addToApiConversationHistory(message: Anthropic.MessageParam, reasoning?: string) {
 		// Capture the encrypted_content / thought signatures from the provider (e.g., OpenAI Responses API, Google GenAI) if present.
 		// We only persist data reported by the current response body.
 		const handler = this.api as ApiHandler & {
 			getResponseId?: () => string | undefined
 			getEncryptedContent?: () => { encrypted_content: string; id?: string } | undefined
 			getThoughtSignature?: () => string | undefined
+			getSummary?: () => any[] | undefined
 		}
 
 		if (message.role === "assistant") {
 			const responseId = handler.getResponseId?.()
 			const reasoningData = handler.getEncryptedContent?.()
 			const thoughtSignature = handler.getThoughtSignature?.()
+			const reasoningSummary = handler.getSummary?.()
 
 			// Start from the original assistant message
 			const messageWithTs: any = {
@@ -667,10 +669,30 @@ export class Task extends EventEmitter implements TaskLike {
 				ts: Date.now(),
 			}
 
-			// If we have encrypted_content, embed it as the first content block on the assistant message.
+			// If we have reasoning text (from streaming), store it as a reasoning block
+			// This is the primary path for most providers (Anthropic, Gemini, Mistral, etc.)
+			if (reasoning) {
+				const reasoningBlock = {
+					type: "reasoning",
+					text: reasoning,
+					summary: reasoningSummary ?? ([] as any[]),
+				}
+
+				if (typeof messageWithTs.content === "string") {
+					messageWithTs.content = [
+						reasoningBlock,
+						{ type: "text", text: messageWithTs.content } satisfies Anthropic.Messages.TextBlockParam,
+					]
+				} else if (Array.isArray(messageWithTs.content)) {
+					messageWithTs.content = [reasoningBlock, ...messageWithTs.content]
+				} else if (!messageWithTs.content) {
+					messageWithTs.content = [reasoningBlock]
+				}
+			}
+			// If we have encrypted_content (OpenAI Native specific), embed it as the first content block
 			// This keeps reasoning + assistant atomic for context management while still allowing providers
 			// to receive a separate reasoning item when we build the request.
-			if (reasoningData?.encrypted_content) {
+			else if (reasoningData?.encrypted_content) {
 				const reasoningBlock = {
 					type: "reasoning",
 					summary: [] as any[],
@@ -2661,10 +2683,13 @@ export class Task extends EventEmitter implements TaskLike {
 				}
 			}
 
-			await this.addToApiConversationHistory({
-				role: "assistant",
-				content: assistantContent,
-			})
+			await this.addToApiConversationHistory(
+				{
+					role: "assistant",
+					content: assistantContent,
+				},
+				reasoningMessage || undefined,
+			)
 
 			TelemetryService.instance.captureConversationMessage(this.taskId, "assistant")
 
@@ -3340,6 +3365,7 @@ export class Task extends EventEmitter implements TaskLike {
 
 		for (const msg of messages) {
 			// Legacy path: standalone reasoning items stored as separate messages
+			// Only include in request if it has encrypted_content (OpenAI Native specific)
 			if (msg.type === "reasoning" && msg.encrypted_content) {
 				cleanConversationHistory.push({
 					type: "reasoning",
@@ -3350,6 +3376,11 @@ export class Task extends EventEmitter implements TaskLike {
 				continue
 			}
 
+			// Skip standalone reasoning items with plain text (stored for history, not for API requests)
+			if (msg.type === "reasoning") {
+				continue
+			}
+
 			// Preferred path: assistant message with embedded reasoning as first content block
 			if (msg.role === "assistant") {
 				const rawContent = msg.content
@@ -3364,13 +3395,18 @@ export class Task extends EventEmitter implements TaskLike {
 
 				const [first, ...rest] = contentArray
 
-				const hasEmbeddedReasoning =
+				// Check for embedded reasoning with encrypted_content (OpenAI Native)
+				const hasEncryptedReasoning =
 					first && (first as any).type === "reasoning" && typeof (first as any).encrypted_content === "string"
 
-				if (hasEmbeddedReasoning) {
+				// Check for embedded reasoning with plain text (other providers - store but don't send)
+				const hasPlainTextReasoning =
+					first && (first as any).type === "reasoning" && typeof (first as any).text === "string"
+
+				if (hasEncryptedReasoning) {
 					const reasoningBlock = first as any
 
-					// Emit a separate reasoning item for the provider
+					// Emit a separate reasoning item for the provider (OpenAI Native needs this)
 					cleanConversationHistory.push({
 						type: "reasoning",
 						summary: reasoningBlock.summary ?? [],
@@ -3394,6 +3430,25 @@ export class Task extends EventEmitter implements TaskLike {
 						content: assistantContent,
 					} satisfies Anthropic.Messages.MessageParam)
 
+					continue
+				} else if (hasPlainTextReasoning) {
+					// For plain text reasoning, just strip it out and send the assistant message without it
+					// The reasoning was stored for history purposes, not for API requests
+					let assistantContent: Anthropic.Messages.MessageParam["content"]
+
+					if (rest.length === 0) {
+						assistantContent = ""
+					} else if (rest.length === 1 && rest[0].type === "text") {
+						assistantContent = (rest[0] as Anthropic.Messages.TextBlockParam).text
+					} else {
+						assistantContent = rest
+					}
+
+					cleanConversationHistory.push({
+						role: "assistant",
+						content: assistantContent,
+					} satisfies Anthropic.Messages.MessageParam)
+
 					continue
 				}
 			}
diff --git a/src/core/task/__tests__/reasoning-preservation.test.ts b/src/core/task/__tests__/reasoning-preservation.test.ts
index d486339fc83..7a73d2b1d07 100644
--- a/src/core/task/__tests__/reasoning-preservation.test.ts
+++ b/src/core/task/__tests__/reasoning-preservation.test.ts
@@ -371,4 +371,62 @@ describe("Task reasoning preservation", () => {
 			text: "Here is my response.",
 		})
 	})
+
+	it("should store plain text reasoning from streaming for all providers", async () => {
+		const task = new Task({
+			provider: mockProvider as ClineProvider,
+			apiConfiguration: mockApiConfiguration,
+			task: "Test task",
+			startTask: false,
+		})
+
+		// Avoid disk writes in this test
+		;(task as any).saveApiConversationHistory = vi.fn().mockResolvedValue(undefined)
+
+		// Mock API handler without getEncryptedContent (like Anthropic, Gemini, etc.)
+		task.api = {
+			getModel: vi.fn().mockReturnValue({
+				id: "test-model",
+				info: {
+					contextWindow: 16000,
+					supportsPromptCache: true,
+				},
+			}),
+		} as any
+
+		// Simulate the new path: passing reasoning as a parameter
+		const reasoningText = "Let me analyze this carefully. First, I'll consider the requirements..."
+		const assistantText = "Here is my response."
+
+		await (task as any).addToApiConversationHistory(
+			{
+				role: "assistant",
+				content: [{ type: "text", text: assistantText }],
+			},
+			reasoningText,
+		)
+
+		expect(task.apiConversationHistory).toHaveLength(1)
+		const stored = task.apiConversationHistory[0] as any
+
+		expect(stored.role).toBe("assistant")
+		expect(Array.isArray(stored.content)).toBe(true)
+
+		const [reasoningBlock, textBlock] = stored.content
+
+		// Verify reasoning is stored with plain text, not encrypted
+		expect(reasoningBlock).toMatchObject({
+			type: "reasoning",
+			text: reasoningText,
+			summary: [],
+		})
+
+		// Verify there's no encrypted_content field (that's only for OpenAI Native)
+		expect(reasoningBlock.encrypted_content).toBeUndefined()
+
+		expect(textBlock).toMatchObject({
+			type: "text",
+			text: assistantText,
+		})
+	})
 })

From 54891de13fbdf6c02c06894360ee9340bf431f94 Mon Sep 17 00:00:00 2001
From: daniel-lxs
Date: Thu, 20 Nov 2025 19:42:53 -0500
Subject: [PATCH 2/3] refactor: address review feedback

- Move comments inside else block
- Combine reasoning checks into single if block
- Make comments more concise
---
 src/core/task/Task.ts | 34 ++++++++++++++--------------------
 1 file changed, 14 insertions(+), 20 deletions(-)

diff --git a/src/core/task/Task.ts b/src/core/task/Task.ts
index cc8bee008ff..c5d7751495c 100644
--- a/src/core/task/Task.ts
+++ b/src/core/task/Task.ts
@@ -669,8 +669,7 @@ export class Task extends EventEmitter implements TaskLike {
 				ts: Date.now(),
 			}
 
-			// If we have reasoning text (from streaming), store it as a reasoning block
-			// This is the primary path for most providers (Anthropic, Gemini, Mistral, etc.)
+			// Store reasoning as plain text (most providers) or encrypted (OpenAI Native)
 			if (reasoning) {
 				const reasoningBlock = {
 					type: "reasoning",
@@ -688,11 +687,8 @@ export class Task extends EventEmitter implements TaskLike {
 				} else if (!messageWithTs.content) {
 					messageWithTs.content = [reasoningBlock]
 				}
-			}
-			// If we have encrypted_content (OpenAI Native specific), embed it as the first content block
-			// This keeps reasoning + assistant atomic for context management while still allowing providers
-			// to receive a separate reasoning item when we build the request.
-			else if (reasoningData?.encrypted_content) {
+			} else if (reasoningData?.encrypted_content) {
+				// OpenAI Native encrypted reasoning (for API continuity)
 				const reasoningBlock = {
 					type: "reasoning",
 					summary: [] as any[],
@@ -3364,20 +3360,18 @@ export class Task extends EventEmitter implements TaskLike {
 		const cleanConversationHistory: (Anthropic.Messages.MessageParam | ReasoningItemForRequest)[] = []
 
 		for (const msg of messages) {
-			// Legacy path: standalone reasoning items stored as separate messages
-			// Only include in request if it has encrypted_content (OpenAI Native specific)
-			if (msg.type === "reasoning" && msg.encrypted_content) {
-				cleanConversationHistory.push({
-					type: "reasoning",
-					summary: msg.summary,
-					encrypted_content: msg.encrypted_content!,
-					...(msg.id ? { id: msg.id } : {}),
-				})
-				continue
-			}
-
-			// Skip standalone reasoning items with plain text (stored for history, not for API requests)
+			// Handle standalone reasoning items stored as separate messages
 			if (msg.type === "reasoning") {
+				// Only include in request if it has encrypted_content (OpenAI Native specific)
+				// Skip plain text reasoning (stored for history, not for API requests)
+				if (msg.encrypted_content) {
+					cleanConversationHistory.push({
+						type: "reasoning",
+						summary: msg.summary,
+						encrypted_content: msg.encrypted_content!,
+						...(msg.id ? { id: msg.id } : {}),
+					})
+				}
 				continue
 			}
 

From 27bac70cce168f6de8bb29386fbfd3bfd4c9f684 Mon Sep 17 00:00:00 2001
From: daniel-lxs
Date: Thu, 20 Nov 2025 19:44:05 -0500
Subject: [PATCH 3/3] refactor: make comments more concise

---
 src/core/task/Task.ts | 19 +++++++------------
 1 file changed, 7 insertions(+), 12 deletions(-)

diff --git a/src/core/task/Task.ts b/src/core/task/Task.ts
index c5d7751495c..bfbdd843dff 100644
--- a/src/core/task/Task.ts
+++ b/src/core/task/Task.ts
@@ -669,7 +669,7 @@ export class Task extends EventEmitter implements TaskLike {
 				ts: Date.now(),
 			}
 
-			// Store reasoning as plain text (most providers) or encrypted (OpenAI Native)
+			// Store reasoning: plain text (most providers) or encrypted (OpenAI Native)
 			if (reasoning) {
 				const reasoningBlock = {
 					type: "reasoning",
@@ -688,7 +688,7 @@ export class Task extends EventEmitter implements TaskLike {
 					messageWithTs.content = [reasoningBlock]
 				}
 			} else if (reasoningData?.encrypted_content) {
-				// OpenAI Native encrypted reasoning (for API continuity)
+				// OpenAI Native encrypted reasoning
 				const reasoningBlock = {
 					type: "reasoning",
 					summary: [] as any[],
@@ -3360,10 +3360,8 @@ export class Task extends EventEmitter implements TaskLike {
 		const cleanConversationHistory: (Anthropic.Messages.MessageParam | ReasoningItemForRequest)[] = []
 
 		for (const msg of messages) {
-			// Handle standalone reasoning items stored as separate messages
+			// Standalone reasoning: send encrypted, skip plain text
 			if (msg.type === "reasoning") {
-				// Only include in request if it has encrypted_content (OpenAI Native specific)
-				// Skip plain text reasoning (stored for history, not for API requests)
 				if (msg.encrypted_content) {
 					cleanConversationHistory.push({
 						type: "reasoning",
@@ -3389,18 +3387,16 @@ export class Task extends EventEmitter implements TaskLike {
 
 				const [first, ...rest] = contentArray
 
-				// Check for embedded reasoning with encrypted_content (OpenAI Native)
+				// Embedded reasoning: encrypted (send) or plain text (skip)
 				const hasEncryptedReasoning =
 					first && (first as any).type === "reasoning" && typeof (first as any).encrypted_content === "string"
-
-				// Check for embedded reasoning with plain text (other providers - store but don't send)
 				const hasPlainTextReasoning =
 					first && (first as any).type === "reasoning" && typeof (first as any).text === "string"
 
 				if (hasEncryptedReasoning) {
 					const reasoningBlock = first as any
 
-					// Emit a separate reasoning item for the provider (OpenAI Native needs this)
+					// Send as separate reasoning item (OpenAI Native)
 					cleanConversationHistory.push({
 						type: "reasoning",
 						summary: reasoningBlock.summary ?? [],
 						encrypted_content: reasoningBlock.encrypted_content,
 						...(reasoningBlock.id ? { id: reasoningBlock.id } : {}),
 					})
 
-					// Build assistant message without the embedded reasoning block
+					// Send assistant message without reasoning
 					let assistantContent: Anthropic.Messages.MessageParam["content"]
 
 					if (rest.length === 0) {
@@ -3426,8 +3422,7 @@ export class Task extends EventEmitter implements TaskLike {
 
 				continue
 			} else if (hasPlainTextReasoning) {
-				// For plain text reasoning, just strip it out and send the assistant message without it
-				// The reasoning was stored for history purposes, not for API requests
+				// Strip plain text reasoning, send assistant message only
 				let assistantContent: Anthropic.Messages.MessageParam["content"]
 
 				if (rest.length === 0) {