From b04a4a5e439b31d75e965b1cd19c813d32b9f17f Mon Sep 17 00:00:00 2001 From: Hannes Rudolph Date: Wed, 4 Feb 2026 15:04:32 -0700 Subject: [PATCH 1/5] fix(ai-sdk): preserve reasoning parts in message conversion --- src/api/transform/__tests__/ai-sdk.spec.ts | 46 ++++++ src/api/transform/ai-sdk.ts | 31 +++- src/core/task/Task.ts | 25 ++- .../__tests__/reasoning-preservation.test.ts | 147 +++++++----------- 4 files changed, 153 insertions(+), 96 deletions(-) diff --git a/src/api/transform/__tests__/ai-sdk.spec.ts b/src/api/transform/__tests__/ai-sdk.spec.ts index fb4e3b9e2f2..1f7dbccd34e 100644 --- a/src/api/transform/__tests__/ai-sdk.spec.ts +++ b/src/api/transform/__tests__/ai-sdk.spec.ts @@ -308,6 +308,52 @@ describe("AI SDK conversion utilities", () => { content: [{ type: "text", text: "" }], }) }) + + it("converts assistant reasoning blocks", () => { + const messages: Anthropic.Messages.MessageParam[] = [ + { + role: "assistant", + content: [ + { type: "reasoning" as any, text: "Thinking..." }, + { type: "text", text: "Answer" }, + ], + }, + ] + + const result = convertToAiSdkMessages(messages) + + expect(result).toHaveLength(1) + expect(result[0]).toEqual({ + role: "assistant", + content: [ + { type: "reasoning", text: "Thinking..." }, + { type: "text", text: "Answer" }, + ], + }) + }) + + it("converts assistant thinking blocks to reasoning", () => { + const messages: Anthropic.Messages.MessageParam[] = [ + { + role: "assistant", + content: [ + { type: "thinking" as any, thinking: "Deep thought", signature: "sig" }, + { type: "text", text: "OK" }, + ], + }, + ] + + const result = convertToAiSdkMessages(messages) + + expect(result).toHaveLength(1) + expect(result[0]).toEqual({ + role: "assistant", + content: [ + { type: "reasoning", text: "Deep thought" }, + { type: "text", text: "OK" }, + ], + }) + }) }) describe("convertToolsForAiSdk", () => { diff --git a/src/api/transform/ai-sdk.ts b/src/api/transform/ai-sdk.ts index c6f37be694d..c1b939e66e5 100644 --- a/src/api/transform/ai-sdk.ts +++ b/src/api/transform/ai-sdk.ts @@ -126,6 +126,7 @@ export function convertToAiSdkMessages( } } else if (message.role === "assistant") { const textParts: string[] = [] + const reasoningParts: string[] = [] const toolCalls: Array<{ type: "tool-call" toolCallId: string @@ -136,21 +137,49 @@ export function convertToAiSdkMessages( for (const part of message.content) { if (part.type === "text") { textParts.push(part.text) - } else if (part.type === "tool_use") { + continue + } + + if (part.type === "tool_use") { toolCalls.push({ type: "tool-call", toolCallId: part.id, toolName: part.name, input: part.input, }) + continue + } + + // Some providers (DeepSeek, Gemini, etc.) require reasoning to be round-tripped. + // Task stores reasoning as a content block (type: "reasoning") and Anthropic extended + // thinking as (type: "thinking"). Convert both to AI SDK's reasoning part. 
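+			// Neither shape is guaranteed by the SDK's content-block union, so both checks
+			// narrow `part` through `unknown` rather than extending the upstream types.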
+ if ((part as unknown as { type?: string }).type === "reasoning") { + const text = (part as unknown as { text?: string }).text + if (typeof text === "string" && text.length > 0) { + reasoningParts.push(text) + } + continue + } + + if ((part as unknown as { type?: string }).type === "thinking") { + const thinking = (part as unknown as { thinking?: string }).thinking + if (typeof thinking === "string" && thinking.length > 0) { + reasoningParts.push(thinking) + } + continue } } const content: Array< + | { type: "reasoning"; text: string } | { type: "text"; text: string } | { type: "tool-call"; toolCallId: string; toolName: string; input: unknown } > = [] + if (reasoningParts.length > 0) { + content.push({ type: "reasoning", text: reasoningParts.join("") }) + } + if (textParts.length > 0) { content.push({ type: "text", text: textParts.join("\n") }) } diff --git a/src/core/task/Task.ts b/src/core/task/Task.ts index 107cfdf9e9e..d19d205a731 100644 --- a/src/core/task/Task.ts +++ b/src/core/task/Task.ts @@ -4564,14 +4564,29 @@ export class Task extends EventEmitter implements TaskLike { continue } else if (hasPlainTextReasoning) { - // Check if the model's preserveReasoning flag is set - // If true, include the reasoning block in API requests - // If false/undefined, strip it out (stored for history only, not sent back to API) - const shouldPreserveForApi = this.api.getModel().info.preserveReasoning === true + // Preserve plain-text reasoning blocks for: + // - models explicitly opting in via preserveReasoning + // - AI SDK providers (provider packages decide what to include in the native request) + const aiSdkProviders = new Set([ + "deepseek", + "fireworks", + "moonshot", + "mistral", + "groq", + "xai", + "cerebras", + "sambanova", + "huggingface", + "openai-compatible", + ]) + + const shouldPreserveForApi = + this.api.getModel().info.preserveReasoning === true || + aiSdkProviders.has(this.apiConfiguration.apiProvider ?? "") + let assistantContent: Anthropic.Messages.MessageParam["content"] if (shouldPreserveForApi) { - // Include reasoning block in the content sent to API assistantContent = contentArray } else { // Strip reasoning out - stored for history only, not sent back to API diff --git a/src/core/task/__tests__/reasoning-preservation.test.ts b/src/core/task/__tests__/reasoning-preservation.test.ts index 3bf2dec2986..2a3978e9111 100644 --- a/src/core/task/__tests__/reasoning-preservation.test.ts +++ b/src/core/task/__tests__/reasoning-preservation.test.ts @@ -219,41 +219,33 @@ describe("Task reasoning preservation", () => { // Spy on addToApiConversationHistory const addToApiHistorySpy = vi.spyOn(task as any, "addToApiConversationHistory") - // Simulate what happens in the streaming loop when preserveReasoning is true - let finalAssistantMessage = assistantMessage - if (reasoningMessage && task.api.getModel().info.preserveReasoning) { - finalAssistantMessage = `${reasoningMessage}\n${assistantMessage}` - } - - await (task as any).addToApiConversationHistory({ - role: "assistant", - content: [{ type: "text", text: finalAssistantMessage }], - }) + await (task as any).addToApiConversationHistory( + { + role: "assistant", + content: [{ type: "text", text: assistantMessage }], + }, + reasoningMessage, + ) - // Verify that reasoning was prepended in tags to the assistant message - expect(addToApiHistorySpy).toHaveBeenCalledWith({ - role: "assistant", - content: [ - { - type: "text", - text: "Let me think about this step by step. 
First, I need to...\nHere is my response to your question.", - }, - ], - }) + // Verify that reasoning was stored as a separate reasoning block + expect(addToApiHistorySpy).toHaveBeenCalledWith( + { + role: "assistant", + content: [{ type: "text", text: assistantMessage }], + }, + reasoningMessage, + ) - // Verify the API conversation history contains the message with reasoning + // Verify the API conversation history contains the message with reasoning block expect(task.apiConversationHistory).toHaveLength(1) - expect((task.apiConversationHistory[0].content[0] as { text: string }).text).toContain("") - expect((task.apiConversationHistory[0].content[0] as { text: string }).text).toContain("") - expect((task.apiConversationHistory[0].content[0] as { text: string }).text).toContain( - "Here is my response to your question.", - ) - expect((task.apiConversationHistory[0].content[0] as { text: string }).text).toContain( - "Let me think about this step by step. First, I need to...", - ) + expect(task.apiConversationHistory[0].role).toBe("assistant") + expect(task.apiConversationHistory[0].content).toEqual([ + { type: "reasoning", text: reasoningMessage, summary: [] }, + { type: "text", text: assistantMessage }, + ]) }) - it("should NOT append reasoning to assistant message when preserveReasoning is false", async () => { + it("should store reasoning blocks even when preserveReasoning is false", async () => { // Create a task instance const task = new Task({ provider: mockProvider as ClineProvider, @@ -279,36 +271,25 @@ describe("Task reasoning preservation", () => { // Mock the API conversation history task.apiConversationHistory = [] - // Simulate adding an assistant message with reasoning + // Add an assistant message while passing reasoning separately (Task does this in normal streaming). const assistantMessage = "Here is my response to your question." const reasoningMessage = "Let me think about this step by step. First, I need to..." - // Spy on addToApiConversationHistory - const addToApiHistorySpy = vi.spyOn(task as any, "addToApiConversationHistory") - - // Simulate what happens in the streaming loop when preserveReasoning is false - let finalAssistantMessage = assistantMessage - if (reasoningMessage && task.api.getModel().info.preserveReasoning) { - finalAssistantMessage = `${reasoningMessage}\n${assistantMessage}` - } - - await (task as any).addToApiConversationHistory({ - role: "assistant", - content: [{ type: "text", text: finalAssistantMessage }], - }) - - // Verify that reasoning was NOT appended to the assistant message - expect(addToApiHistorySpy).toHaveBeenCalledWith({ - role: "assistant", - content: [{ type: "text", text: "Here is my response to your question." 
}], - }) + await (task as any).addToApiConversationHistory( + { + role: "assistant", + content: [{ type: "text", text: assistantMessage }], + }, + reasoningMessage, + ) - // Verify the API conversation history does NOT contain reasoning + // Verify the API conversation history contains a reasoning block (storage is unconditional) expect(task.apiConversationHistory).toHaveLength(1) - expect((task.apiConversationHistory[0].content[0] as { text: string }).text).toBe( - "Here is my response to your question.", - ) - expect((task.apiConversationHistory[0].content[0] as { text: string }).text).not.toContain("") + expect(task.apiConversationHistory[0].role).toBe("assistant") + expect(task.apiConversationHistory[0].content).toEqual([ + { type: "reasoning", text: reasoningMessage, summary: [] }, + { type: "text", text: assistantMessage }, + ]) }) it("should handle empty reasoning message gracefully when preserveReasoning is true", async () => { @@ -340,29 +321,16 @@ describe("Task reasoning preservation", () => { const assistantMessage = "Here is my response." const reasoningMessage = "" // Empty reasoning - // Spy on addToApiConversationHistory - const addToApiHistorySpy = vi.spyOn(task as any, "addToApiConversationHistory") - - // Simulate what happens in the streaming loop - let finalAssistantMessage = assistantMessage - if (reasoningMessage && task.api.getModel().info.preserveReasoning) { - finalAssistantMessage = `${reasoningMessage}\n${assistantMessage}` - } - - await (task as any).addToApiConversationHistory({ - role: "assistant", - content: [{ type: "text", text: finalAssistantMessage }], - }) - - // Verify that no reasoning tags were added when reasoning is empty - expect(addToApiHistorySpy).toHaveBeenCalledWith({ - role: "assistant", - content: [{ type: "text", text: "Here is my response." }], - }) + await (task as any).addToApiConversationHistory( + { + role: "assistant", + content: [{ type: "text", text: assistantMessage }], + }, + reasoningMessage || undefined, + ) - // Verify the message doesn't contain reasoning tags - expect((task.apiConversationHistory[0].content[0] as { text: string }).text).toBe("Here is my response.") - expect((task.apiConversationHistory[0].content[0] as { text: string }).text).not.toContain("") + // Verify no reasoning blocks were added when reasoning is empty + expect(task.apiConversationHistory[0].content).toEqual([{ type: "text", text: "Here is my response." }]) }) it("should handle undefined preserveReasoning (defaults to false)", async () => { @@ -394,20 +362,19 @@ describe("Task reasoning preservation", () => { const assistantMessage = "Here is my response." const reasoningMessage = "Some reasoning here." 
- // Simulate what happens in the streaming loop - let finalAssistantMessage = assistantMessage - if (reasoningMessage && task.api.getModel().info.preserveReasoning) { - finalAssistantMessage = `${reasoningMessage}\n${assistantMessage}` - } - - await (task as any).addToApiConversationHistory({ - role: "assistant", - content: [{ type: "text", text: finalAssistantMessage }], - }) + await (task as any).addToApiConversationHistory( + { + role: "assistant", + content: [{ type: "text", text: assistantMessage }], + }, + reasoningMessage, + ) - // Verify reasoning was NOT prepended (undefined defaults to false) - expect((task.apiConversationHistory[0].content[0] as { text: string }).text).toBe("Here is my response.") - expect((task.apiConversationHistory[0].content[0] as { text: string }).text).not.toContain("") + // Verify reasoning is stored even when preserveReasoning is undefined + expect(task.apiConversationHistory[0].content).toEqual([ + { type: "reasoning", text: reasoningMessage, summary: [] }, + { type: "text", text: assistantMessage }, + ]) }) it("should embed encrypted reasoning as first assistant content block", async () => { From 8d059fe7cc695b04bfe3ab7adbb9f76157718918 Mon Sep 17 00:00:00 2001 From: Hannes Rudolph Date: Wed, 4 Feb 2026 16:25:29 -0700 Subject: [PATCH 2/5] fix(ai-sdk): convert message-level reasoning_content to reasoning part --- src/api/transform/__tests__/ai-sdk.spec.ts | 45 ++++++++++++++++++++++ src/api/transform/ai-sdk.ts | 14 ++++++- 2 files changed, 58 insertions(+), 1 deletion(-) diff --git a/src/api/transform/__tests__/ai-sdk.spec.ts b/src/api/transform/__tests__/ai-sdk.spec.ts index 1f7dbccd34e..af9f56380be 100644 --- a/src/api/transform/__tests__/ai-sdk.spec.ts +++ b/src/api/transform/__tests__/ai-sdk.spec.ts @@ -354,6 +354,51 @@ describe("AI SDK conversion utilities", () => { ], }) }) + + it("converts assistant message-level reasoning_content to reasoning part", () => { + const messages: Anthropic.Messages.MessageParam[] = [ + { + role: "assistant", + content: [{ type: "text", text: "Answer" }], + reasoning_content: "Thinking...", + } as any, + ] + + const result = convertToAiSdkMessages(messages) + + expect(result).toHaveLength(1) + expect(result[0]).toEqual({ + role: "assistant", + content: [ + { type: "reasoning", text: "Thinking..." }, + { type: "text", text: "Answer" }, + ], + }) + }) + + it("prefers message-level reasoning_content over reasoning blocks", () => { + const messages: Anthropic.Messages.MessageParam[] = [ + { + role: "assistant", + content: [ + { type: "reasoning" as any, text: "BLOCK" }, + { type: "text", text: "Answer" }, + ], + reasoning_content: "MSG", + } as any, + ] + + const result = convertToAiSdkMessages(messages) + + expect(result).toHaveLength(1) + expect(result[0]).toEqual({ + role: "assistant", + content: [ + { type: "reasoning", text: "MSG" }, + { type: "text", text: "Answer" }, + ], + }) + }) }) describe("convertToolsForAiSdk", () => { diff --git a/src/api/transform/ai-sdk.ts b/src/api/transform/ai-sdk.ts index c1b939e66e5..e8b861ed84f 100644 --- a/src/api/transform/ai-sdk.ts +++ b/src/api/transform/ai-sdk.ts @@ -127,6 +127,10 @@ export function convertToAiSdkMessages( } else if (message.role === "assistant") { const textParts: string[] = [] const reasoningParts: string[] = [] + const reasoningContent = (() => { + const maybe = (message as unknown as { reasoning_content?: unknown }).reasoning_content + return typeof maybe === "string" && maybe.length > 0 ? 
maybe : undefined + })() const toolCalls: Array<{ type: "tool-call" toolCallId: string @@ -154,6 +158,10 @@ export function convertToAiSdkMessages( // Task stores reasoning as a content block (type: "reasoning") and Anthropic extended // thinking as (type: "thinking"). Convert both to AI SDK's reasoning part. if ((part as unknown as { type?: string }).type === "reasoning") { + // If message-level reasoning_content is present, treat it as canonical and + // avoid mixing it with content-block reasoning (which can cause duplication). + if (reasoningContent) continue + const text = (part as unknown as { text?: string }).text if (typeof text === "string" && text.length > 0) { reasoningParts.push(text) @@ -162,6 +170,8 @@ export function convertToAiSdkMessages( } if ((part as unknown as { type?: string }).type === "thinking") { + if (reasoningContent) continue + const thinking = (part as unknown as { thinking?: string }).thinking if (typeof thinking === "string" && thinking.length > 0) { reasoningParts.push(thinking) @@ -176,7 +186,9 @@ export function convertToAiSdkMessages( | { type: "tool-call"; toolCallId: string; toolName: string; input: unknown } > = [] - if (reasoningParts.length > 0) { + if (reasoningContent) { + content.push({ type: "reasoning", text: reasoningContent }) + } else if (reasoningParts.length > 0) { content.push({ type: "reasoning", text: reasoningParts.join("") }) } From 32d7325182f552e33e8efdd3e6cced54d475097e Mon Sep 17 00:00:00 2001 From: Hannes Rudolph Date: Wed, 4 Feb 2026 19:53:51 -0700 Subject: [PATCH 3/5] fix(task): remove invalid openai-compatible from reasoning allowlist --- src/core/task/Task.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/src/core/task/Task.ts b/src/core/task/Task.ts index d19d205a731..ccb68611bee 100644 --- a/src/core/task/Task.ts +++ b/src/core/task/Task.ts @@ -4577,7 +4577,6 @@ export class Task extends EventEmitter implements TaskLike { "cerebras", "sambanova", "huggingface", - "openai-compatible", ]) const shouldPreserveForApi = From 5a3c1739f108efcdaf4d5f37fb526c48d9c958a7 Mon Sep 17 00:00:00 2001 From: daniel-lxs Date: Thu, 5 Feb 2026 12:02:49 -0500 Subject: [PATCH 4/5] feat: add isAiSdkProvider() method for dynamic AI SDK provider detection - Add isAiSdkProvider() method to ApiHandler interface - Default implementation in BaseProvider returns false - Override to return true in 11 AI SDK providers: deepseek, fireworks, mistral, groq, xai, cerebras, sambanova, huggingface, gemini, vertex, openai-compatible - Update Task.ts to use dynamic detection instead of hardcoded Set - Add method to FakeAIHandler and update test mocks --- src/api/index.ts | 9 +++++++++ src/api/providers/base-provider.ts | 8 ++++++++ src/api/providers/cerebras.ts | 4 ++++ src/api/providers/deepseek.ts | 4 ++++ src/api/providers/fake-ai.ts | 4 ++++ src/api/providers/fireworks.ts | 4 ++++ src/api/providers/gemini.ts | 4 ++++ src/api/providers/groq.ts | 4 ++++ src/api/providers/huggingface.ts | 4 ++++ src/api/providers/mistral.ts | 4 ++++ src/api/providers/openai-compatible.ts | 4 ++++ src/api/providers/sambanova.ts | 4 ++++ src/api/providers/vertex.ts | 4 ++++ src/api/providers/xai.ts | 4 ++++ .../transform/__tests__/image-cleaning.spec.ts | 1 + src/core/task/Task.ts | 15 +-------------- 16 files changed, 67 insertions(+), 14 deletions(-) diff --git a/src/api/index.ts b/src/api/index.ts index 30119b7dc7e..0e25a739a64 100644 --- a/src/api/index.ts +++ b/src/api/index.ts @@ -117,6 +117,15 @@ export interface ApiHandler { * @returns A promise resolving to the token 
count */ countTokens(content: Array): Promise + + /** + * Indicates whether this provider uses the Vercel AI SDK for streaming. + * AI SDK providers handle reasoning blocks differently and need to preserve + * them in conversation history for proper round-tripping. + * + * @returns true if the provider uses AI SDK, false otherwise + */ + isAiSdkProvider(): boolean } export function buildApiHandler(configuration: ProviderSettings): ApiHandler { diff --git a/src/api/providers/base-provider.ts b/src/api/providers/base-provider.ts index a6adeeadbd4..817af53a494 100644 --- a/src/api/providers/base-provider.ts +++ b/src/api/providers/base-provider.ts @@ -119,4 +119,12 @@ export abstract class BaseProvider implements ApiHandler { return countTokens(content, { useWorker: true }) } + + /** + * Default implementation returns false. + * AI SDK providers should override this to return true. + */ + isAiSdkProvider(): boolean { + return false + } } diff --git a/src/api/providers/cerebras.ts b/src/api/providers/cerebras.ts index de1a4b2dbbe..0ca8b200662 100644 --- a/src/api/providers/cerebras.ts +++ b/src/api/providers/cerebras.ts @@ -156,4 +156,8 @@ export class CerebrasHandler extends BaseProvider implements SingleCompletionHan return text } + + override isAiSdkProvider(): boolean { + return true + } } diff --git a/src/api/providers/deepseek.ts b/src/api/providers/deepseek.ts index ba9c9d47e30..091f585456c 100644 --- a/src/api/providers/deepseek.ts +++ b/src/api/providers/deepseek.ts @@ -166,4 +166,8 @@ export class DeepSeekHandler extends BaseProvider implements SingleCompletionHan return text } + + override isAiSdkProvider(): boolean { + return true + } } diff --git a/src/api/providers/fake-ai.ts b/src/api/providers/fake-ai.ts index c73752fc661..b6bb9fd2c34 100644 --- a/src/api/providers/fake-ai.ts +++ b/src/api/providers/fake-ai.ts @@ -78,4 +78,8 @@ export class FakeAIHandler implements ApiHandler, SingleCompletionHandler { completePrompt(prompt: string): Promise { return this.ai.completePrompt(prompt) } + + isAiSdkProvider(): boolean { + return false + } } diff --git a/src/api/providers/fireworks.ts b/src/api/providers/fireworks.ts index 52bf431bb64..bc5560bfbbf 100644 --- a/src/api/providers/fireworks.ts +++ b/src/api/providers/fireworks.ts @@ -172,4 +172,8 @@ export class FireworksHandler extends BaseProvider implements SingleCompletionHa return text } + + override isAiSdkProvider(): boolean { + return true + } } diff --git a/src/api/providers/gemini.ts b/src/api/providers/gemini.ts index ed291db74e7..58f58bd1498 100644 --- a/src/api/providers/gemini.ts +++ b/src/api/providers/gemini.ts @@ -397,4 +397,8 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl return totalCost } + + override isAiSdkProvider(): boolean { + return true + } } diff --git a/src/api/providers/groq.ts b/src/api/providers/groq.ts index 648679f92cf..12bd6b4023f 100644 --- a/src/api/providers/groq.ts +++ b/src/api/providers/groq.ts @@ -174,4 +174,8 @@ export class GroqHandler extends BaseProvider implements SingleCompletionHandler return text } + + override isAiSdkProvider(): boolean { + return true + } } diff --git a/src/api/providers/huggingface.ts b/src/api/providers/huggingface.ts index 25d0608a331..79daa95c77c 100644 --- a/src/api/providers/huggingface.ts +++ b/src/api/providers/huggingface.ts @@ -208,4 +208,8 @@ export class HuggingFaceHandler extends BaseProvider implements SingleCompletion return text } + + override isAiSdkProvider(): boolean { + return true + } } diff --git 
a/src/api/providers/mistral.ts b/src/api/providers/mistral.ts index 7ce2fc4586d..77d2b9f572b 100644 --- a/src/api/providers/mistral.ts +++ b/src/api/providers/mistral.ts @@ -198,4 +198,8 @@ export class MistralHandler extends BaseProvider implements SingleCompletionHand return text } + + override isAiSdkProvider(): boolean { + return true + } } diff --git a/src/api/providers/openai-compatible.ts b/src/api/providers/openai-compatible.ts index 240de747be4..8f810349abe 100644 --- a/src/api/providers/openai-compatible.ts +++ b/src/api/providers/openai-compatible.ts @@ -186,4 +186,8 @@ export abstract class OpenAICompatibleHandler extends BaseProvider implements Si return text } + + override isAiSdkProvider(): boolean { + return true + } } diff --git a/src/api/providers/sambanova.ts b/src/api/providers/sambanova.ts index 1e68dae33ff..e1fee215060 100644 --- a/src/api/providers/sambanova.ts +++ b/src/api/providers/sambanova.ts @@ -177,4 +177,8 @@ export class SambaNovaHandler extends BaseProvider implements SingleCompletionHa return text } + + override isAiSdkProvider(): boolean { + return true + } } diff --git a/src/api/providers/vertex.ts b/src/api/providers/vertex.ts index 62aec505c92..b2eb370c2f9 100644 --- a/src/api/providers/vertex.ts +++ b/src/api/providers/vertex.ts @@ -402,4 +402,8 @@ export class VertexHandler extends BaseProvider implements SingleCompletionHandl return totalCost } + + override isAiSdkProvider(): boolean { + return true + } } diff --git a/src/api/providers/xai.ts b/src/api/providers/xai.ts index 238dbeaf2de..88a7aceb464 100644 --- a/src/api/providers/xai.ts +++ b/src/api/providers/xai.ts @@ -187,4 +187,8 @@ export class XAIHandler extends BaseProvider implements SingleCompletionHandler throw handleAiSdkError(error, "xAI") } } + + override isAiSdkProvider(): boolean { + return true + } } diff --git a/src/api/transform/__tests__/image-cleaning.spec.ts b/src/api/transform/__tests__/image-cleaning.spec.ts index e32a4b8770f..fc91e0da46e 100644 --- a/src/api/transform/__tests__/image-cleaning.spec.ts +++ b/src/api/transform/__tests__/image-cleaning.spec.ts @@ -18,6 +18,7 @@ describe("maybeRemoveImageBlocks", () => { }), createMessage: vitest.fn(), countTokens: vitest.fn(), + isAiSdkProvider: vitest.fn().mockReturnValue(false), } } diff --git a/src/core/task/Task.ts b/src/core/task/Task.ts index ccb68611bee..51c05138b68 100644 --- a/src/core/task/Task.ts +++ b/src/core/task/Task.ts @@ -4567,21 +4567,8 @@ export class Task extends EventEmitter implements TaskLike { // Preserve plain-text reasoning blocks for: // - models explicitly opting in via preserveReasoning // - AI SDK providers (provider packages decide what to include in the native request) - const aiSdkProviders = new Set([ - "deepseek", - "fireworks", - "moonshot", - "mistral", - "groq", - "xai", - "cerebras", - "sambanova", - "huggingface", - ]) - const shouldPreserveForApi = - this.api.getModel().info.preserveReasoning === true || - aiSdkProviders.has(this.apiConfiguration.apiProvider ?? 
"") + this.api.getModel().info.preserveReasoning === true || this.api.isAiSdkProvider() let assistantContent: Anthropic.Messages.MessageParam["content"] From 7bc68de7d813e5deff35433e905952ed9f35de8e Mon Sep 17 00:00:00 2001 From: daniel-lxs Date: Thu, 5 Feb 2026 12:06:30 -0500 Subject: [PATCH 5/5] fix: handle reasoning parts in flattenAiSdkMessagesToStringContent - Strip reasoning parts when flattening messages for string-only models - Allow flattening when message contains only text and reasoning parts - Add tests for reasoning part handling in string-only model contexts This addresses the review feedback about ensuring flattenAiSdkMessagesToStringContent works correctly when reasoning parts are present (e.g., SambaNova DeepSeek). --- src/api/transform/__tests__/ai-sdk.spec.ts | 49 ++++++++++++++++++++++ src/api/transform/ai-sdk.ts | 11 +++-- 2 files changed, 56 insertions(+), 4 deletions(-) diff --git a/src/api/transform/__tests__/ai-sdk.spec.ts b/src/api/transform/__tests__/ai-sdk.spec.ts index af9f56380be..fd112f2896f 100644 --- a/src/api/transform/__tests__/ai-sdk.spec.ts +++ b/src/api/transform/__tests__/ai-sdk.spec.ts @@ -908,5 +908,54 @@ describe("AI SDK conversion utilities", () => { expect(result[0].content).toBe("\nHello") }) + + it("should strip reasoning parts and flatten text for string-only models", () => { + const messages = [ + { + role: "assistant" as const, + content: [ + { type: "reasoning" as const, text: "I am thinking about this..." }, + { type: "text" as const, text: "Here is my answer" }, + ], + }, + ] + + const result = flattenAiSdkMessagesToStringContent(messages) + + // Reasoning should be stripped, only text should remain + expect(result[0].content).toBe("Here is my answer") + }) + + it("should handle messages with only reasoning parts", () => { + const messages = [ + { + role: "assistant" as const, + content: [{ type: "reasoning" as const, text: "Only reasoning, no text" }], + }, + ] + + const result = flattenAiSdkMessagesToStringContent(messages) + + // Should flatten to empty string when only reasoning is present + expect(result[0].content).toBe("") + }) + + it("should not flatten if tool calls are present with reasoning", () => { + const messages = [ + { + role: "assistant" as const, + content: [ + { type: "reasoning" as const, text: "Thinking..." 
}, + { type: "text" as const, text: "Using tool" }, + { type: "tool-call" as const, toolCallId: "abc", toolName: "test", input: {} }, + ], + }, + ] + + const result = flattenAiSdkMessagesToStringContent(messages) + + // Should not flatten because there's a tool call + expect(result[0]).toEqual(messages[0]) + }) }) }) diff --git a/src/api/transform/ai-sdk.ts b/src/api/transform/ai-sdk.ts index e8b861ed84f..73f3131bef2 100644 --- a/src/api/transform/ai-sdk.ts +++ b/src/api/transform/ai-sdk.ts @@ -267,10 +267,13 @@ export function flattenAiSdkMessagesToStringContent( // Handle assistant messages if (message.role === "assistant" && flattenAssistantMessages && Array.isArray(message.content)) { const parts = message.content as Array<{ type: string; text?: string }> - // Only flatten if all parts are text (no tool calls) - const allText = parts.every((part) => part.type === "text") - if (allText && parts.length > 0) { - const textContent = parts.map((part) => part.text || "").join("\n") + // Only flatten if all parts are text or reasoning (no tool calls) + // Reasoning parts are included in text to avoid sending multipart content to string-only models + const allTextOrReasoning = parts.every((part) => part.type === "text" || part.type === "reasoning") + if (allTextOrReasoning && parts.length > 0) { + // Extract only text parts for the flattened content (reasoning is stripped for string-only models) + const textParts = parts.filter((part) => part.type === "text") + const textContent = textParts.map((part) => part.text || "").join("\n") return { ...message, content: textContent,
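
For reference, a minimal TypeScript sketch of the round-trip these patches enable, using the shapes asserted in the specs above; the import paths and the `as any` cast on the stored reasoning block are illustrative (the block is Roo's own storage shape, not part of the published Anthropic content types):

import { Anthropic } from "@anthropic-ai/sdk"
import { convertToAiSdkMessages, flattenAiSdkMessagesToStringContent } from "./src/api/transform/ai-sdk"

// An assistant turn as Task now stores it: a reasoning block followed by the visible text.
const history: Anthropic.Messages.MessageParam[] = [
	{
		role: "assistant",
		content: [
			{ type: "reasoning", text: "Let me think about this step by step." } as any,
			{ type: "text", text: "Here is my answer." },
		],
	},
]

// AI SDK providers receive the reasoning as a dedicated part ahead of the text part:
// [{ role: "assistant", content: [{ type: "reasoning", text: "Let me think..." },
//                                 { type: "text", text: "Here is my answer." }] }]
const aiSdkMessages = convertToAiSdkMessages(history)

// String-only models get the reasoning stripped and the remaining text flattened:
// [{ role: "assistant", content: "Here is my answer." }]
const flattened = flattenAiSdkMessagesToStringContent(aiSdkMessages)
console.log(flattened)

The flattened form is what string-only models receive (the SambaNova DeepSeek case called out in patch 5), while providers that report isAiSdkProvider() keep the reasoning part and decide what to include in their native request, matching the preserveReasoning behaviour exercised in the Task tests.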