From f0b81bb09c88b7f9a439ce04f4201f37b72d22e1 Mon Sep 17 00:00:00 2001 From: James Arthur Date: Mon, 9 Feb 2026 16:35:07 -0800 Subject: [PATCH 01/20] feat(ai): align start event types with AG-UI Widen TextMessageStartEvent.role to accept all message roles and add optional parentMessageId to ToolCallStartEvent. Co-Authored-By: Claude Opus 4.6 --- packages/typescript/ai/src/types.ts | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/packages/typescript/ai/src/types.ts b/packages/typescript/ai/src/types.ts index 4d7ca6e5..0a9741c3 100644 --- a/packages/typescript/ai/src/types.ts +++ b/packages/typescript/ai/src/types.ts @@ -778,8 +778,8 @@ export interface TextMessageStartEvent extends BaseAGUIEvent { type: 'TEXT_MESSAGE_START' /** Unique identifier for this message */ messageId: string - /** Role is always assistant for generated messages */ - role: 'assistant' + /** Role of the message sender */ + role: 'user' | 'assistant' | 'system' | 'tool' } /** @@ -813,6 +813,8 @@ export interface ToolCallStartEvent extends BaseAGUIEvent { toolCallId: string /** Name of the tool being called */ toolName: string + /** ID of the parent message that initiated this tool call */ + parentMessageId?: string /** Index for parallel tool calls */ index?: number } From 30d00e17a2b2da4778686afa9038ffde2fab6e59 Mon Sep 17 00:00:00 2001 From: James Arthur Date: Mon, 9 Feb 2026 16:36:37 -0800 Subject: [PATCH 02/20] feat(ai): add MessageStreamState type for per-message stream tracking Co-Authored-By: Claude Opus 4.6 --- .../ai/src/activities/chat/stream/types.ts | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/packages/typescript/ai/src/activities/chat/stream/types.ts b/packages/typescript/ai/src/activities/chat/stream/types.ts index 2a323507..8bde07b7 100644 --- a/packages/typescript/ai/src/activities/chat/stream/types.ts +++ b/packages/typescript/ai/src/activities/chat/stream/types.ts @@ -45,6 +45,24 @@ export interface ChunkStrategy { reset?: () => void } +/** + * Per-message streaming state. + * Tracks the accumulation of text, tool calls, and thinking content + * for a single message in the stream. + */ +export interface MessageStreamState { + id: string + role: 'user' | 'assistant' | 'system' | 'tool' + totalTextContent: string + currentSegmentText: string + lastEmittedText: string + thinkingContent: string + toolCalls: Map + toolCallOrder: Array + hasToolCallsSinceTextStart: boolean + isComplete: boolean +} + /** * Result from processing a stream */ From f03493bb28d40930836554a8aef3a7632d2f80b7 Mon Sep 17 00:00:00 2001 From: James Arthur Date: Mon, 9 Feb 2026 16:48:18 -0800 Subject: [PATCH 03/20] feat(ai): refactor StreamProcessor to per-message state Replace single-message instance variables with a Map keyed by messageId. Add explicit handlers for TEXT_MESSAGE_START, TEXT_MESSAGE_END, and STATE_SNAPSHOT events. Route tool calls via toolCallToMessage mapping. Maintains backward compat: startAssistantMessage() sets pendingManualMessageId which TEXT_MESSAGE_START associates with. ensureAssistantMessage() auto-creates state for streams without TEXT_MESSAGE_START. 
Co-Authored-By: Claude Opus 4.6 --- .../src/activities/chat/stream/processor.ts | 821 ++++++++++++------ .../ai/tests/stream-processor.test.ts | 459 +++++++++- 2 files changed, 1017 insertions(+), 263 deletions(-) diff --git a/packages/typescript/ai/src/activities/chat/stream/processor.ts b/packages/typescript/ai/src/activities/chat/stream/processor.ts index 96d95865..4efa7d49 100644 --- a/packages/typescript/ai/src/activities/chat/stream/processor.ts +++ b/packages/typescript/ai/src/activities/chat/stream/processor.ts @@ -12,11 +12,16 @@ * - Thinking/reasoning content * - Recording/replay for testing * - Event-driven architecture for UI updates + * - Per-message stream state tracking for multi-message sessions * * @see docs/chat-architecture.md — Canonical reference for AG-UI chunk ordering, * adapter contract, single-shot flows, and expected UIMessage output. */ -import { generateMessageId, uiMessageToModelMessages } from '../messages.js' +import { + generateMessageId, + normalizeToUIMessage, + uiMessageToModelMessages, +} from '../messages.js' import { defaultJSONParser } from './json-parser' import { updateTextPart, @@ -32,6 +37,7 @@ import type { ChunkRecording, ChunkStrategy, InternalToolCallState, + MessageStreamState, ProcessorResult, ProcessorState, ToolCallState, @@ -109,9 +115,8 @@ export interface StreamProcessorOptions { * * State tracking: * - Full message array - * - Current assistant message being streamed - * - Text content accumulation (reset on TEXT_MESSAGE_START) - * - Multiple parallel tool calls + * - Per-message stream state (text, tool calls, thinking) + * - Multiple concurrent message streams * - Tool call completion via TOOL_CALL_END events * * @see docs/chat-architecture.md#streamprocessor-internal-state — State field reference @@ -125,17 +130,14 @@ export class StreamProcessor { // Message state private messages: Array = [] - private currentAssistantMessageId: string | null = null - - // Stream state for current assistant message - // Total accumulated text across all segments (for the final result) - private totalTextContent = '' - // Current segment's text content (for onTextUpdate callbacks) - private currentSegmentText = '' - private lastEmittedText = '' - private thinkingContent = '' - private toolCalls: Map = new Map() - private toolCallOrder: Array = [] + + // Per-message stream state + private messageStates: Map = new Map() + private activeMessageIds: Set = new Set() + private toolCallToMessage: Map = new Map() + private pendingManualMessageId: string | null = null + + // Shared stream state private finishReason: string | null = null private hasError = false private isDone = false @@ -224,18 +226,17 @@ export class StreamProcessor { prepareAssistantMessage(): void { // Reset stream state for new message this.resetStreamState() - // Clear the current assistant message ID so ensureAssistantMessage() - // will create a fresh message on the first content chunk - this.currentAssistantMessageId = null } /** * @deprecated Use prepareAssistantMessage() instead. This eagerly creates * an assistant message which can cause empty message flicker. */ - startAssistantMessage(): string { + startAssistantMessage(messageId?: string): string { this.prepareAssistantMessage() - return this.ensureAssistantMessage() + const { messageId: id } = this.ensureAssistantMessage(messageId) + this.pendingManualMessageId = id + return id } /** @@ -244,39 +245,16 @@ export class StreamProcessor { * has arrived yet. 
*/ getCurrentAssistantMessageId(): string | null { - return this.currentAssistantMessageId - } - - /** - * Lazily create the assistant message if it hasn't been created yet. - * Called by content handlers on the first content-bearing chunk. - * Returns the message ID. - * - * Content-bearing chunks that trigger this: - * TEXT_MESSAGE_CONTENT, TOOL_CALL_START, STEP_FINISHED, RUN_ERROR. - * - * @see docs/chat-architecture.md#streamprocessor-internal-state — Lazy creation pattern - */ - private ensureAssistantMessage(): string { - if (this.currentAssistantMessageId) { - return this.currentAssistantMessageId - } - - const assistantMessage: UIMessage = { - id: generateMessageId(), - role: 'assistant', - parts: [], - createdAt: new Date(), + // Scan all message states (not just active) for the last assistant. + // After finalizeStream() clears activeMessageIds, messageStates retains entries. + // After reset() / resetStreamState(), messageStates is cleared → returns null. + let lastId: string | null = null + for (const [id, state] of this.messageStates) { + if (state.role === 'assistant') { + lastId = id + } } - - this.currentAssistantMessageId = assistantMessage.id - this.messages = [...this.messages, assistantMessage] - - // Emit events - this.events.onStreamStart?.() - this.emitMessagesChange() - - return assistantMessage.id + return lastId } /** @@ -403,7 +381,10 @@ export class StreamProcessor { */ clearMessages(): void { this.messages = [] - this.currentAssistantMessageId = null + this.messageStates.clear() + this.activeMessageIds.clear() + this.toolCallToMessage.clear() + this.pendingManualMessageId = null this.emitMessagesChange() } @@ -444,7 +425,7 @@ export class StreamProcessor { * * Central dispatch for all AG-UI events. Each event type maps to a specific * handler. Events not listed in the switch are intentionally ignored - * (RUN_STARTED, TEXT_MESSAGE_END, STEP_STARTED, STATE_SNAPSHOT, STATE_DELTA). + * (RUN_STARTED, STEP_STARTED, STATE_DELTA). * * @see docs/chat-architecture.md#adapter-contract — Expected event types and ordering */ @@ -461,13 +442,17 @@ export class StreamProcessor { switch (chunk.type) { // AG-UI Events case 'TEXT_MESSAGE_START': - this.handleTextMessageStartEvent() + this.handleTextMessageStartEvent(chunk) break case 'TEXT_MESSAGE_CONTENT': this.handleTextMessageContentEvent(chunk) break + case 'TEXT_MESSAGE_END': + this.handleTextMessageEndEvent(chunk) + break + case 'TOOL_CALL_START': this.handleToolCallStartEvent(chunk) break @@ -496,31 +481,230 @@ export class StreamProcessor { this.handleCustomEvent(chunk) break + case 'STATE_SNAPSHOT': + this.handleStateSnapshotEvent(chunk) + break + default: - // RUN_STARTED, TEXT_MESSAGE_END, STEP_STARTED, - // STATE_SNAPSHOT, STATE_DELTA - no special handling needed + // RUN_STARTED, STEP_STARTED, STATE_DELTA - no special handling needed break } } + // ============================================ + // Per-Message State Helpers + // ============================================ + /** - * Handle TEXT_MESSAGE_START event — marks the beginning of a new text segment. - * Resets segment accumulation so text after tool calls starts fresh. - * - * This is the key mechanism for multi-segment text (text before and after tool - * calls becoming separate TextParts). Without this reset, all text would merge - * into a single TextPart and tool-call interleaving would be lost. 
- * - * @see docs/chat-architecture.md#single-shot-text-response — Step-by-step text processing - * @see docs/chat-architecture.md#text-then-tool-interleaving-single-shot — Multi-segment text + * Create a new MessageStreamState for a message + */ + private createMessageState( + messageId: string, + role: 'user' | 'assistant' | 'system' | 'tool', + ): MessageStreamState { + const state: MessageStreamState = { + id: messageId, + role, + totalTextContent: '', + currentSegmentText: '', + lastEmittedText: '', + thinkingContent: '', + toolCalls: new Map(), + toolCallOrder: [], + hasToolCallsSinceTextStart: false, + isComplete: false, + } + this.messageStates.set(messageId, state) + return state + } + + /** + * Get the MessageStreamState for a message + */ + private getMessageState( + messageId: string, + ): MessageStreamState | undefined { + return this.messageStates.get(messageId) + } + + /** + * Get the most recent active assistant message ID. + * Used as fallback for events that don't include a messageId. + */ + private getActiveAssistantMessageId(): string | null { + // Iterate in reverse order of insertion (most recent first) + let lastId: string | null = null + for (const id of this.activeMessageIds) { + const state = this.messageStates.get(id) + if (state && (state.role === 'assistant')) { + lastId = id + } + } + return lastId + } + + /** + * Ensure an active assistant message exists, creating one if needed. + * Used for backward compat when events arrive without prior TEXT_MESSAGE_START. + */ + private ensureAssistantMessage(preferredId?: string): { + messageId: string + state: MessageStreamState + } { + // Try to find state by preferred ID + if (preferredId) { + const state = this.getMessageState(preferredId) + if (state) return { messageId: preferredId, state } + } + + // Try active assistant message + const activeId = this.getActiveAssistantMessageId() + if (activeId) { + const state = this.getMessageState(activeId)! + return { messageId: activeId, state } + } + + // Auto-create an assistant message (backward compat for process() without TEXT_MESSAGE_START) + const id = preferredId || generateMessageId() + const assistantMessage: UIMessage = { + id, + role: 'assistant', + parts: [], + createdAt: new Date(), + } + this.messages = [...this.messages, assistantMessage] + const state = this.createMessageState(id, 'assistant') + this.activeMessageIds.add(id) + this.pendingManualMessageId = id + this.events.onStreamStart?.() + return { messageId: id, state } + } + + // ============================================ + // Event Handlers + // ============================================ + + /** + * Handle TEXT_MESSAGE_START event */ - private handleTextMessageStartEvent(): void { - // Emit any pending text from a previous segment before resetting - if (this.currentSegmentText !== this.lastEmittedText) { - this.emitTextUpdate() + private handleTextMessageStartEvent( + chunk: Extract, + ): void { + const { messageId, role } = chunk + + // Case 1: A manual message was created via startAssistantMessage() + if (this.pendingManualMessageId) { + const pendingId = this.pendingManualMessageId + this.pendingManualMessageId = null + + if (pendingId !== messageId) { + // Update the message's ID in the messages array + this.messages = this.messages.map((msg) => + msg.id === pendingId ? 
{ ...msg, id: messageId } : msg, + ) + + // Move state to the new key + const existingState = this.messageStates.get(pendingId) + if (existingState) { + existingState.id = messageId + this.messageStates.delete(pendingId) + this.messageStates.set(messageId, existingState) + } + + // Update activeMessageIds + this.activeMessageIds.delete(pendingId) + this.activeMessageIds.add(messageId) + } + + // Ensure state exists + if (!this.messageStates.has(messageId)) { + this.createMessageState(messageId, role) + this.activeMessageIds.add(messageId) + } + + this.emitMessagesChange() + return + } + + // Case 2: Message already exists (dedup) + const existingMsg = this.messages.find((m) => m.id === messageId) + if (existingMsg) { + this.activeMessageIds.add(messageId) + if (!this.messageStates.has(messageId)) { + this.createMessageState(messageId, role) + } else { + const existingState = this.messageStates.get(messageId)! + // If tool calls happened since last text, this TEXT_MESSAGE_START + // signals a new text segment — reset segment accumulation + if (existingState.hasToolCallsSinceTextStart) { + if (existingState.currentSegmentText !== existingState.lastEmittedText) { + this.emitTextUpdateForMessage(messageId) + } + existingState.currentSegmentText = '' + existingState.lastEmittedText = '' + existingState.hasToolCallsSinceTextStart = false + } + } + return + } + + // Case 3: New message from the stream + // Map 'tool' role to 'assistant' for UIMessage (UIMessage doesn't support 'tool' role) + const uiRole: 'system' | 'user' | 'assistant' = + role === 'tool' ? 'assistant' : role + + const newMessage: UIMessage = { + id: messageId, + role: uiRole, + parts: [], + createdAt: new Date(), + } + + this.messages = [...this.messages, newMessage] + this.createMessageState(messageId, role) + this.activeMessageIds.add(messageId) + + this.events.onStreamStart?.() + this.emitMessagesChange() + } + + /** + * Handle TEXT_MESSAGE_END event + */ + private handleTextMessageEndEvent( + chunk: Extract, + ): void { + const { messageId } = chunk + const state = this.getMessageState(messageId) + if (!state) return + + // Emit any pending text for this message + if (state.currentSegmentText !== state.lastEmittedText) { + this.emitTextUpdateForMessage(messageId) + } + + // Complete all tool calls for this message + this.completeAllToolCallsForMessage(messageId) + } + + /** + * Handle STATE_SNAPSHOT event + */ + private handleStateSnapshotEvent( + chunk: Extract, + ): void { + const stateMessages = ( + chunk.state as { messages?: Array } + )?.messages + if (Array.isArray(stateMessages)) { + this.messages = stateMessages.map((msg) => + normalizeToUIMessage( + msg as UIMessage, + generateMessageId, + ), + ) + this.emitMessagesChange() } - this.currentSegmentText = '' - this.lastEmittedText = '' } /** @@ -537,17 +721,61 @@ export class StreamProcessor { private handleTextMessageContentEvent( chunk: Extract, ): void { - this.ensureAssistantMessage() + const { messageId, state } = this.ensureAssistantMessage(chunk.messageId) + + // Content arriving means all current tool calls for this message are complete + this.completeAllToolCallsForMessage(messageId) + + const previousSegment = state.currentSegmentText + + // Detect if this is a NEW text segment (after tool calls) vs continuation + const isNewSegment = + state.hasToolCallsSinceTextStart && + previousSegment.length > 0 && + this.isNewTextSegment(chunk, previousSegment) + + if (isNewSegment) { + // Emit any accumulated text before starting new segment + if (previousSegment !== 
state.lastEmittedText) { + this.emitTextUpdateForMessage(messageId) + } + // Reset SEGMENT text accumulation for the new text segment after tool calls + state.currentSegmentText = '' + state.lastEmittedText = '' + state.hasToolCallsSinceTextStart = false + } + + const currentText = state.currentSegmentText + let nextText = currentText + + // Prefer delta over content - delta is the incremental change + // Check for both undefined and empty string to avoid "undefined" string concatenation + if (chunk.delta !== undefined && chunk.delta !== '') { + nextText = currentText + chunk.delta + } else if (chunk.content !== undefined && chunk.content !== '') { + // Fallback: use content if delta is not provided + if (chunk.content.startsWith(currentText)) { + nextText = chunk.content + } else if (currentText.startsWith(chunk.content)) { + nextText = currentText + } else { + nextText = currentText + chunk.content + } + } - this.currentSegmentText += chunk.delta - this.totalTextContent += chunk.delta + // Calculate the delta for totalTextContent + const textDelta = nextText.slice(currentText.length) + state.currentSegmentText = nextText + state.totalTextContent += textDelta + // Use delta for chunk strategy if available + const chunkPortion = chunk.delta || chunk.content || '' const shouldEmit = this.chunkStrategy.shouldEmit( - chunk.delta, - this.currentSegmentText, + chunkPortion, + state.currentSegmentText, ) - if (shouldEmit && this.currentSegmentText !== this.lastEmittedText) { - this.emitTextUpdate() + if (shouldEmit && state.currentSegmentText !== state.lastEmittedText) { + this.emitTextUpdateForMessage(messageId) } } @@ -567,10 +795,18 @@ export class StreamProcessor { private handleToolCallStartEvent( chunk: Extract, ): void { - this.ensureAssistantMessage() + // Determine the message this tool call belongs to + const targetMessageId = + chunk.parentMessageId ?? this.getActiveAssistantMessageId() + const { messageId, state } = this.ensureAssistantMessage( + targetMessageId ?? undefined, + ) + + // Mark that we've seen tool calls since the last text segment + state.hasToolCallsSinceTextStart = true const toolCallId = chunk.toolCallId - const existingToolCall = this.toolCalls.get(toolCallId) + const existingToolCall = state.toolCalls.get(toolCallId) if (!existingToolCall) { // New tool call starting @@ -582,34 +818,31 @@ export class StreamProcessor { arguments: '', state: initialState, parsedArguments: undefined, - index: chunk.index ?? this.toolCalls.size, + index: chunk.index ?? 
state.toolCalls.size, } - this.toolCalls.set(toolCallId, newToolCall) - this.toolCallOrder.push(toolCallId) + state.toolCalls.set(toolCallId, newToolCall) + state.toolCallOrder.push(toolCallId) + + // Store mapping for TOOL_CALL_ARGS/END routing + this.toolCallToMessage.set(toolCallId, messageId) // Update UIMessage - if (this.currentAssistantMessageId) { - this.messages = updateToolCallPart( - this.messages, - this.currentAssistantMessageId, - { - id: chunk.toolCallId, - name: chunk.toolName, - arguments: '', - state: initialState, - }, - ) - this.emitMessagesChange() + this.messages = updateToolCallPart(this.messages, messageId, { + id: chunk.toolCallId, + name: chunk.toolName, + arguments: '', + state: initialState, + }) + this.emitMessagesChange() - // Emit granular event - this.events.onToolCallStateChange?.( - this.currentAssistantMessageId, - chunk.toolCallId, - initialState, - '', - ) - } + // Emit granular event + this.events.onToolCallStateChange?.( + messageId, + chunk.toolCallId, + initialState, + '', + ) } } @@ -629,47 +862,46 @@ export class StreamProcessor { chunk: Extract, ): void { const toolCallId = chunk.toolCallId - const existingToolCall = this.toolCalls.get(toolCallId) - - if (existingToolCall) { - const wasAwaitingInput = existingToolCall.state === 'awaiting-input' + const messageId = this.toolCallToMessage.get(toolCallId) + if (!messageId) return - // Accumulate arguments from delta - existingToolCall.arguments += chunk.delta || '' + const state = this.getMessageState(messageId) + if (!state) return - // Update state - if (wasAwaitingInput && chunk.delta) { - existingToolCall.state = 'input-streaming' - } + const existingToolCall = state.toolCalls.get(toolCallId) + if (!existingToolCall) return - // Try to parse the updated arguments - existingToolCall.parsedArguments = this.jsonParser.parse( - existingToolCall.arguments, - ) + const wasAwaitingInput = existingToolCall.state === 'awaiting-input' - // Update UIMessage - if (this.currentAssistantMessageId) { - this.messages = updateToolCallPart( - this.messages, - this.currentAssistantMessageId, - { - id: existingToolCall.id, - name: existingToolCall.name, - arguments: existingToolCall.arguments, - state: existingToolCall.state, - }, - ) - this.emitMessagesChange() + // Accumulate arguments from delta + existingToolCall.arguments += chunk.delta || '' - // Emit granular event - this.events.onToolCallStateChange?.( - this.currentAssistantMessageId, - existingToolCall.id, - existingToolCall.state, - existingToolCall.arguments, - ) - } + // Update state + if (wasAwaitingInput && chunk.delta) { + existingToolCall.state = 'input-streaming' } + + // Try to parse the updated arguments + existingToolCall.parsedArguments = this.jsonParser.parse( + existingToolCall.arguments, + ) + + // Update UIMessage + this.messages = updateToolCallPart(this.messages, messageId, { + id: existingToolCall.id, + name: existingToolCall.name, + arguments: existingToolCall.arguments, + state: existingToolCall.state, + }) + this.emitMessagesChange() + + // Emit granular event + this.events.onToolCallStateChange?.( + messageId, + existingToolCall.id, + existingToolCall.state, + existingToolCall.arguments, + ) } /** @@ -689,11 +921,17 @@ export class StreamProcessor { private handleToolCallEndEvent( chunk: Extract, ): void { + const messageId = this.toolCallToMessage.get(chunk.toolCallId) + if (!messageId) return + + const msgState = this.getMessageState(messageId) + if (!msgState) return + // Transition the tool call to input-complete (the 
authoritative completion signal) - const existingToolCall = this.toolCalls.get(chunk.toolCallId) + const existingToolCall = msgState.toolCalls.get(chunk.toolCallId) if (existingToolCall && existingToolCall.state !== 'input-complete') { - const index = this.toolCallOrder.indexOf(chunk.toolCallId) - this.completeToolCall(index, existingToolCall) + const index = msgState.toolCallOrder.indexOf(chunk.toolCallId) + this.completeToolCall(messageId, index, existingToolCall) // If TOOL_CALL_END provides parsed input, use it as the canonical parsed // arguments (overrides the accumulated string parse from completeToolCall) if (chunk.input !== undefined) { @@ -701,10 +939,8 @@ export class StreamProcessor { } } - // Update UIMessage if we have a current assistant message and a result - if (this.currentAssistantMessageId && chunk.result) { - const state: ToolResultState = 'complete' - + // Update UIMessage if there's a result + if (chunk.result) { // Step 1: Update the tool-call part's output field (for UI consistency // with client tools — see GitHub issue #176) let output: unknown @@ -720,12 +956,13 @@ export class StreamProcessor { ) // Step 2: Create/update the tool-result part (for LLM conversation history) + const resultState: ToolResultState = 'complete' this.messages = updateToolResultPart( this.messages, - this.currentAssistantMessageId, + messageId, chunk.toolCallId, chunk.result, - state, + resultState, ) this.emitMessagesChange() } @@ -772,25 +1009,36 @@ export class StreamProcessor { private handleStepFinishedEvent( chunk: Extract, ): void { - this.ensureAssistantMessage() + const { messageId, state } = this.ensureAssistantMessage() + + const previous = state.thinkingContent + let nextThinking = previous + + // Prefer delta over content + if (chunk.delta && chunk.delta !== '') { + nextThinking = previous + chunk.delta + } else if (chunk.content && chunk.content !== '') { + if (chunk.content.startsWith(previous)) { + nextThinking = chunk.content + } else if (previous.startsWith(chunk.content)) { + nextThinking = previous + } else { + nextThinking = previous + chunk.content + } + } - this.thinkingContent += chunk.delta + state.thinkingContent = nextThinking // Update UIMessage - if (this.currentAssistantMessageId) { - this.messages = updateThinkingPart( - this.messages, - this.currentAssistantMessageId, - this.thinkingContent, - ) - this.emitMessagesChange() + this.messages = updateThinkingPart( + this.messages, + messageId, + state.thinkingContent, + ) + this.emitMessagesChange() - // Emit granular event - this.events.onThinkingUpdate?.( - this.currentAssistantMessageId, - this.thinkingContent, - ) - } + // Emit granular event + this.events.onThinkingUpdate?.(messageId, state.thinkingContent) } /** @@ -806,6 +1054,8 @@ export class StreamProcessor { private handleCustomEvent( chunk: Extract, ): void { + const messageId = this.getActiveAssistantMessageId() + // Handle client tool input availability - trigger client-side execution if (chunk.name === 'tool-input-available' && chunk.data) { const { toolCallId, toolName, input } = chunk.data as { @@ -832,10 +1082,10 @@ export class StreamProcessor { } // Update the tool call part with approval state - if (this.currentAssistantMessageId) { + if (messageId) { this.messages = updateToolCallApproval( this.messages, - this.currentAssistantMessageId, + messageId, toolCallId, approval.id, ) @@ -852,8 +1102,34 @@ export class StreamProcessor { } } + // ============================================ + // Internal Helpers + // 
============================================ + + /** + * Detect if an incoming content chunk represents a NEW text segment + */ + private isNewTextSegment( + chunk: Extract, + previous: string, + ): boolean { + // Check if content is present (delta is always defined but may be empty string) + if (chunk.content !== undefined) { + if (chunk.content.length < previous.length) { + return true + } + if ( + !chunk.content.startsWith(previous) && + !previous.startsWith(chunk.content) + ) { + return true + } + } + return false + } + /** - * Complete all tool calls — safety net for stream termination. + * Complete all tool calls across all active messages — safety net for stream termination. * * Called by RUN_FINISHED and finalizeStream(). Force-transitions any tool call * not yet in input-complete state. Handles cases where TOOL_CALL_END was @@ -862,10 +1138,22 @@ export class StreamProcessor { * @see docs/chat-architecture.md#single-shot-tool-call-response — Safety net behavior */ private completeAllToolCalls(): void { - this.toolCalls.forEach((toolCall, id) => { + for (const messageId of this.activeMessageIds) { + this.completeAllToolCallsForMessage(messageId) + } + } + + /** + * Complete all tool calls for a specific message + */ + private completeAllToolCallsForMessage(messageId: string): void { + const state = this.getMessageState(messageId) + if (!state) return + + state.toolCalls.forEach((toolCall, id) => { if (toolCall.state !== 'input-complete') { - const index = this.toolCallOrder.indexOf(id) - this.completeToolCall(index, toolCall) + const index = state.toolCallOrder.indexOf(id) + this.completeToolCall(messageId, index, toolCall) } }) } @@ -874,6 +1162,7 @@ export class StreamProcessor { * Mark a tool call as complete and emit event */ private completeToolCall( + messageId: string, _index: number, toolCall: InternalToolCallState, ): void { @@ -883,31 +1172,25 @@ export class StreamProcessor { toolCall.parsedArguments = this.jsonParser.parse(toolCall.arguments) // Update UIMessage - if (this.currentAssistantMessageId) { - this.messages = updateToolCallPart( - this.messages, - this.currentAssistantMessageId, - { - id: toolCall.id, - name: toolCall.name, - arguments: toolCall.arguments, - state: 'input-complete', - }, - ) - this.emitMessagesChange() + this.messages = updateToolCallPart(this.messages, messageId, { + id: toolCall.id, + name: toolCall.name, + arguments: toolCall.arguments, + state: 'input-complete', + }) + this.emitMessagesChange() - // Emit granular event - this.events.onToolCallStateChange?.( - this.currentAssistantMessageId, - toolCall.id, - 'input-complete', - toolCall.arguments, - ) - } + // Emit granular event + this.events.onToolCallStateChange?.( + messageId, + toolCall.id, + 'input-complete', + toolCall.arguments, + ) } /** - * Emit pending text update. + * Emit pending text update for a specific message. * * Calls updateTextPart() which has critical append-vs-replace logic: * - If last UIMessage part is TextPart → replaces its content (same segment). @@ -915,24 +1198,22 @@ export class StreamProcessor { * * @see docs/chat-architecture.md#uimessage-part-ordering-invariants — Replace vs. 
push logic */ - private emitTextUpdate(): void { - this.lastEmittedText = this.currentSegmentText + private emitTextUpdateForMessage(messageId: string): void { + const state = this.getMessageState(messageId) + if (!state) return + + state.lastEmittedText = state.currentSegmentText // Update UIMessage - if (this.currentAssistantMessageId) { - this.messages = updateTextPart( - this.messages, - this.currentAssistantMessageId, - this.currentSegmentText, - ) - this.emitMessagesChange() + this.messages = updateTextPart( + this.messages, + messageId, + state.currentSegmentText, + ) + this.emitMessagesChange() - // Emit granular event - this.events.onTextUpdate?.( - this.currentAssistantMessageId, - this.currentSegmentText, - ) - } + // Emit granular event + this.events.onTextUpdate?.(messageId, state.currentSegmentText) } /** @@ -952,81 +1233,116 @@ export class StreamProcessor { * @see docs/chat-architecture.md#single-shot-text-response — Finalization step */ finalizeStream(): void { - // Safety net: complete any remaining tool calls (e.g. on network errors / aborted streams) - this.completeAllToolCalls() + let lastAssistantMessage: UIMessage | undefined - // Emit any pending text if not already emitted - if (this.currentSegmentText !== this.lastEmittedText) { - this.emitTextUpdate() + // Finalize ALL active messages + for (const messageId of this.activeMessageIds) { + const state = this.getMessageState(messageId) + if (!state) continue + + // Complete any remaining tool calls + this.completeAllToolCallsForMessage(messageId) + + // Emit any pending text if not already emitted + if (state.currentSegmentText !== state.lastEmittedText) { + this.emitTextUpdateForMessage(messageId) + } + + state.isComplete = true + + const msg = this.messages.find((m) => m.id === messageId) + if (msg && msg.role === 'assistant') { + lastAssistantMessage = msg + } } - // Remove the assistant message if it only contains whitespace text - // (no tool calls, no meaningful content). This handles models like Gemini - // that sometimes return just "\n" during auto-continuation. + this.activeMessageIds.clear() + + // Remove whitespace-only assistant messages (handles models like Gemini + // that sometimes return just "\n" during auto-continuation). // Preserve the message on errors so the UI can show error state. 
- if (this.currentAssistantMessageId && !this.hasError) { - const assistantMessage = this.messages.find( - (m) => m.id === this.currentAssistantMessageId, - ) - if (assistantMessage && this.isWhitespaceOnlyMessage(assistantMessage)) { + if (lastAssistantMessage && !this.hasError) { + if (this.isWhitespaceOnlyMessage(lastAssistantMessage)) { this.messages = this.messages.filter( - (m) => m.id !== this.currentAssistantMessageId, + (m) => m.id !== lastAssistantMessage!.id, ) this.emitMessagesChange() - this.currentAssistantMessageId = null return } } - // Emit stream end event (only if a message was actually created) - if (this.currentAssistantMessageId) { - const assistantMessage = this.messages.find( - (m) => m.id === this.currentAssistantMessageId, - ) - if (assistantMessage) { - this.events.onStreamEnd?.(assistantMessage) - } + // Emit stream end for the last assistant message + if (lastAssistantMessage) { + this.events.onStreamEnd?.(lastAssistantMessage) } } /** - * Get completed tool calls in API format + * Get completed tool calls in API format (aggregated across all messages) */ private getCompletedToolCalls(): Array { - return Array.from(this.toolCalls.values()) - .filter((tc) => tc.state === 'input-complete') - .map((tc) => ({ - id: tc.id, - type: 'function' as const, - function: { - name: tc.name, - arguments: tc.arguments, - }, - })) + const result: Array = [] + for (const state of this.messageStates.values()) { + for (const tc of state.toolCalls.values()) { + if (tc.state === 'input-complete') { + result.push({ + id: tc.id, + type: 'function' as const, + function: { + name: tc.name, + arguments: tc.arguments, + }, + }) + } + } + } + return result } /** - * Get current result + * Get current result (aggregated across all messages) */ private getResult(): ProcessorResult { const toolCalls = this.getCompletedToolCalls() + let content = '' + let thinking = '' + + for (const state of this.messageStates.values()) { + content += state.totalTextContent + thinking += state.thinkingContent + } + return { - content: this.totalTextContent, - thinking: this.thinkingContent || undefined, + content, + thinking: thinking || undefined, toolCalls: toolCalls.length > 0 ? 
toolCalls : undefined, finishReason: this.finishReason, } } /** - * Get current processor state + * Get current processor state (aggregated across all messages) */ getState(): ProcessorState { + let content = '' + let thinking = '' + const toolCalls = new Map() + const toolCallOrder: Array = [] + + for (const state of this.messageStates.values()) { + content += state.totalTextContent + thinking += state.thinkingContent + for (const [id, tc] of state.toolCalls) { + toolCalls.set(id, tc) + } + toolCallOrder.push(...state.toolCallOrder) + } + return { - content: this.totalTextContent, - thinking: this.thinkingContent, - toolCalls: new Map(this.toolCalls), - toolCallOrder: [...this.toolCallOrder], + content, + thinking, + toolCalls, + toolCallOrder, finishReason: this.finishReason, done: this.isDone, } @@ -1056,12 +1372,10 @@ export class StreamProcessor { * Reset stream state (but keep messages) */ private resetStreamState(): void { - this.totalTextContent = '' - this.currentSegmentText = '' - this.lastEmittedText = '' - this.thinkingContent = '' - this.toolCalls.clear() - this.toolCallOrder = [] + this.messageStates.clear() + this.activeMessageIds.clear() + this.toolCallToMessage.clear() + this.pendingManualMessageId = null this.finishReason = null this.hasError = false this.isDone = false @@ -1074,7 +1388,6 @@ export class StreamProcessor { reset(): void { this.resetStreamState() this.messages = [] - this.currentAssistantMessageId = null } /** diff --git a/packages/typescript/ai/tests/stream-processor.test.ts b/packages/typescript/ai/tests/stream-processor.test.ts index ddb7f812..f322bc11 100644 --- a/packages/typescript/ai/tests/stream-processor.test.ts +++ b/packages/typescript/ai/tests/stream-processor.test.ts @@ -621,8 +621,8 @@ describe('StreamProcessor', () => { processor.processChunk(ev.textContent('First segment')) processor.processChunk(ev.toolStart('tc-1', 'search')) processor.processChunk(ev.toolEnd('tc-1', 'search', { input: {} })) - processor.processChunk(ev.textStart('msg-2')) - processor.processChunk(ev.textContent('Second segment', 'msg-2')) + processor.processChunk(ev.textStart()) + processor.processChunk(ev.textContent('Second segment')) processor.processChunk(ev.runFinished('stop')) processor.finalizeStream() @@ -649,10 +649,10 @@ describe('StreamProcessor', () => { ev.toolEnd('call_1', 'getWeather', { result: '{"temp":"72F"}' }), ) - // Second adapter stream: more text - processor.processChunk(ev.textStart('msg-2')) - processor.processChunk(ev.textContent("It's 72F in NYC.", 'msg-2')) - processor.processChunk(ev.textEnd('msg-2')) + // Second adapter stream: more text (same message) + processor.processChunk(ev.textStart()) + processor.processChunk(ev.textContent("It's 72F in NYC.")) + processor.processChunk(ev.textEnd()) processor.processChunk(ev.runFinished('stop')) processor.finalizeStream() @@ -685,9 +685,9 @@ describe('StreamProcessor', () => { processor.processChunk(ev.textEnd()) processor.processChunk(ev.toolStart('tc-1', 'tool')) processor.processChunk(ev.toolEnd('tc-1', 'tool')) - processor.processChunk(ev.textStart('msg-2')) - processor.processChunk(ev.textContent('After', 'msg-2')) - processor.processChunk(ev.textEnd('msg-2')) + processor.processChunk(ev.textStart()) + processor.processChunk(ev.textContent('After')) + processor.processChunk(ev.textEnd()) processor.processChunk(ev.runFinished('stop')) processor.finalizeStream() @@ -1798,4 +1798,445 @@ describe('StreamProcessor', () => { expect(state2.toolCallOrder).toEqual(['tc-1']) }) }) + + 
describe('TEXT_MESSAGE_START', () => { + it('should create a message with correct role and messageId', () => { + const processor = new StreamProcessor() + + processor.processChunk({ + type: 'TEXT_MESSAGE_START', + messageId: 'msg-1', + role: 'assistant', + timestamp: Date.now(), + } as StreamChunk) + + processor.processChunk({ + type: 'TEXT_MESSAGE_CONTENT', + messageId: 'msg-1', + delta: 'Hello', + timestamp: Date.now(), + } as StreamChunk) + + processor.finalizeStream() + + const messages = processor.getMessages() + expect(messages).toHaveLength(1) + expect(messages[0]?.id).toBe('msg-1') + expect(messages[0]?.role).toBe('assistant') + expect(messages[0]?.parts[0]).toEqual({ + type: 'text', + content: 'Hello', + }) + }) + + it('should create a user message via TEXT_MESSAGE_START', () => { + const processor = new StreamProcessor() + + processor.processChunk({ + type: 'TEXT_MESSAGE_START', + messageId: 'user-msg-1', + role: 'user', + timestamp: Date.now(), + } as StreamChunk) + + processor.processChunk({ + type: 'TEXT_MESSAGE_END', + messageId: 'user-msg-1', + timestamp: Date.now(), + } as StreamChunk) + + const messages = processor.getMessages() + expect(messages).toHaveLength(1) + expect(messages[0]?.id).toBe('user-msg-1') + expect(messages[0]?.role).toBe('user') + }) + + it('should emit onStreamStart when a new message arrives', () => { + const onStreamStart = vi.fn() + const processor = new StreamProcessor({ events: { onStreamStart } }) + + processor.processChunk({ + type: 'TEXT_MESSAGE_START', + messageId: 'msg-1', + role: 'assistant', + timestamp: Date.now(), + } as StreamChunk) + + expect(onStreamStart).toHaveBeenCalledTimes(1) + }) + }) + + describe('TEXT_MESSAGE_END', () => { + it('should not emit onStreamEnd (that happens in finalizeStream)', () => { + const onStreamEnd = vi.fn() + const processor = new StreamProcessor({ events: { onStreamEnd } }) + + processor.processChunk({ + type: 'TEXT_MESSAGE_START', + messageId: 'msg-1', + role: 'assistant', + timestamp: Date.now(), + } as StreamChunk) + + processor.processChunk({ + type: 'TEXT_MESSAGE_CONTENT', + messageId: 'msg-1', + delta: 'Hello world', + timestamp: Date.now(), + } as StreamChunk) + + processor.processChunk({ + type: 'TEXT_MESSAGE_END', + messageId: 'msg-1', + timestamp: Date.now(), + } as StreamChunk) + + // TEXT_MESSAGE_END means "text segment done", not "message done" + // onStreamEnd fires from finalizeStream(), not TEXT_MESSAGE_END + expect(onStreamEnd).not.toHaveBeenCalled() + + processor.finalizeStream() + + expect(onStreamEnd).toHaveBeenCalledTimes(1) + const endMessage = onStreamEnd.mock.calls[0]![0] as UIMessage + expect(endMessage.id).toBe('msg-1') + expect(endMessage.parts[0]).toEqual({ + type: 'text', + content: 'Hello world', + }) + }) + + it('should emit pending text on TEXT_MESSAGE_END', () => { + const onTextUpdate = vi.fn() + // Use a strategy that never emits during streaming + const processor = new StreamProcessor({ + events: { onTextUpdate }, + chunkStrategy: { + shouldEmit: () => false, + }, + }) + + processor.processChunk({ + type: 'TEXT_MESSAGE_START', + messageId: 'msg-1', + role: 'assistant', + timestamp: Date.now(), + } as StreamChunk) + + processor.processChunk({ + type: 'TEXT_MESSAGE_CONTENT', + messageId: 'msg-1', + delta: 'Hello', + timestamp: Date.now(), + } as StreamChunk) + + // Text not emitted yet due to strategy + expect(onTextUpdate).not.toHaveBeenCalled() + + processor.processChunk({ + type: 'TEXT_MESSAGE_END', + messageId: 'msg-1', + timestamp: Date.now(), + } as StreamChunk) + + 
// TEXT_MESSAGE_END should flush pending text + expect(onTextUpdate).toHaveBeenCalledWith('msg-1', 'Hello') + }) + }) + + describe('interleaved messages', () => { + it('should handle two interleaved assistant messages', () => { + const onMessagesChange = vi.fn() + const processor = new StreamProcessor({ + events: { onMessagesChange }, + }) + + // Start two messages + processor.processChunk({ + type: 'TEXT_MESSAGE_START', + messageId: 'msg-a', + role: 'assistant', + timestamp: Date.now(), + } as StreamChunk) + + processor.processChunk({ + type: 'TEXT_MESSAGE_START', + messageId: 'msg-b', + role: 'assistant', + timestamp: Date.now(), + } as StreamChunk) + + // Interleave content + processor.processChunk({ + type: 'TEXT_MESSAGE_CONTENT', + messageId: 'msg-a', + delta: 'Hello from A', + timestamp: Date.now(), + } as StreamChunk) + + processor.processChunk({ + type: 'TEXT_MESSAGE_CONTENT', + messageId: 'msg-b', + delta: 'Hello from B', + timestamp: Date.now(), + } as StreamChunk) + + // End both + processor.processChunk({ + type: 'TEXT_MESSAGE_END', + messageId: 'msg-a', + timestamp: Date.now(), + } as StreamChunk) + + processor.processChunk({ + type: 'TEXT_MESSAGE_END', + messageId: 'msg-b', + timestamp: Date.now(), + } as StreamChunk) + + const messages = processor.getMessages() + expect(messages).toHaveLength(2) + + expect(messages[0]?.id).toBe('msg-a') + expect(messages[0]?.parts[0]).toEqual({ + type: 'text', + content: 'Hello from A', + }) + + expect(messages[1]?.id).toBe('msg-b') + expect(messages[1]?.parts[0]).toEqual({ + type: 'text', + content: 'Hello from B', + }) + }) + }) + + describe('STATE_SNAPSHOT', () => { + it('should hydrate messages from a state snapshot', () => { + const onMessagesChange = vi.fn() + const processor = new StreamProcessor({ + events: { onMessagesChange }, + }) + + const snapshotMessages: Array = [ + { + id: 'snap-1', + role: 'user', + parts: [{ type: 'text', content: 'Hello' }], + createdAt: new Date(), + }, + { + id: 'snap-2', + role: 'assistant', + parts: [{ type: 'text', content: 'Hi there!' 
}], + createdAt: new Date(), + }, + ] + + processor.processChunk({ + type: 'STATE_SNAPSHOT', + state: { messages: snapshotMessages }, + timestamp: Date.now(), + } as StreamChunk) + + const messages = processor.getMessages() + expect(messages).toHaveLength(2) + expect(messages[0]?.id).toBe('snap-1') + expect(messages[0]?.role).toBe('user') + expect(messages[1]?.id).toBe('snap-2') + expect(messages[1]?.role).toBe('assistant') + expect(onMessagesChange).toHaveBeenCalled() + }) + + it('should ignore STATE_SNAPSHOT without messages', () => { + const onMessagesChange = vi.fn() + const processor = new StreamProcessor({ + events: { onMessagesChange }, + }) + + processor.processChunk({ + type: 'STATE_SNAPSHOT', + state: { someOtherData: 'value' }, + timestamp: Date.now(), + } as StreamChunk) + + expect(processor.getMessages()).toHaveLength(0) + expect(onMessagesChange).not.toHaveBeenCalled() + }) + }) + + describe('startAssistantMessage + TEXT_MESSAGE_START dedup', () => { + it('should associate TEXT_MESSAGE_START with pending manual message (different ID)', () => { + const processor = new StreamProcessor() + processor.startAssistantMessage() + + // Server sends TEXT_MESSAGE_START with a different ID + processor.processChunk({ + type: 'TEXT_MESSAGE_START', + messageId: 'server-msg-1', + role: 'assistant', + timestamp: Date.now(), + } as StreamChunk) + + // Should have only one message (not two) + const messages = processor.getMessages() + expect(messages).toHaveLength(1) + + // The message should have been updated to the server's ID + expect(messages[0]?.id).toBe('server-msg-1') + expect(messages[0]?.role).toBe('assistant') + + // Content should route to the correct message + processor.processChunk({ + type: 'TEXT_MESSAGE_CONTENT', + messageId: 'server-msg-1', + delta: 'Hello', + timestamp: Date.now(), + } as StreamChunk) + + processor.finalizeStream() + + expect(processor.getMessages()[0]?.parts[0]).toEqual({ + type: 'text', + content: 'Hello', + }) + }) + + it('should associate TEXT_MESSAGE_START with pending manual message (same ID)', () => { + const processor = new StreamProcessor() + processor.startAssistantMessage('my-msg-id') + + // Server sends TEXT_MESSAGE_START with the same ID + processor.processChunk({ + type: 'TEXT_MESSAGE_START', + messageId: 'my-msg-id', + role: 'assistant', + timestamp: Date.now(), + } as StreamChunk) + + // Should still have only one message + const messages = processor.getMessages() + expect(messages).toHaveLength(1) + expect(messages[0]?.id).toBe('my-msg-id') + }) + + it('should work when TEXT_MESSAGE_START arrives without startAssistantMessage', () => { + const onStreamStart = vi.fn() + const processor = new StreamProcessor({ + events: { onStreamStart }, + }) + + processor.processChunk({ + type: 'TEXT_MESSAGE_START', + messageId: 'msg-1', + role: 'assistant', + timestamp: Date.now(), + } as StreamChunk) + + processor.processChunk({ + type: 'TEXT_MESSAGE_CONTENT', + messageId: 'msg-1', + delta: 'Hello', + timestamp: Date.now(), + } as StreamChunk) + + processor.processChunk({ + type: 'TEXT_MESSAGE_END', + messageId: 'msg-1', + timestamp: Date.now(), + } as StreamChunk) + + expect(onStreamStart).toHaveBeenCalledTimes(1) + + const messages = processor.getMessages() + expect(messages).toHaveLength(1) + expect(messages[0]?.id).toBe('msg-1') + expect(messages[0]?.parts[0]).toEqual({ + type: 'text', + content: 'Hello', + }) + }) + }) + + describe('backward compat: startAssistantMessage without TEXT_MESSAGE_START', () => { + it('should still work when only 
startAssistantMessage is used', () => { + const processor = new StreamProcessor() + const msgId = processor.startAssistantMessage() + + processor.processChunk({ + type: 'TEXT_MESSAGE_CONTENT', + messageId: 'some-other-id', + delta: 'Hello', + timestamp: Date.now(), + } as StreamChunk) + + processor.processChunk({ + type: 'TEXT_MESSAGE_CONTENT', + messageId: 'some-other-id', + delta: ' world', + timestamp: Date.now(), + } as StreamChunk) + + processor.finalizeStream() + + const messages = processor.getMessages() + expect(messages).toHaveLength(1) + expect(messages[0]?.id).toBe(msgId) + expect(messages[0]?.parts[0]).toEqual({ + type: 'text', + content: 'Hello world', + }) + }) + }) + + describe('per-message tool calls', () => { + it('should route tool calls to the correct message via parentMessageId', () => { + const processor = new StreamProcessor() + + // Create two messages + processor.processChunk({ + type: 'TEXT_MESSAGE_START', + messageId: 'msg-a', + role: 'assistant', + timestamp: Date.now(), + } as StreamChunk) + + // Tool call on msg-a + processor.processChunk({ + type: 'TOOL_CALL_START', + toolCallId: 'tc-1', + toolName: 'myTool', + parentMessageId: 'msg-a', + timestamp: Date.now(), + } as StreamChunk) + + processor.processChunk({ + type: 'TOOL_CALL_ARGS', + toolCallId: 'tc-1', + delta: '{"arg": "val"}', + timestamp: Date.now(), + } as StreamChunk) + + processor.processChunk({ + type: 'TOOL_CALL_END', + toolCallId: 'tc-1', + timestamp: Date.now(), + } as StreamChunk) + + processor.finalizeStream() + + const messages = processor.getMessages() + expect(messages).toHaveLength(1) + + const toolCallPart = messages[0]?.parts.find( + (p) => p.type === 'tool-call', + ) + expect(toolCallPart).toBeDefined() + expect(toolCallPart?.type).toBe('tool-call') + if (toolCallPart?.type === 'tool-call') { + expect(toolCallPart.name).toBe('myTool') + expect(toolCallPart.state).toBe('input-complete') + } + }) + }) }) From f036f55be01e9b055c209588c17bb4b1ff379b4a Mon Sep 17 00:00:00 2001 From: James Arthur Date: Mon, 9 Feb 2026 17:49:49 -0800 Subject: [PATCH 04/20] feat(ai): replace STATE_SNAPSHOT with MESSAGES_SNAPSHOT event Add MessagesSnapshotEvent as a first-class AG-UI event type for conversation hydration. Replace the previous STATE_SNAPSHOT handler (which extracted messages from arbitrary state) with a dedicated MESSAGES_SNAPSHOT handler that accepts a typed messages array. 
- Add MessagesSnapshotEvent type to AGUIEventType and AGUIEvent unions - Add MESSAGES_SNAPSHOT case in StreamProcessor.processChunk() - Remove STATE_SNAPSHOT handler (falls through to default no-op) - Fix onStreamEnd to fire per-message (not only when no active messages remain) - Fix getActiveAssistantMessageId to return on first reverse match - Fix ensureAssistantMessage to emit onStreamStart and onMessagesChange - Add proposal docs for resumeable session support Co-Authored-By: Claude Opus 4.6 --- docs/proposals/resumeable-session-plan.md | 1033 +++++++++++++++++ docs/proposals/session-stream-support.md | 523 +++++++++ .../src/activities/chat/stream/processor.ts | 45 +- packages/typescript/ai/src/types.ts | 15 + .../ai/tests/stream-processor.test.ts | 165 ++- 5 files changed, 1716 insertions(+), 65 deletions(-) create mode 100644 docs/proposals/resumeable-session-plan.md create mode 100644 docs/proposals/session-stream-support.md diff --git a/docs/proposals/resumeable-session-plan.md b/docs/proposals/resumeable-session-plan.md new file mode 100644 index 00000000..98d687bb --- /dev/null +++ b/docs/proposals/resumeable-session-plan.md @@ -0,0 +1,1033 @@ +# Implementation Plan: Resumeable Session Support + +> **Purpose**: Step-by-step guide for implementing resumeable session support in TanStack AI. +> Produces a PR with code changes + a PR description markdown artifact for review. +> +> **Approach**: Unified SessionAdapter (Approach B). The ChatClient always operates +> through a `SessionAdapter` interface. When only a `ConnectionAdapter` is provided, +> it is wrapped in a `DefaultSessionAdapter` internally. +> +> **Design context**: See `docs/proposals/session-stream-support.md` for full rationale. + +--- + +## Progress + +Steps 1–3 are **complete** (with revisions noted inline). The following revisions +were made during implementation: + +1. `getActiveAssistantMessageId` — iterates the Set in reverse via `Array.from()` + + backward loop, returning on first assistant match +2. `onStreamEnd` — fires for every message on `TEXT_MESSAGE_END`, not just the last one +3. `STATE_SNAPSHOT` — handler and tests removed. Event falls through to the default + no-op case. Replaced by `MESSAGES_SNAPSHOT` (see Step 3e) +4. `ensureAssistantMessage` — calls `onStreamStart` and `emitMessagesChange` when it + auto-creates a message + +### PR Boundary + +**PR 1 (this PR)**: Steps 1–3 — StreamProcessor per-message state refactor + +AG-UI type alignment + `MESSAGES_SNAPSHOT` support. + +**PR 2 (follow-up)**: Steps 4–7 — SessionAdapter interface, DefaultSessionAdapter, +ChatClient refactor, framework hook updates. + +**Future PR**: `STATE_SNAPSHOT` / `STATE_DELTA` handling with managed `sessionState` +container (see "Extensibility: Session State" section at the end of this document). + +--- + +## Pre-flight + +Before starting, verify the baseline: + +```bash +cd packages/typescript/ai && pnpm test:lib && cd ../../.. +cd packages/typescript/ai-client && pnpm test:lib && cd ../../.. +cd packages/typescript/ai-react && pnpm test:lib && cd ../../.. +``` + +All existing tests must pass before any changes. + +--- + +## Step 1: AG-UI Type Alignment [DONE] + +**Files**: `packages/typescript/ai/src/types.ts` + +### 1a. 
Expand `TextMessageStartEvent.role` + +```typescript +// Before +export interface TextMessageStartEvent extends BaseAGUIEvent { + type: 'TEXT_MESSAGE_START' + messageId: string + role: 'assistant' // hardcoded +} + +// After +export interface TextMessageStartEvent extends BaseAGUIEvent { + type: 'TEXT_MESSAGE_START' + messageId: string + role: 'user' | 'assistant' | 'system' | 'tool' +} +``` + +### 1b. Add `parentMessageId` to `ToolCallStartEvent` + +```typescript +export interface ToolCallStartEvent extends BaseAGUIEvent { + type: 'TOOL_CALL_START' + toolCallId: string + toolName: string + parentMessageId?: string // NEW + index?: number +} +``` + +### 1c. Add `MessagesSnapshotEvent` + +AG-UI defines `MessagesSnapshot` as a first-class event type, distinct from +`StateSnapshot`. It delivers a complete history of messages in the current +conversation — used for initializing chat history, synchronizing after connection +interruptions, or hydrating state when a user joins an ongoing conversation. + +```typescript +/** + * Emitted to provide a snapshot of all messages in a conversation. + * + * Unlike StateSnapshot (which carries arbitrary application state), + * MessagesSnapshot specifically delivers the conversation transcript. + * This is a first-class AG-UI event type. + */ +export interface MessagesSnapshotEvent extends BaseAGUIEvent { + type: 'MESSAGES_SNAPSHOT' + /** Complete array of messages in the conversation */ + messages: Array +} +``` + +Add `'MESSAGES_SNAPSHOT'` to the `AGUIEventType` union and `MessagesSnapshotEvent` +to the `AGUIEvent` union (and therefore `StreamChunk`). + +### 1d. Verify + +Run `pnpm test:types` in the `ai` package. These are purely additive type changes +with no behavioral impact. Existing code that sets `role: 'assistant'` still compiles. + +--- + +## Step 2: StreamProcessor — Per-Message State Types [DONE] + +**Files**: `packages/typescript/ai/src/activities/chat/stream/types.ts` + +### 2a. Add `MessageStreamState` interface + +```typescript +/** + * Per-message streaming state. + * Tracks the accumulation of text, tool calls, and thinking content + * for a single message in the stream. + */ +export interface MessageStreamState { + id: string + role: 'user' | 'assistant' | 'system' | 'tool' + totalTextContent: string + currentSegmentText: string + lastEmittedText: string + thinkingContent: string + toolCalls: Map + toolCallOrder: Array + hasToolCallsSinceTextStart: boolean + isComplete: boolean +} +``` + +--- + +## Step 3: StreamProcessor — Refactor to Per-Message State [DONE] + +**Files**: `packages/typescript/ai/src/activities/chat/stream/processor.ts` + +This is the largest change. The existing single-message state variables are replaced +with a `Map` keyed by messageId. + +### 3a. Replace instance variables + +Remove: +``` +- currentAssistantMessageId: string | null +- totalTextContent: string +- currentSegmentText: string +- lastEmittedText: string +- thinkingContent: string +- toolCalls: Map +- toolCallOrder: Array +- hasToolCallsSinceTextStart: boolean +``` + +Add: +``` +- messageStates: Map +- activeMessageIds: Set // messages currently streaming +- toolCallToMessage: Map // toolCallId → messageId +- pendingManualMessageId: string | null // from startAssistantMessage() for compat +``` + +Keep shared: +``` +- finishReason: string | null +- isDone: boolean +``` + +### 3b. 
Add helper methods + +- `createMessageState(messageId, role): MessageStreamState` — creates and stores state +- `getMessageState(messageId): MessageStreamState | undefined` — lookup by messageId +- `getActiveAssistantMessageId(): string | null` — returns the most recent active + assistant messageId. Iterates `activeMessageIds` in reverse (Set is insertion-order; + convert to array and search backward). Used as fallback for events without messageId + routing. +- `ensureAssistantMessage(preferredId?): { messageId, state }` — finds or auto-creates + an assistant message. Fires `onStreamStart` and `emitMessagesChange` when it + auto-creates (backward compat for streams without `TEXT_MESSAGE_START`). + +### 3c. Handle `TEXT_MESSAGE_START` in `processChunk` + +Currently in the `default:` case (ignored). Move to explicit handler: + +```typescript +case 'TEXT_MESSAGE_START': + this.handleTextMessageStartEvent(chunk) + break +``` + +Handler logic: +1. If `pendingManualMessageId` is set (from `startAssistantMessage()`): + - Associate the manual message with this event's messageId + - Update the message's ID in the messages array if they differ + - Clear `pendingManualMessageId` + - Create `MessageStreamState` for the (now-resolved) messageId +2. If a message with this messageId already exists in messages (dedup): + - Just add to `activeMessageIds` and create state if missing +3. Otherwise: + - Create a new `UIMessage` with the given `messageId` and `role` + - Add to messages array + - Create `MessageStreamState` + - Add to `activeMessageIds` + - Emit `onStreamStart` and `onMessagesChange` + +### 3d. Handle `TEXT_MESSAGE_END` in `processChunk` + +Currently in the `default:` case (ignored). Move to explicit handler: + +```typescript +case 'TEXT_MESSAGE_END': + this.handleTextMessageEndEvent(chunk) + break +``` + +Handler logic: +1. Get the `MessageStreamState` for `chunk.messageId` +2. Emit any pending text for this message +3. Complete all tool calls for this message +4. Mark state as `isComplete = true` +5. Remove from `activeMessageIds` +6. Emit `onStreamEnd` for this message (fires per-message, not only on last) + +### 3e. Handle `MESSAGES_SNAPSHOT` in `processChunk` + +Add explicit handler for the AG-UI `MESSAGES_SNAPSHOT` event: + +```typescript +case 'MESSAGES_SNAPSHOT': + this.handleMessagesSnapshotEvent(chunk) + break +``` + +Handler logic: +1. Set `this.messages` to the snapshot messages (normalize with `normalizeToUIMessage` + if needed, or accept as-is if already in `UIMessage` format) +2. Emit `onMessagesChange` + +This is deliberately minimal. `MESSAGES_SNAPSHOT` is a first-class AG-UI event +for conversation hydration. It does NOT handle arbitrary application state — +that's `STATE_SNAPSHOT` / `STATE_DELTA`, which remain in the default no-op case +and are deferred to a future PR (see "Extensibility: Session State" below). + +**Why this isn't a special case**: `MESSAGES_SNAPSHOT` is a distinct AG-UI event +type with its own shape (`{ messages: Array<...> }`), separate from `StateSnapshot` +(`{ snapshot: Record }`). The SessionAdapter returns +`AsyncIterable` where `StreamChunk = AGUIEvent`. Adding event handlers +is purely additive — each new `case` branch in `processChunk()` handles one more +event type. The adapter interface doesn't change. + +### 3f. Update `TEXT_MESSAGE_CONTENT` handler + +Route by `chunk.messageId`: +1. Get state via `ensureAssistantMessage(chunk.messageId)` — falls back to active + assistant message, or auto-creates one (backward compat) +2. 
All text accumulation logic stays the same, but operates on the per-message state +3. `emitTextUpdate` receives the messageId to update the correct message + +### 3g. Update `TOOL_CALL_START` handler + +Route by `parentMessageId` or active message: +1. Determine messageId: `chunk.parentMessageId ?? getActiveAssistantMessageId()` +2. Store mapping: `toolCallToMessage.set(chunk.toolCallId, messageId)` +3. Get state for that messageId +4. Rest of logic (create InternalToolCallState, update UIMessage) stays the same + but uses `messageId` from the mapping instead of `currentAssistantMessageId` + +### 3h. Update `TOOL_CALL_ARGS`, `TOOL_CALL_END` handlers + +Route via `toolCallToMessage.get(chunk.toolCallId)` to get the messageId. +Logic stays the same but uses per-message state. + +### 3i. Update `STEP_FINISHED`, `CUSTOM` handlers + +Route to `getActiveAssistantMessageId()`. Logic stays the same. + +### 3j. Update `startAssistantMessage()` for backwards compatibility + +```typescript +startAssistantMessage(messageId?: string): string { + this.resetStreamState() + const id = messageId ?? generateMessageId() + + const assistantMessage: UIMessage = { + id, role: 'assistant', parts: [], createdAt: new Date() + } + + this.messages = [...this.messages, assistantMessage] + this.createMessageState(id, 'assistant') + this.activeMessageIds.add(id) + + // Mark as manually created — TEXT_MESSAGE_START will associate with this + this.pendingManualMessageId = id + + this.events.onStreamStart?.() + this.emitMessagesChange() + return id +} +``` + +### 3k. Update `resetStreamState()` + +Clear `messageStates`, `activeMessageIds`, `toolCallToMessage`, `pendingManualMessageId`. + +### 3l. Update `finalizeStream()` + +Finalize ALL active messages (emit pending text, complete tool calls for each). +Clear `activeMessageIds`. Emit `onStreamEnd` for the last assistant message. + +### 3m. Update `areAllToolsComplete()` + +No change needed — it already looks at the last assistant message's parts in the +messages array, not at internal state. + +### 3n. Verify + +Run existing `stream-processor.test.ts`. All existing tests should pass because +they use `startAssistantMessage()` which creates the per-message state via the +backwards-compat path. 
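
For example, the dedup path from 3c can be exercised directly. A minimal sketch (chunk literal cast to `StreamChunk`, as the existing tests do):

```typescript
const processor = new StreamProcessor()
processor.startAssistantMessage() // sets pendingManualMessageId internally

processor.processChunk({
  type: 'TEXT_MESSAGE_START',
  messageId: 'srv-1', // server-assigned ID, differs from the manual one
  role: 'assistant',
  timestamp: Date.now(),
} as StreamChunk)

// Expected: still a single message, now re-keyed to 'srv-1'
console.assert(processor.getMessages().length === 1)
console.assert(processor.getMessages()[0]?.id === 'srv-1')
```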
Add new tests:

- `TEXT_MESSAGE_START` creates a message with correct role and messageId
- `TEXT_MESSAGE_START` with `role: 'user'` creates a user message
- `TEXT_MESSAGE_END` finalizes the message and emits `onStreamEnd`
- `TEXT_MESSAGE_END` emits pending text that was buffered by chunk strategy
- Two interleaved assistant messages (TEXT_MESSAGE_START for msg-a, TEXT_MESSAGE_START
  for msg-b, content for msg-a, content for msg-b, END for msg-a, END for msg-b)
- `onStreamEnd` fires for each message that ends (two messages = two calls)
- Dedup: `startAssistantMessage()` followed by `TEXT_MESSAGE_START` with different ID
  associates them correctly (single message, not duplicate)
- Dedup: `startAssistantMessage('id')` followed by `TEXT_MESSAGE_START` with same ID
- `TEXT_MESSAGE_START` without prior `startAssistantMessage()` works and fires `onStreamStart`
- `ensureAssistantMessage` auto-creates message and fires `onStreamStart` when content
  arrives without prior `TEXT_MESSAGE_START`
- Backward compat: `startAssistantMessage()` without `TEXT_MESSAGE_START` still works
- Tool calls routed via `parentMessageId`
- `MESSAGES_SNAPSHOT` hydrates messages and emits `onMessagesChange`
- `MESSAGES_SNAPSHOT` replaces existing messages (not appends)

---

## Step 4: SessionAdapter Interface + DefaultSessionAdapter

**Files**: `packages/typescript/ai-client/src/session-adapter.ts` (new file)

### 4a. Define the `SessionAdapter` interface

```typescript
import type { StreamChunk, UIMessage } from '@tanstack/ai'
import type { ConnectionAdapter } from './connection-adapters'

/**
 * Session adapter interface for persistent stream-based chat sessions.
 *
 * Unlike ConnectionAdapter (which creates a new stream per request),
 * a SessionAdapter maintains a persistent subscription. Responses from
 * send() arrive through subscribe(), not as a return value.
 *
 * The subscribe() stream yields standard AG-UI events (StreamChunk).
 * The processor handles whichever event types it supports — currently
 * text message lifecycle, tool calls, and MESSAGES_SNAPSHOT. Future
 * event handlers (STATE_SNAPSHOT, STATE_DELTA, etc.) are purely additive.
 */
export interface SessionAdapter {
  /**
   * Subscribe to the session stream.
   * Returns an async iterable that yields chunks continuously.
   * For durable sessions, this may first yield a MESSAGES_SNAPSHOT
   * to hydrate the conversation, then subscribe to the live stream
   * from the appropriate offset.
   */
  subscribe(signal?: AbortSignal): AsyncIterable<StreamChunk>

  /**
   * Send messages to the session.
   * For durable sessions, the proxy writes to the stream and forwards to the API.
   * The response arrives through subscribe(), not as a return value.
   */
  send(
    messages: Array<UIMessage>,
    data?: Record<string, unknown>,
    signal?: AbortSignal,
  ): Promise<void>
}
```

### 4b. Implement `createDefaultSession()`

Wraps a `ConnectionAdapter` into a `SessionAdapter` using an async queue pattern.
`send()` calls `connection.connect()` and pushes chunks to the queue.
`subscribe()` yields chunks from the queue. 
```typescript
export function createDefaultSession(
  connection: ConnectionAdapter,
): SessionAdapter {
  // Async queue: send() pushes chunks, subscribe() yields them
  const buffer: Array<StreamChunk> = []
  const waiters: Array<(chunk: StreamChunk | null) => void> = []

  function push(chunk: StreamChunk): void {
    const waiter = waiters.shift()
    if (waiter) {
      waiter(chunk)
    } else {
      buffer.push(chunk)
    }
  }

  return {
    async *subscribe(signal?: AbortSignal) {
      while (!signal?.aborted) {
        let chunk: StreamChunk | null
        if (buffer.length > 0) {
          chunk = buffer.shift()!
        } else {
          chunk = await new Promise<StreamChunk | null>((resolve) => {
            waiters.push(resolve)
            signal?.addEventListener('abort', () => resolve(null), { once: true })
          })
        }
        if (chunk !== null) yield chunk
      }
    },

    async send(messages, data, signal) {
      const stream = connection.connect(messages, data, signal)
      for await (const chunk of stream) {
        push(chunk)
      }
    },
  }
}
```

### 4c. Add tests for DefaultSessionAdapter

- Basic: send text chunks -> subscribe yields them
- Multiple sends: chunks from send #1 then send #2 arrive in order
- Abort: aborting the subscribe signal stops the iterator
- Error: errors in connection.connect() propagate through send()

---

## Step 5: ChatClient Refactor

**Files**: `packages/typescript/ai-client/src/chat-client.ts`

This is the second-largest change. The `streamResponse()` and `processStream()`
methods are removed. All chunk consumption goes through the subscription loop.

### 5a. Update `ChatClientOptions`

In `packages/typescript/ai-client/src/types.ts`:

```typescript
export interface ChatClientOptions<TTools = any> {
  /**
   * Connection adapter for streaming (request-response mode).
   * Wrapped in a DefaultSessionAdapter internally.
   * Provide either `connection` or `session`, not both.
   */
  connection?: ConnectionAdapter

  /**
   * Session adapter for persistent stream-based sessions.
   * When provided, takes over from connection.
   * Provide either `connection` or `session`, not both.
   */
  session?: SessionAdapter

  // ... rest unchanged
}
```

Note: `connection` changes from required to optional. This is a breaking type
change but existing code providing `connection` still compiles.

### 5b. Update ChatClient constructor

```typescript
constructor(options: ChatClientOptions) {
  // Resolve session adapter
  if (options.session) {
    this.session = options.session
  } else if (options.connection) {
    this.session = createDefaultSession(options.connection)
  } else {
    throw new Error('Either connection or session must be provided')
  }

  // ... existing setup (processor, callbacks, tools) ...

  // Start subscription
  this.startSubscription()
}
```

New instance variables:
```typescript
private session: SessionAdapter
private subscriptionAbortController: AbortController | null = null
```

Remove:
```typescript
private connection: ConnectionAdapter // replaced by session
```

### 5c. 
Add `startSubscription()` method

```typescript
private startSubscription(): void {
  this.subscriptionAbortController = new AbortController()
  const signal = this.subscriptionAbortController.signal

  // Run subscription in background (don't await in constructor)
  this.consumeSubscription(signal).catch((err) => {
    if (err instanceof Error && err.name !== 'AbortError') {
      this.setError(err)
      this.setStatus('error')
      this.callbacksRef.current.onError(err)
    }
  })
}

private async consumeSubscription(signal: AbortSignal): Promise<void> {
  const stream = this.session.subscribe(signal)
  for await (const chunk of stream) {
    if (signal.aborted) break
    this.callbacksRef.current.onChunk(chunk)
    this.processor.processChunk(chunk)
    await new Promise((resolve) => setTimeout(resolve, 0))
  }
}
```

### 5d. Rewrite `sendMessage()`

Remove the `streamResponse()` call. Instead, send through the session adapter:

```typescript
async sendMessage(content: string | MultimodalContent, body?: Record<string, unknown>): Promise<void> {
  const emptyMessage = typeof content === 'string' && !content.trim()
  if (emptyMessage || this.isLoading) return

  const normalizedContent = this.normalizeMessageInput(content)
  this.pendingMessageBody = body

  // Add user message optimistically
  const userMessage = this.processor.addUserMessage(
    normalizedContent.content,
    normalizedContent.id,
  )
  this.events.messageSent(userMessage.id, normalizedContent.content)

  // Send through session adapter
  this.setIsLoading(true)
  this.setStatus('submitted')
  this.setError(undefined)

  try {
    const mergedBody = {
      ...this.body,
      ...this.pendingMessageBody,
      conversationId: this.uniqueId,
    }
    this.pendingMessageBody = undefined

    await this.session.send(this.processor.getMessages(), mergedBody)
  } catch (err) {
    if (err instanceof Error) {
      if (err.name === 'AbortError') return
      this.setError(err)
      this.setStatus('error')
      this.callbacksRef.current.onError(err)
    }
    this.setIsLoading(false)
  }
}
```

**Key difference**: `sendMessage` resolves when `session.send()` completes
(HTTP request done), not when the response finishes streaming. The response
arrives through the subscription. `isLoading` is set to false by the processor's
`onStreamEnd` event (wired up in the constructor callbacks).

### 5e. Wire processor events to isLoading

Update the processor event wiring in the constructor:

```typescript
onStreamStart: () => {
  this.setStatus('streaming')
  // In session mode, streaming status already set via sendMessage
},
onStreamEnd: (message: UIMessage) => {
  this.callbacksRef.current.onFinish(message)
  this.setIsLoading(false) // NEW: reset loading when generation ends
  this.setStatus('ready')

  // Check for continuation (agent loop)
  this.checkForContinuation().catch(console.error)
},
```

### 5f. Rewrite `checkForContinuation()`

```typescript
private async checkForContinuation(): Promise<void> {
  if (this.continuationPending) return
  if (!this.shouldAutoSend()) return

  this.continuationPending = true
  try {
    this.setIsLoading(true)
    this.setStatus('submitted')
    await this.session.send(this.processor.getMessages(), {
      ...this.body,
      conversationId: this.uniqueId,
    })
  } catch (err) {
    if (err instanceof Error && err.name !== 'AbortError') {
      this.setError(err)
      this.setStatus('error')
      this.callbacksRef.current.onError(err)
    }
    this.setIsLoading(false)
  } finally {
    this.continuationPending = false
  }
}
```

### 5g. 
Simplify `stop()`

```typescript
stop(): void {
  this.subscriptionAbortController?.abort()
  this.subscriptionAbortController = null
  this.setIsLoading(false)
  this.setStatus('ready')
  this.events.stopped()
}
```

### 5h. Update `reload()`

```typescript
async reload(): Promise<void> {
  const messages = this.processor.getMessages()
  if (messages.length === 0) return

  const lastUserMessageIndex = messages.findLastIndex(m => m.role === 'user')
  if (lastUserMessageIndex === -1) return

  this.events.reloaded(lastUserMessageIndex)
  this.processor.removeMessagesAfter(lastUserMessageIndex)

  // Send through session adapter
  this.setIsLoading(true)
  this.setStatus('submitted')
  try {
    await this.session.send(this.processor.getMessages(), {
      ...this.body,
      conversationId: this.uniqueId,
    })
  } catch (err) {
    if (err instanceof Error && err.name !== 'AbortError') {
      this.setError(err)
      this.setStatus('error')
      this.callbacksRef.current.onError(err)
    }
    this.setIsLoading(false)
  }
}
```

### 5i. Update `append()`

```typescript
async append(message: UIMessage | ModelMessage): Promise<void> {
  const normalizedMessage = normalizeToUIMessage(message, generateMessageId)
  if (normalizedMessage.role === 'system') return

  const uiMessage = normalizedMessage as UIMessage
  this.events.messageAppended(uiMessage)

  const messages = this.processor.getMessages()
  this.processor.setMessages([...messages, uiMessage])

  this.setIsLoading(true)
  this.setStatus('submitted')
  try {
    await this.session.send(this.processor.getMessages(), {
      ...this.body,
      conversationId: this.uniqueId,
    })
  } catch (err) {
    if (err instanceof Error && err.name !== 'AbortError') {
      this.setError(err)
      this.setStatus('error')
      this.callbacksRef.current.onError(err)
    }
    this.setIsLoading(false)
  }
}
```

### 5j. Update `updateOptions()`

Replace `connection` with `session`:

```typescript
updateOptions(options: {
  connection?: ConnectionAdapter
  session?: SessionAdapter
  body?: Record<string, unknown>
  tools?: ReadonlyArray<ClientTool>
  // ... callbacks
}): void {
  if (options.session !== undefined) {
    // Stop current subscription, update adapter, restart
    this.subscriptionAbortController?.abort()
    this.session = options.session
    this.startSubscription()
  } else if (options.connection !== undefined) {
    this.subscriptionAbortController?.abort()
    this.session = createDefaultSession(options.connection)
    this.startSubscription()
  }
  // ... rest unchanged
}
```

### 5k. Remove dead code

Delete:
- `streamResponse()` method
- `processStream()` method
- `private connection: ConnectionAdapter` field
- `private abortController: AbortController | null` field (replaced by subscriptionAbortController)
- `private currentStreamId: string | null` field
- `private currentMessageId: string | null` field
- `private postStreamActions` and `drainPostStreamActions()` (no longer needed —
  the subscription loop processes events continuously)
- `queuePostStreamAction()` method

### 5l. Verify

Update `chat-client.test.ts`:
- Existing tests use `createMockConnectionAdapter()` which returns a `ConnectionAdapter`.
  These should still work because the ChatClient wraps it in `createDefaultSession()`.
- The test assertions about message content, callbacks, loading state should mostly
  still pass. Some timing-sensitive tests may need adjustment because `sendMessage`
  now resolves at a different point. 
+- Add new tests: + - ChatClient with explicit `session` option + - Session mode: chunks arrive through subscription + - Agent loop continuation in session mode + +Also update `test-utils.ts`: +- The `createTextChunks` helper should include `TEXT_MESSAGE_START` and + `TEXT_MESSAGE_END` events to match what real servers emit. This is needed + because the processor now relies on these events for message creation. + **Important**: existing tests call `startAssistantMessage()` externally, + so these events need to work with the dedup logic. + +--- + +## Step 6: Export Updates + +**Files**: `packages/typescript/ai-client/src/index.ts` + +Add exports: + +```typescript +export { + createDefaultSession, + type SessionAdapter, +} from './session-adapter' +``` + +--- + +## Step 7: Framework Hook Updates + +**Files**: +- `packages/typescript/ai-react/src/types.ts` +- `packages/typescript/ai-react/src/use-chat.ts` +- Similarly for `ai-solid`, `ai-vue`, `ai-svelte`, `ai-preact` + +### 7a. Update `UseChatOptions` + +The `UseChatOptions` type derives from `ChatClientOptions` via `Omit`. Since +`ChatClientOptions` now includes `session?: SessionAdapter`, it flows through +automatically. No change needed in the React types file. + +### 7b. Update `useChat` hook + +The `useChat` hook passes options to `ChatClient`. Since `session` is now part +of `ChatClientOptions`, it flows through automatically. Verify that: +- `optionsRef.current.session` is passed to the ChatClient constructor +- `updateOptions` propagates session changes + +The existing code passes `connection: optionsRef.current.connection` to the +constructor. Update to also pass `session: optionsRef.current.session`. + +### 7c. Verify + +Run `pnpm test:lib` in `ai-react` and other framework packages. The hook tests +use mock connection adapters, which should still work through the default adapter. + +--- + +## Step 8: Full Test Suite + +Run the complete test suite: + +```bash +pnpm test:lib # Unit tests +pnpm test:types # Type checking +pnpm test:eslint # Linting +pnpm test:build # Build verification +pnpm format # Format code +``` + +Fix any failures before proceeding. + +--- + +## Step 9: PR Description Artifact + +Write the PR description to `docs/proposals/session-stream-pr.md`. + +### PR 1: StreamProcessor per-message state + AG-UI alignment + +#### Title +`feat(ai): per-message stream state and AG-UI type alignment` + +#### Summary +- Refactors StreamProcessor from single-message to per-message state tracking +- Handles `TEXT_MESSAGE_START` / `TEXT_MESSAGE_END` as first-class events +- Adds `MESSAGES_SNAPSHOT` event type and handler for conversation hydration +- Expands `TextMessageStartEvent.role` to support all AG-UI roles +- Adds `parentMessageId` to `ToolCallStartEvent` for message correlation + +#### Motivation +Foundation for durable session support. The StreamProcessor needs to: +- Track multiple concurrent messages (interleaved streams) +- Use messageId from incoming events (not generate its own) +- Hydrate conversation state from snapshots (reconnect/resume) + +#### Breaking Changes +None. All changes are additive. `startAssistantMessage()` continues to work +via backwards-compatibility dedup logic. 
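
For illustration, a stream the processor can now handle (this mirrors the new interleaving tests; `timestamp` fields omitted for brevity):

```typescript
const events = [
  { type: 'TEXT_MESSAGE_START', messageId: 'msg-a', role: 'assistant' },
  { type: 'TEXT_MESSAGE_START', messageId: 'msg-b', role: 'assistant' },
  { type: 'TEXT_MESSAGE_CONTENT', messageId: 'msg-a', delta: 'Hello from A' },
  { type: 'TEXT_MESSAGE_CONTENT', messageId: 'msg-b', delta: 'Hello from B' },
  { type: 'TEXT_MESSAGE_END', messageId: 'msg-a' },
  { type: 'TEXT_MESSAGE_END', messageId: 'msg-b' },
]
for (const event of events) {
  processor.processChunk(event as StreamChunk) // cast as in the test suite
}
// processor.getMessages() now contains two assistant messages, routed by messageId
```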
+ +### PR 2: SessionAdapter + ChatClient refactor + +#### Title +`feat: session adapter support for durable chat sessions` + +#### Summary +- SessionAdapter interface (`subscribe()` / `send()`) for persistent sessions +- DefaultSessionAdapter wraps ConnectionAdapter via async queue +- ChatClient unified refactor: all chunk consumption via subscription loop +- Framework hook updates pass through `session` option + +#### Breaking Changes +- `ChatClientOptions.connection` is now optional (was required) +- `sendMessage()` promise resolves when send completes, not when response finishes +- `processStream()` and `streamResponse()` are removed (internal) + +#### Migration Guide +- Existing code using `connection:` continues to work unchanged +- To use session mode: provide `session:` instead of `connection:` + +#### DX Example + +```tsx +import { useChat, fetchServerSentEvents } from '@tanstack/ai-react' + +// Existing usage — unchanged +function BasicChat() { + const { messages } = useChat({ + connection: fetchServerSentEvents('/api/chat'), + }) +} + +// New: with a durable session adapter +import { createDurableSession } from '@durable-streams/tanstack' + +function DurableChat() { + const { messages } = useChat({ + session: createDurableSession({ + proxyUrl: PROXY_URL, + sessionId: 'session-123', + sendUrl: '/api/chat', + connectUrl: '/api/connect', + }), + }) +} +``` + +--- + +## Implementation Notes + +### Backwards Compatibility — `startAssistantMessage()` + `TEXT_MESSAGE_START` + +In the current connection mode, the server (TextEngine) emits `TEXT_MESSAGE_START` +events. Previously these were ignored. Now the processor handles them. But the +`DefaultSessionAdapter` feeds chunks from `connection.connect()` through the +subscription, and the ChatClient no longer calls `startAssistantMessage()`. + +For direct `StreamProcessor` users who still call `startAssistantMessage()`: +- `startAssistantMessage()` creates a message and sets `pendingManualMessageId` +- When `TEXT_MESSAGE_START` arrives, the handler checks `pendingManualMessageId` +- If set, it associates the event with the existing message (no duplicate) +- If the messageId differs, update the message's ID + +### The DefaultSessionAdapter Async Queue + +The queue is a simple producer-consumer pattern (~30 lines). It must handle: +- **Backpressure**: Buffer chunks when subscriber is slower than producer +- **Multiple sends**: Queue chunks from sequential `send()` calls correctly +- **Abort**: Resolve waiting promises with null on abort signal +- **No memory leaks**: Don't accumulate waiters after abort + +### `sendMessage` Promise Semantics + +In connection mode (via DefaultSessionAdapter), `session.send()` awaits the full +`connection.connect()` iteration. So `sendMessage()` resolves when all chunks +have been pushed to the queue (similar to current timing, though finalization +happens asynchronously in the subscription loop). + +In durable session mode, `session.send()` resolves when the HTTP request to the +proxy completes. The actual response streams through the subscription. + +### `isLoading` Management + +`isLoading` is set true in `sendMessage()` / `checkForContinuation()` / `reload()`. +It is set false in the processor's `onStreamEnd` callback, which fires when +`TEXT_MESSAGE_END` or `RUN_FINISHED` is processed. + +Edge case: if `send()` fails (error thrown), `isLoading` is set false in the +catch block. 
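
To make the timing concrete (behavior as specified above; `client` is a `ChatClient` in either mode):

```typescript
// Resolves when the send completes, not when the response finishes streaming.
await client.sendMessage('Hello')

// Connection mode (DefaultSessionAdapter): all chunks are queued by now, but
// the subscription loop may still be draining them, so isLoading can still be
// true here.
// Durable mode: the proxy has accepted the request; the response is still
// streaming in. onStreamEnd (TEXT_MESSAGE_END / RUN_FINISHED) resets isLoading.
```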

### DevTools Events

The current devtools integration uses `currentStreamId` and `currentMessageId`
which are set in `processStream()`. Since `processStream()` is removed, devtools
events need to be wired differently — either from the subscription loop or from
processor events. This may need a follow-up if devtools integration breaks.

---

## Extensibility: Session State

The SessionAdapter interface returns `AsyncIterable<StreamChunk>` where
`StreamChunk = AGUIEvent`. This already includes `STATE_SNAPSHOT`, `STATE_DELTA`,
and `CUSTOM` events in the union type. The transport layer is fully extensible —
any session implementation can emit these events and they flow through the
subscription.

Adding support for new event types is purely additive: a new `case` branch in
`processChunk()`. The SessionAdapter interface does not change.

### Planned: Managed `sessionState` container (future PR)

Based on analysis of the AG-UI spec and real-world use cases (user presence,
agent registration, typing indicators, session metadata), the recommended
approach is a managed state container in the ChatClient:

1. **`STATE_SNAPSHOT` handler** — store full state object, extract messages if present
2. **`STATE_DELTA` handler** — apply delta (shallow merge initially, JSON Patch later)
3. **`onSessionStateChange` callback** — in `ChatClientOptions` and `StreamProcessorEvents`
4. **`getSessionState()` getter** — on ChatClient
5. **`sessionState` in framework hooks** — as reactive state

This is "Proposal A" from the session state extensibility analysis. It adds:
- One new callback (`onSessionStateChange`)
- One new getter (`getSessionState()`)
- One new piece of reactive state in hooks (`sessionState`)

Users who don't use session state pay zero cost. The `Record<string, unknown>`
type can later be made generic if demand warrants it.

Additionally, an `onCustomEvent` callback can forward `CUSTOM` events for
application-specific functionality (typing indicators, participant events, etc.)
without overloading `STATE_SNAPSHOT`. 
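
A rough sketch of what those handlers might look like, to make the shape concrete. All names are provisional and nothing here is part of PR 1 or PR 2; `chunk.state` follows the existing `StateSnapshotEvent`, while the delta field shape is an assumption:

```typescript
// Hypothetical future StreamProcessor members (provisional names).
private sessionState: Record<string, unknown> = {}

private handleStateSnapshotEvent(chunk: StateSnapshotEvent): void {
  this.sessionState = { ...chunk.state }
  this.events.onSessionStateChange?.(this.sessionState)
}

private handleStateDeltaEvent(chunk: StateDeltaEvent): void {
  // Field name `delta` and shallow-merge semantics are assumptions for now;
  // JSON Patch support could replace the merge later.
  this.sessionState = { ...this.sessionState, ...chunk.delta }
  this.events.onSessionStateChange?.(this.sessionState)
}
```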
+ +### Event support roadmap + +| Event | PR 1 (this PR) | PR 2 (SessionAdapter) | Future PR | +|-------|----------------|----------------------|-----------| +| `TEXT_MESSAGE_START/CONTENT/END` | Handled | — | — | +| `TOOL_CALL_START/ARGS/END` | Handled | — | — | +| `RUN_STARTED/FINISHED/ERROR` | Handled | — | — | +| `STEP_STARTED/FINISHED` | Handled | — | — | +| `CUSTOM` (tool-input, approval) | Handled | — | — | +| `MESSAGES_SNAPSHOT` | Handled | — | — | +| `STATE_SNAPSHOT` | No-op (falls through) | No-op | Managed sessionState | +| `STATE_DELTA` | No-op (falls through) | No-op | Managed sessionState | +| `CUSTOM` (general callback) | — | — | `onCustomEvent` | + +--- + +## Out of Scope (Follow-up PRs) + +- `@durable-streams/tanstack` package (the `createDurableSession()` implementation) +- Server-side changes to TextEngine for session mode +- Per-message `isLoading` tracking (currently global) +- `connectUrl` / snapshot / offset mechanics (lives in durable streams package) +- `STATE_SNAPSHOT` / `STATE_DELTA` processing (managed sessionState — see above) +- `onSessionStateChange` / `onCustomEvent` callbacks +- `sessionState` reactive state in framework hooks +- Documentation updates +- Example app using session mode diff --git a/docs/proposals/session-stream-support.md b/docs/proposals/session-stream-support.md new file mode 100644 index 00000000..24f7cec5 --- /dev/null +++ b/docs/proposals/session-stream-support.md @@ -0,0 +1,523 @@ +# RFC: Durable Session Support for TanStack AI + +> **Status**: Draft +> **Authors**: thruflo +> **Created**: 2026-02-09 +> **Last Updated**: 2026-02-09 + + +## Summary + +This proposal outlines changes to TanStack AI that would enable **durable sessions** - a pattern where a persistent, append-only stream serves as the source of truth for a conversation. + +This enables: + +- **resilience**: tolerate patchy connectivity and tab backgrounding +- **resumability**: reconnect and resume active generations; survives page refreshes and re-renders +- **persistence**: Full conversation history lives in the stream +- **multi-user/agent/tab/device**: Messages from any source appear in real-time + +The proposed changes are backwards compatible and align TanStack AI more closely with the AG-UI protocol specification. + + +## Motivation + +### Current Architecture + +TanStack AI's `ChatClient` and `useChat` hook are built around a **request-response model**: + +1. User calls `sendMessage(content)` +2. Client calls `connection.connect(messages)` +3. Connection adapter makes HTTP request, returns `AsyncIterable` +4. Client processes chunks, builds assistant message +5. Stream ends, client returns to ready state + +This model works well for simple chat UIs but breaks down in several scenarios. + +### Problem 1: No resumability + +If the user refreshes the page mid-generation, or the network drops, the response is lost. There's no way to resume where the stream left off. + +### Problem 2: Single-user assumption + +The current model assumes one user sends a message and receives a response. It doesn't support: + +- multiple users in the same conversation +- multiple AI agents responding +- messages arriving from other tabs/devices +- background agents adding messages asynchronously + +### Problem 3: Tight coupling of send and receive + +`sendMessage()` both writes a user message AND waits for the response stream. 
For session streams, these should be decoupled: + +- **write**: add message to session (proxy writes to stream) +- **read**: continuously consume from stream (independent of writes) + +### Problem 4: Type limitations + +Current types don't fully align with AG-UI: + +- `TextMessageStartEvent.role` is hardcoded to `'assistant'` +- tool events lack `parentMessageId` for message correlation +- no support for user messages as stream events + + +## Background: Durable session pattern + +### What is a Session Stream? + +A session stream is a durable, append-only log of conversation events. All participants (users, agents) write to the same stream, and all clients consume from it. + +``` +┌──────────────────────────────────────────────────────────────┐ +│ Session Stream │ +│ [user-msg-1] [assistant-chunk] [chunk] [chunk] [run-end] │ +│ [user-msg-2] [assistant-chunk] [tool-call] [chunk] ... │ +└──────────────────────────────────────────────────────────────┘ + ▲ │ + │ write │ consume + │ ▼ + ┌─────────┐ ┌─────────────┐ + │ Client │ │ Client │ + │ A │ │ A, B, C │ + └─────────┘ └─────────────┘ +``` + +### How it differs from request-response + +| Aspect | Request-Response | Durable Session | +|--------|------------------|-----------------| +| Connection | Per-request | Persistent | +| Message source | Only "my" responses | Any participant | +| History | Loaded separately | Consumed from stream | +| Resume | Not possible | Natural (just consume) | +| Send/Receive | Coupled | Decoupled | + +### Durable Streams integration + +The [Durable Streams](https://electric-sql.com/products/durable-streams) project provides infrastructure for this pattern: + +- persistent, addressable streams with reliable delivery +- resumable consumption with offset tracking +- URL-based access with signature renewal +- proxy service that forwards requests and captures responses + + +## Key Insights from analysis + +### 1. StreamProcessor Ignores `messageId` + +The current `StreamProcessor` generates its own message IDs and tracks only a single `currentAssistantMessageId`. It doesn't use the `messageId` from incoming chunks. + +```typescript +// Current behavior - ignores chunk.messageId +startAssistantMessage(): string { + const assistantMessage: UIMessage = { + id: generateMessageId(), // Always generates new ID + role: 'assistant', + // ... + } + this.currentAssistantMessageId = assistantMessage.id +} +``` + +**Impact**: Cannot correlate messages across reconnects or deduplicate. + +### 2. Tool Events Lack Message Correlation + +AG-UI specifies `parentMessageId` on tool call events, but TanStack AI doesn't implement this: + +| Event | AG-UI Spec | TanStack AI | +|-------|-----------|-------------| +| `ToolCallStart.parentMessageId` | Optional | Missing | +| `ToolCallStart.toolCallId` | Required | Implemented | + +**Mitigation**: We can track `toolCallId → messageId` mapping ourselves based on stream order. The `toolCallId` is sufficient to correlate tool call chunks together. When `parentMessageId` is available, we use it; otherwise, we associate tool calls with the most recently active message. + +### 3. AG-UI Supports User Messages + +The AG-UI protocol's `TextMessageStart` event has a `role` field supporting `'user' | 'assistant' | 'system' | 'tool'`. User messages can use standard `TEXT_MESSAGE_*` events - they don't need a custom format. + +```typescript +// AG-UI supports this +{ type: 'TEXT_MESSAGE_START', messageId: 'msg-1', role: 'user', timestamp: ... 
} +{ type: 'TEXT_MESSAGE_CONTENT', messageId: 'msg-1', delta: 'Hello!', ... } +{ type: 'TEXT_MESSAGE_END', messageId: 'msg-1', timestamp: ... } +``` + +### 4. Multi-Message Interleaving is Manageable + +While true chunk-level interleaving of multiple assistant responses could be complex, in practice: + +- Each LLM response streams sequentially (text, then tool calls) +- Interleaving happens at the response level, not chunk level +- We can track "active message" and associate chunks correctly + +### 5. Message Ordering Should Use Position, Not Timestamps + +Timestamps can have clock skew across clients. The stream itself is ordered. A simple counter based on first-seen order is more reliable: + +```typescript +private messageOrder: Map = new Map() +private orderCounter = 0 + +getOrAssignOrder(messageId: string): number { + if (!this.messageOrder.has(messageId)) { + this.messageOrder.set(messageId, this.orderCounter++) + } + return this.messageOrder.get(messageId)! +} +``` + +### 6. User Message Deduplication + +When a client sends a message optimistically, then sees it echoed back from the stream, we need deduplication: + +1. Client generates `messageId`, adds to local state as "pending" +2. Client sends to proxy with that `messageId` +3. Proxy writes `TEXT_MESSAGE_*` events with that `messageId` to stream +4. Client sees message from stream - if `messageId` matches pending, confirm; else add new + + +## Proposed Changes + +### Phase 1: Type Alignment with AG-UI + +**File**: `packages/typescript/ai/src/types.ts` + +#### 1.1 TextMessageStartEvent Role + +```typescript +// Before +export interface TextMessageStartEvent extends BaseAGUIEvent { + type: 'TEXT_MESSAGE_START' + messageId: string + role: 'assistant' // Hardcoded +} + +// After +export interface TextMessageStartEvent extends BaseAGUIEvent { + type: 'TEXT_MESSAGE_START' + messageId: string + role: 'user' | 'assistant' | 'system' | 'tool' // Align with AG-UI +} +``` + +#### 1.2 Tool Events - Add parentMessageId + +```typescript +// Add to ToolCallStartEvent +export interface ToolCallStartEvent extends BaseAGUIEvent { + type: 'TOOL_CALL_START' + toolCallId: string + toolName: string + parentMessageId?: string // NEW - optional, for message correlation + index?: number +} + +// Similarly for ToolCallArgsEvent and ToolCallEndEvent +``` + +**Rationale**: Aligns with AG-UI specification. Fully backwards compatible. + +--- + +### Phase 2: StreamProcessor Enhancements + +**File**: `packages/typescript/ai/src/activities/chat/stream/processor.ts` + +#### 2.1 Use messageId from Chunks + +```typescript +// Before +startAssistantMessage(): string { + const assistantMessage: UIMessage = { + id: generateMessageId(), + role: 'assistant', + // ... + } +} + +// After - accept optional parameters +startMessage(options?: { + messageId?: string + role?: 'user' | 'assistant' +}): string { + const { + messageId = generateMessageId(), + role = 'assistant' + } = options ?? {} + + const message: UIMessage = { + id: messageId, + role, + parts: [], + createdAt: new Date(), + } + // ... +} +``` + +When processing `TEXT_MESSAGE_START`: + +```typescript +case 'TEXT_MESSAGE_START': + this.startMessage({ + messageId: chunk.messageId, + role: chunk.role + }) + break +``` + +**Rationale**: Enables deduplication and correlation with external systems. Backwards compatible - generates ID if none provided. 
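
As a usage sketch (assuming the signature above):

```typescript
// Replaying a TEXT_MESSAGE_START from the stream preserves the external ID.
const id = processor.startMessage({ messageId: 'msg-from-stream', role: 'user' })
// id === 'msg-from-stream', so when the same messageId is seen again (for
// example after a reconnect) the client can match the existing message
// instead of appending a duplicate.
```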
+ +#### 2.2 Multi-Message State Tracking + +```typescript +// Before - single message state +private currentAssistantMessageId: string | null = null +private textContent = '' +private toolCalls: Map = new Map() + +// After - per-message state +private messageStates: Map = new Map() +private activeMessageId: string | null = null // Most recently seen +private toolCallToMessage: Map = new Map() // toolCallId -> messageId + +interface MessageStreamState { + textContent: string + currentSegmentText: string + toolCalls: Map + isComplete: boolean +} +``` + +Chunk processing routes to correct message: + +```typescript +case 'TEXT_MESSAGE_CONTENT': + const state = this.getOrCreateMessageState(chunk.messageId) + state.textContent += chunk.delta + this.activeMessageId = chunk.messageId + this.updateMessageParts(chunk.messageId) + break + +case 'TOOL_CALL_START': + // Prefer explicit parentMessageId, fall back to active message + const messageId = (chunk as any).parentMessageId ?? this.activeMessageId + if (messageId) { + this.toolCallToMessage.set(chunk.toolCallId, messageId) + this.addToolCallToMessage(messageId, chunk) + } + break + +case 'TOOL_CALL_ARGS': + const msgId = this.toolCallToMessage.get(chunk.toolCallId) + if (msgId) { + this.updateToolCallArgs(msgId, chunk) + } + break +``` + +**Rationale**: Enables multi-agent scenarios. Backwards compatible - single message case works identically. + +--- + +### Phase 3: ChatClient Session Support + +**File**: `packages/typescript/ai-client/src/chat-client.ts` + +This phase requires further DX exploration. The core need is to decouple: + +1. **Consuming a stream** (continuous, independent of sends) +2. **Sending messages** (writes to proxy, doesn't wait for response) + +#### Areas to Explore + +##### Option A: Explicit Methods + +```typescript +// New method - consume an external stream +async consumeStream(stream: AsyncIterable): Promise { + for await (const chunk of stream) { + this.callbacksRef.current.onChunk(chunk) + this.processor.processChunk(chunk) + } +} + +// New method - send without expecting response via connection +async sendMessageToStream(content: string): Promise { + const userMessage = this.processor.addUserMessage(content) + this.events.messageSent(userMessage.id, content) + return userMessage +} +``` + +##### Option B: Session-Aware Connection Adapter + +Extend the connection adapter interface: + +```typescript +interface SessionConnectionAdapter extends ConnectionAdapter { + // Subscribe to persistent stream + subscribe(): AsyncIterable + + // Write message (doesn't return response stream) + write(message: UserMessage): Promise + + // URL renewal support + renewUrl?(): Promise +} +``` + +ChatClient detects session adapter and behaves accordingly. + +##### Option C: Always Session-Compatible with Extension Hooks + +Make ChatClient always work in a session-compatible way, with hooks for customization: + +```typescript +interface ChatClientOptions { + // ... 
existing options + + // Hook: customize how user messages are added (e.g., optimistic + pending state) + onUserMessageCreated?: (message: UIMessage) => UIMessage + + // Hook: customize send behavior (e.g., write to proxy instead of connect) + sendHandler?: (message: UIMessage, messages: UIMessage[]) => Promise + + // Hook: customize how incoming messages are reconciled + reconcileMessage?: (incoming: UIMessage, existing: UIMessage | undefined) => UIMessage +} +``` + +##### Integration with Durable Fetch + +The existing `fetchServerSentEvents` adapter supports a `fetchClient` option for custom fetch implementations. A durable fetch client can intercept requests and route through the proxy: + +```typescript +const durableFetch = createDurableFetch({ proxyUrl: PROXY_URL }) + +const connection = fetchServerSentEvents('/api/chat', { + fetchClient: durableFetch +}) +``` + +The question is how this integrates with session streams: + +1. Does `durableFetch` return a stream URL that the client then subscribes to? +2. How does the subscription lifecycle map to the connection adapter interface? +3. How are user messages written vs. responses consumed? + +**These integration patterns need further exploration.** + +--- + +### Phase 4: useChat Hook Updates + +**File**: `packages/typescript/ai-react/src/use-chat.ts` + +```typescript +interface UseChatOptions { + // ... existing options + + /** + * Session stream to consume. When provided, messages are consumed from + * this stream instead of using request-response flow. + */ + sessionStream?: AsyncIterable + + /** + * Handler for writing messages in session mode. + * Called instead of connection.connect() when sessionStream is provided. + */ + onSendMessage?: (message: UIMessage) => Promise + + /** + * Called when initial history has been loaded from the stream. + */ + onConnected?: (messageCount: number) => void +} +``` + +**Rationale**: Clean session support at hook level. Fully backwards compatible. + +--- + +## Migration Path + +### For Existing Users + +No changes required. All existing code continues to work: + +- `useChat` with `connection` adapter works identically +- Request-response flow unchanged +- Types are backwards compatible (existing `'assistant'` role still valid) + +### For Session Stream Adopters + +1. Update to new version +2. Provide `sessionStream` and `onSendMessage` options +3. Set up durable stream infrastructure (proxy, stream client) + +--- + +## Open Questions + +### DX Design + +1. **consumeStream vs sessionStream option**: Should stream consumption be a method call or a configuration option? + +2. **Integration with connection adapters**: How should session streams integrate with the existing `fetchServerSentEvents` / durable fetch pattern? Is a new adapter type needed? + +3. **Optimistic updates**: How should the client handle optimistic user message display while waiting for stream confirmation? Extension hooks? Built-in pending state? + +4. **Loading state semantics**: With multiple concurrent messages, what does `isLoading` mean? Per-message loading state? + +### Protocol Alignment + +1. **MessagesSnapshot**: Should we support AG-UI's `MessagesSnapshot` event for initial state sync? How does this interact with streaming history? + +2. **Tool execution gating**: In multi-user streams, should we gate auto-tool-execution to only "our" runs? How do we identify message/run origin? + +### Infrastructure + +1. **URL renewal**: How should stream URL renewal (for expiring signatures) be exposed? Callback? Automatic retry? + +2. 
**Reconnection**: Should the client handle reconnection, or delegate to the stream client library? + +--- + +## Appendix: Current Code References + +### Key Files + +| File | Purpose | +|------|---------| +| `packages/typescript/ai/src/types.ts` | Core types including StreamChunk, AG-UI events | +| `packages/typescript/ai/src/activities/chat/stream/processor.ts` | StreamProcessor - chunk processing state machine | +| `packages/typescript/ai-client/src/chat-client.ts` | ChatClient - orchestrates connection and processor | +| `packages/typescript/ai-client/src/connection-adapters.ts` | Connection adapter implementations | +| `packages/typescript/ai-react/src/use-chat.ts` | React hook integration | + +### Current StreamProcessor Limitations + +1. **Single message tracking**: `currentAssistantMessageId` is singular +2. **ID generation**: Always generates new IDs, ignores chunk messageId +3. **Role assumption**: Hardcodes `'assistant'` role +4. **Tool correlation**: No explicit message-to-tool mapping (relies on temporal ordering) + +### Current ChatClient Limitations + +1. **Coupled send/receive**: `sendMessage` → `streamResponse` → `connect` → `processStream` +2. **Blocking sends**: `isLoading` prevents concurrent sends +3. **No external chunk injection**: Can't feed chunks from external stream + +--- + +## References + +- [AG-UI Protocol Events](https://docs.ag-ui.com/concepts/events) +- [Durable Streams](https://electric-sql.com/products/durable-streams) +- [TanStack AI Connection Adapters](https://tanstack.com/ai/latest/docs/guides/connection-adapters) diff --git a/packages/typescript/ai/src/activities/chat/stream/processor.ts b/packages/typescript/ai/src/activities/chat/stream/processor.ts index 4efa7d49..92cb5c52 100644 --- a/packages/typescript/ai/src/activities/chat/stream/processor.ts +++ b/packages/typescript/ai/src/activities/chat/stream/processor.ts @@ -19,7 +19,6 @@ */ import { generateMessageId, - normalizeToUIMessage, uiMessageToModelMessages, } from '../messages.js' import { defaultJSONParser } from './json-parser' @@ -477,16 +476,16 @@ export class StreamProcessor { this.handleStepFinishedEvent(chunk) break - case 'CUSTOM': - this.handleCustomEvent(chunk) + case 'MESSAGES_SNAPSHOT': + this.handleMessagesSnapshotEvent(chunk) break - case 'STATE_SNAPSHOT': - this.handleStateSnapshotEvent(chunk) + case 'CUSTOM': + this.handleCustomEvent(chunk) break default: - // RUN_STARTED, STEP_STARTED, STATE_DELTA - no special handling needed + // RUN_STARTED, STEP_STARTED, STATE_SNAPSHOT, STATE_DELTA - no special handling needed break } } @@ -532,15 +531,16 @@ export class StreamProcessor { * Used as fallback for events that don't include a messageId. */ private getActiveAssistantMessageId(): string | null { - // Iterate in reverse order of insertion (most recent first) - let lastId: string | null = null - for (const id of this.activeMessageIds) { + // Set iteration is insertion-order; convert to array and search from the end + const ids = Array.from(this.activeMessageIds) + for (let i = ids.length - 1; i >= 0; i--) { + const id = ids[i]! 
const state = this.messageStates.get(id) - if (state && (state.role === 'assistant')) { - lastId = id + if (state && state.role === 'assistant') { + return id } } - return lastId + return null } /** @@ -577,6 +577,7 @@ export class StreamProcessor { this.activeMessageIds.add(id) this.pendingManualMessageId = id this.events.onStreamStart?.() + this.emitMessagesChange() return { messageId: id, state } } @@ -688,23 +689,13 @@ export class StreamProcessor { } /** - * Handle STATE_SNAPSHOT event + * Handle MESSAGES_SNAPSHOT event */ - private handleStateSnapshotEvent( - chunk: Extract, + private handleMessagesSnapshotEvent( + chunk: Extract, ): void { - const stateMessages = ( - chunk.state as { messages?: Array } - )?.messages - if (Array.isArray(stateMessages)) { - this.messages = stateMessages.map((msg) => - normalizeToUIMessage( - msg as UIMessage, - generateMessageId, - ), - ) - this.emitMessagesChange() - } + this.messages = [...chunk.messages] + this.emitMessagesChange() } /** diff --git a/packages/typescript/ai/src/types.ts b/packages/typescript/ai/src/types.ts index 0a9741c3..390abfa9 100644 --- a/packages/typescript/ai/src/types.ts +++ b/packages/typescript/ai/src/types.ts @@ -702,6 +702,7 @@ export type AGUIEventType = | 'TOOL_CALL_END' | 'STEP_STARTED' | 'STEP_FINISHED' + | 'MESSAGES_SNAPSHOT' | 'STATE_SNAPSHOT' | 'STATE_DELTA' | 'CUSTOM' @@ -871,6 +872,19 @@ export interface StepFinishedEvent extends BaseAGUIEvent { content?: string } +/** + * Emitted to provide a snapshot of all messages in a conversation. + * + * Unlike StateSnapshot (which carries arbitrary application state), + * MessagesSnapshot specifically delivers the conversation transcript. + * This is a first-class AG-UI event type. + */ +export interface MessagesSnapshotEvent extends BaseAGUIEvent { + type: 'MESSAGES_SNAPSHOT' + /** Complete array of messages in the conversation */ + messages: Array +} + /** * Emitted to provide a full state snapshot. */ @@ -915,6 +929,7 @@ export type AGUIEvent = | ToolCallEndEvent | StepStartedEvent | StepFinishedEvent + | MessagesSnapshotEvent | StateSnapshotEvent | StateDeltaEvent | CustomEvent diff --git a/packages/typescript/ai/tests/stream-processor.test.ts b/packages/typescript/ai/tests/stream-processor.test.ts index f322bc11..23912974 100644 --- a/packages/typescript/ai/tests/stream-processor.test.ts +++ b/packages/typescript/ai/tests/stream-processor.test.ts @@ -2009,59 +2009,62 @@ describe('StreamProcessor', () => { content: 'Hello from B', }) }) - }) - describe('STATE_SNAPSHOT', () => { - it('should hydrate messages from a state snapshot', () => { - const onMessagesChange = vi.fn() + it('should emit onStreamEnd on finalizeStream (not on TEXT_MESSAGE_END)', () => { + const onStreamEnd = vi.fn() const processor = new StreamProcessor({ - events: { onMessagesChange }, + events: { onStreamEnd }, }) - const snapshotMessages: Array = [ - { - id: 'snap-1', - role: 'user', - parts: [{ type: 'text', content: 'Hello' }], - createdAt: new Date(), - }, - { - id: 'snap-2', - role: 'assistant', - parts: [{ type: 'text', content: 'Hi there!' 
}], - createdAt: new Date(), - }, - ] + processor.processChunk({ + type: 'TEXT_MESSAGE_START', + messageId: 'msg-a', + role: 'assistant', + timestamp: Date.now(), + } as StreamChunk) processor.processChunk({ - type: 'STATE_SNAPSHOT', - state: { messages: snapshotMessages }, + type: 'TEXT_MESSAGE_START', + messageId: 'msg-b', + role: 'assistant', timestamp: Date.now(), } as StreamChunk) - const messages = processor.getMessages() - expect(messages).toHaveLength(2) - expect(messages[0]?.id).toBe('snap-1') - expect(messages[0]?.role).toBe('user') - expect(messages[1]?.id).toBe('snap-2') - expect(messages[1]?.role).toBe('assistant') - expect(onMessagesChange).toHaveBeenCalled() - }) + processor.processChunk({ + type: 'TEXT_MESSAGE_CONTENT', + messageId: 'msg-a', + delta: 'A', + timestamp: Date.now(), + } as StreamChunk) - it('should ignore STATE_SNAPSHOT without messages', () => { - const onMessagesChange = vi.fn() - const processor = new StreamProcessor({ - events: { onMessagesChange }, - }) + processor.processChunk({ + type: 'TEXT_MESSAGE_END', + messageId: 'msg-a', + timestamp: Date.now(), + } as StreamChunk) + + // TEXT_MESSAGE_END does not fire onStreamEnd + expect(onStreamEnd).not.toHaveBeenCalled() processor.processChunk({ - type: 'STATE_SNAPSHOT', - state: { someOtherData: 'value' }, + type: 'TEXT_MESSAGE_CONTENT', + messageId: 'msg-b', + delta: 'B', timestamp: Date.now(), } as StreamChunk) - expect(processor.getMessages()).toHaveLength(0) - expect(onMessagesChange).not.toHaveBeenCalled() + processor.processChunk({ + type: 'TEXT_MESSAGE_END', + messageId: 'msg-b', + timestamp: Date.now(), + } as StreamChunk) + + // Still not fired + expect(onStreamEnd).not.toHaveBeenCalled() + + // finalizeStream fires onStreamEnd for the last assistant message + processor.finalizeStream() + expect(onStreamEnd).toHaveBeenCalledTimes(1) }) }) @@ -2158,6 +2161,27 @@ describe('StreamProcessor', () => { }) }) + describe('backward compat: ensureAssistantMessage auto-creation', () => { + it('should emit onStreamStart when auto-creating a message from content event', () => { + const onStreamStart = vi.fn() + const processor = new StreamProcessor({ + events: { onStreamStart }, + }) + + // No TEXT_MESSAGE_START or startAssistantMessage — content arrives directly + processor.processChunk({ + type: 'TEXT_MESSAGE_CONTENT', + messageId: 'auto-msg', + delta: 'Hello', + timestamp: Date.now(), + } as StreamChunk) + + expect(onStreamStart).toHaveBeenCalledTimes(1) + expect(processor.getMessages()).toHaveLength(1) + expect(processor.getMessages()[0]?.role).toBe('assistant') + }) + }) + describe('backward compat: startAssistantMessage without TEXT_MESSAGE_START', () => { it('should still work when only startAssistantMessage is used', () => { const processor = new StreamProcessor() @@ -2189,6 +2213,71 @@ describe('StreamProcessor', () => { }) }) + describe('MESSAGES_SNAPSHOT', () => { + it('should hydrate messages and emit onMessagesChange', () => { + const onMessagesChange = vi.fn() + const processor = new StreamProcessor({ + events: { onMessagesChange }, + }) + + const snapshotMessages: Array = [ + { + id: 'snap-1', + role: 'user', + parts: [{ type: 'text', content: 'Hello' }], + createdAt: new Date(), + }, + { + id: 'snap-2', + role: 'assistant', + parts: [{ type: 'text', content: 'Hi there!' 
}], + createdAt: new Date(), + }, + ] + + processor.processChunk({ + type: 'MESSAGES_SNAPSHOT', + messages: snapshotMessages, + timestamp: Date.now(), + } as StreamChunk) + + const messages = processor.getMessages() + expect(messages).toHaveLength(2) + expect(messages[0]?.id).toBe('snap-1') + expect(messages[0]?.role).toBe('user') + expect(messages[1]?.id).toBe('snap-2') + expect(messages[1]?.role).toBe('assistant') + expect(onMessagesChange).toHaveBeenCalled() + }) + + it('should replace existing messages (not append)', () => { + const processor = new StreamProcessor() + + // Add an initial message + processor.addUserMessage('First message') + expect(processor.getMessages()).toHaveLength(1) + + // Snapshot replaces all messages + processor.processChunk({ + type: 'MESSAGES_SNAPSHOT', + messages: [ + { + id: 'snap-1', + role: 'assistant', + parts: [{ type: 'text', content: 'Snapshot content' }], + createdAt: new Date(), + }, + ], + timestamp: Date.now(), + } as StreamChunk) + + const messages = processor.getMessages() + expect(messages).toHaveLength(1) + expect(messages[0]?.id).toBe('snap-1') + expect(messages[0]?.role).toBe('assistant') + }) + }) + describe('per-message tool calls', () => { it('should route tool calls to the correct message via parentMessageId', () => { const processor = new StreamProcessor() From 3f1ccb5e993c61f856184eef05911f0e59791787 Mon Sep 17 00:00:00 2001 From: James Arthur Date: Mon, 9 Feb 2026 17:55:16 -0800 Subject: [PATCH 05/20] feat(ai-client): add SessionAdapter interface and createDefaultSession Co-Authored-By: Claude Opus 4.6 --- packages/typescript/ai-client/src/index.ts | 4 + .../ai-client/src/session-adapter.ts | 83 +++++++ .../ai-client/tests/session-adapter.test.ts | 214 ++++++++++++++++++ 3 files changed, 301 insertions(+) create mode 100644 packages/typescript/ai-client/src/session-adapter.ts create mode 100644 packages/typescript/ai-client/tests/session-adapter.test.ts diff --git a/packages/typescript/ai-client/src/index.ts b/packages/typescript/ai-client/src/index.ts index b279605d..b7a77a47 100644 --- a/packages/typescript/ai-client/src/index.ts +++ b/packages/typescript/ai-client/src/index.ts @@ -30,6 +30,10 @@ export { type ConnectionAdapter, type FetchConnectionOptions, } from './connection-adapters' +export { + createDefaultSession, + type SessionAdapter, +} from './session-adapter' // Re-export message converters from @tanstack/ai export { diff --git a/packages/typescript/ai-client/src/session-adapter.ts b/packages/typescript/ai-client/src/session-adapter.ts new file mode 100644 index 00000000..78a992b4 --- /dev/null +++ b/packages/typescript/ai-client/src/session-adapter.ts @@ -0,0 +1,83 @@ +import type { StreamChunk, UIMessage } from '@tanstack/ai' +import type { ConnectionAdapter } from './connection-adapters' + +/** + * Session adapter interface for persistent stream-based chat sessions. + * + * Unlike ConnectionAdapter (which creates a new stream per request), + * a SessionAdapter maintains a persistent subscription. Responses from + * send() arrive through subscribe(), not as a return value. + * + * The subscribe() stream yields standard AG-UI events (StreamChunk). + * The processor handles whichever event types it supports — currently + * text message lifecycle, tool calls, and MESSAGES_SNAPSHOT. Future + * event handlers (STATE_SNAPSHOT, STATE_DELTA, etc.) are purely additive. + */ +export interface SessionAdapter { + /** + * Subscribe to the session stream. + * Returns an async iterable that yields chunks continuously. 
+ * For durable sessions, this may first yield a MESSAGES_SNAPSHOT + * to hydrate the conversation, then subscribe to the live stream + * from the appropriate offset. + */ + subscribe(signal?: AbortSignal): AsyncIterable + + /** + * Send messages to the session. + * For durable sessions, the proxy writes to the stream and forwards to the API. + * The response arrives through subscribe(), not as a return value. + */ + send( + messages: Array, + data?: Record, + signal?: AbortSignal, + ): Promise +} + +/** + * Wraps a ConnectionAdapter into a SessionAdapter using an async queue pattern. + * send() calls connection.connect() and pushes chunks to the queue. + * subscribe() yields chunks from the queue. + */ +export function createDefaultSession( + connection: ConnectionAdapter, +): SessionAdapter { + const buffer: Array = [] + const waiters: Array<(chunk: StreamChunk | null) => void> = [] + + function push(chunk: StreamChunk): void { + const waiter = waiters.shift() + if (waiter) { + waiter(chunk) + } else { + buffer.push(chunk) + } + } + + return { + async *subscribe(signal?: AbortSignal) { + while (!signal?.aborted) { + let chunk: StreamChunk | null + if (buffer.length > 0) { + chunk = buffer.shift()! + } else { + chunk = await new Promise((resolve) => { + waiters.push(resolve) + signal?.addEventListener('abort', () => resolve(null), { + once: true, + }) + }) + } + if (chunk !== null) yield chunk + } + }, + + async send(messages, data, signal) { + const stream = connection.connect(messages, data, signal) + for await (const chunk of stream) { + push(chunk) + } + }, + } +} diff --git a/packages/typescript/ai-client/tests/session-adapter.test.ts b/packages/typescript/ai-client/tests/session-adapter.test.ts new file mode 100644 index 00000000..11046766 --- /dev/null +++ b/packages/typescript/ai-client/tests/session-adapter.test.ts @@ -0,0 +1,214 @@ +import { describe, expect, it, vi } from 'vitest' +import { createDefaultSession } from '../src/session-adapter' +import { createMockConnectionAdapter, createTextChunks } from './test-utils' +import type { StreamChunk } from '@tanstack/ai' + +describe('createDefaultSession', () => { + it('should yield chunks sent through send() via subscribe()', async () => { + const chunks = createTextChunks('Hi', 'msg-1') + const connection = createMockConnectionAdapter({ chunks }) + const session = createDefaultSession(connection) + + const abortController = new AbortController() + const iterator = session.subscribe(abortController.signal) + + // Send messages — this pushes all chunks into the queue + await session.send([], undefined) + + // Collect chunks from the subscription + const received: Array = [] + for await (const chunk of iterator) { + received.push(chunk) + // Stop after receiving all expected chunks + if (received.length === chunks.length) { + abortController.abort() + } + } + + expect(received).toEqual(chunks) + }) + + it('should deliver chunks from multiple sends in order', async () => { + const chunks1: Array = [ + { + type: 'TEXT_MESSAGE_CONTENT', + messageId: 'msg-1', + model: 'test', + timestamp: Date.now(), + delta: 'A', + content: 'A', + }, + ] + const chunks2: Array = [ + { + type: 'TEXT_MESSAGE_CONTENT', + messageId: 'msg-2', + model: 'test', + timestamp: Date.now(), + delta: 'B', + content: 'B', + }, + ] + + let callCount = 0 + const connection = createMockConnectionAdapter({ + chunks: [], // overridden below + }) + // Override connect to return different chunks per call + connection.connect = function (_messages, _data, _signal) { + 
+      callCount++
+      const currentChunks = callCount === 1 ? chunks1 : chunks2
+      return (async function* () {
+        for (const chunk of currentChunks) {
+          yield chunk
+        }
+      })()
+    }
+
+    const session = createDefaultSession(connection)
+    const abortController = new AbortController()
+    const iterator = session.subscribe(abortController.signal)
+
+    // Send both in sequence
+    await session.send([], undefined)
+    await session.send([], undefined)
+
+    const received: Array<StreamChunk> = []
+    for await (const chunk of iterator) {
+      received.push(chunk)
+      if (received.length === 2) {
+        abortController.abort()
+      }
+    }
+
+    expect(received).toEqual([...chunks1, ...chunks2])
+  })
+
+  it('should stop the iterator when the abort signal fires', async () => {
+    const connection = createMockConnectionAdapter({ chunks: [] })
+    const session = createDefaultSession(connection)
+
+    const abortController = new AbortController()
+    const iterator = session.subscribe(abortController.signal)
+
+    // Abort immediately — the iterator should stop without yielding
+    abortController.abort()
+
+    const received: Array<StreamChunk> = []
+    for await (const chunk of iterator) {
+      received.push(chunk)
+    }
+
+    expect(received).toEqual([])
+  })
+
+  it('should abort a waiting subscriber', async () => {
+    const connection = createMockConnectionAdapter({ chunks: [] })
+    const session = createDefaultSession(connection)
+
+    const abortController = new AbortController()
+    const iterator = session.subscribe(abortController.signal)
+
+    // Start consuming — this will block waiting for chunks
+    const resultPromise = (async () => {
+      const received: Array<StreamChunk> = []
+      for await (const chunk of iterator) {
+        received.push(chunk)
+      }
+      return received
+    })()
+
+    // Let the subscriber start waiting
+    await new Promise((resolve) => setTimeout(resolve, 10))
+
+    // Abort — should unblock the subscriber
+    abortController.abort()
+
+    const received = await resultPromise
+    expect(received).toEqual([])
+  })
+
+  it('should propagate errors from connection.connect() through send()', async () => {
+    const testError = new Error('connection failed')
+    const connection = createMockConnectionAdapter({
+      shouldError: true,
+      error: testError,
+    })
+    const session = createDefaultSession(connection)
+
+    await expect(session.send([], undefined)).rejects.toThrow(
+      'connection failed',
+    )
+  })
+
+  it('should buffer chunks when subscriber is not yet consuming', async () => {
+    const chunks = createTextChunks('AB', 'msg-1')
+    const connection = createMockConnectionAdapter({ chunks })
+    const session = createDefaultSession(connection)
+
+    // Send first, before subscribing
+    await session.send([], undefined)
+
+    // Now subscribe and consume
+    const abortController = new AbortController()
+    const iterator = session.subscribe(abortController.signal)
+
+    const received: Array<StreamChunk> = []
+    for await (const chunk of iterator) {
+      received.push(chunk)
+      if (received.length === chunks.length) {
+        abortController.abort()
+      }
+    }
+
+    expect(received).toEqual(chunks)
+  })
+
+  it('should pass messages and data through to connection.connect()', async () => {
+    const onConnect = vi.fn()
+    const connection = createMockConnectionAdapter({
+      chunks: [
+        {
+          type: 'RUN_FINISHED',
+          runId: 'r1',
+          model: 'test',
+          timestamp: Date.now(),
+          finishReason: 'stop',
+        },
+      ],
+      onConnect,
+    })
+    const session = createDefaultSession(connection)
+
+    const messages = [
+      {
+        id: 'u1',
+        role: 'user' as const,
+        parts: [{ type: 'text' as const, content: 'hello' }],
+      },
+    ]
+    const data = { model: 'gpt-4o' }
+
+    await session.send(messages, data)
+
+    expect(onConnect).toHaveBeenCalledWith(
+      messages,
+      data,
+      undefined, // signal
+    )
+  })
+
+  it('should pass abort signal from send() to connection.connect()', async () => {
+    const onConnect = vi.fn()
+    const connection = createMockConnectionAdapter({
+      chunks: [],
+      onConnect,
+    })
+    const session = createDefaultSession(connection)
+
+    const abortController = new AbortController()
+    await session.send([], undefined, abortController.signal)
+
+    expect(onConnect).toHaveBeenCalledWith(
+      [],
+      undefined,
+      abortController.signal,
+    )
+  })
+})

From 202244a076a1df75c1fa56464b51c4e7fe09f2d1 Mon Sep 17 00:00:00 2001
From: James Arthur
Date: Mon, 9 Feb 2026 21:24:06 -0800
Subject: [PATCH 06/20] feat(ai-client): refactor ChatClient to use SessionAdapter subscription model

Replace direct ConnectionAdapter usage in ChatClient with a
SessionAdapter-based subscription loop. When only a ConnectionAdapter is
provided, it is wrapped in a DefaultSessionAdapter internally.

This enables persistent session support while preserving existing timing
semantics and backwards compatibility.

Co-Authored-By: Claude Opus 4.6
---
 .../typescript/ai-client/src/chat-client.ts   | 176 ++++++++++++------
 .../ai-client/src/session-adapter.ts          |   3 +
 packages/typescript/ai-client/src/types.ts    |  15 +-
 .../ai-client/tests/chat-client.test.ts       |   6 +
 packages/typescript/ai-react/src/use-chat.ts  |   1 +
 packages/typescript/ai-solid/src/use-chat.ts  |   1 +
 .../ai-svelte/src/create-chat.svelte.ts       |   1 +
 packages/typescript/ai-vue/src/use-chat.ts    |   1 +
 8 files changed, 139 insertions(+), 65 deletions(-)

diff --git a/packages/typescript/ai-client/src/chat-client.ts b/packages/typescript/ai-client/src/chat-client.ts
index 65554a44..fd496c99 100644
--- a/packages/typescript/ai-client/src/chat-client.ts
+++ b/packages/typescript/ai-client/src/chat-client.ts
@@ -11,6 +11,8 @@ import type {
   StreamChunk,
 } from '@tanstack/ai'
 import type { ConnectionAdapter } from './connection-adapters'
+import type { SessionAdapter } from './session-adapter'
+import { createDefaultSession } from './session-adapter'
 import type { ChatClientEventEmitter } from './events'
 import type {
   ChatClientOptions,
@@ -23,7 +25,7 @@ import type {
 
 export class ChatClient {
   private processor: StreamProcessor
-  private connection: ConnectionAdapter
+  private session!: SessionAdapter
   private uniqueId: string
   private body: Record<string, unknown> = {}
   private pendingMessageBody: Record<string, unknown> | undefined = undefined
@@ -40,6 +42,8 @@ export class ChatClient {
   private pendingToolExecutions: Map<string, Promise<void>> = new Map()
   // Flag to deduplicate continuation checks during action draining
   private continuationPending = false
+  private subscriptionAbortController: AbortController | null = null
+  private processingResolve: (() => void) | null = null
 
   private callbacksRef: {
     current: {
@@ -57,7 +61,15 @@ export class ChatClient {
   constructor(options: ChatClientOptions) {
     this.uniqueId = options.id || this.generateUniqueId('chat')
     this.body = options.body || {}
-    this.connection = options.connection
+
+    // Resolve session adapter
+    if (options.session) {
+      this.session = options.session
+    } else if (options.connection) {
+      this.session = createDefaultSession(options.connection)
+    } else {
+      throw new Error('Either connection or session must be provided')
+    }
     this.events = new DefaultChatClientEventEmitter(this.uniqueId)
 
     // Build client tools map
@@ -95,6 +107,9 @@ export class ChatClient {
       onStreamEnd: (message: UIMessage) => {
        this.callbacksRef.current.onFinish(message)
        this.setStatus('ready')
+        // Resolve the processing-complete promise so streamResponse can continue
+        this.processingResolve?.()
+        this.processingResolve = null
      },
      onError: (error: Error) => {
        this.setError(error)
@@ -226,68 +241,60 @@ export class ChatClient {
   }
 
   /**
-   * Process a stream through the StreamProcessor
+   * Start the background subscription loop.
    */
-  private async processStream(
-    source: AsyncIterable<StreamChunk>,
-  ): Promise<UIMessage | null> {
-    // Generate a stream ID for this streaming operation
-    this.currentStreamId = this.generateUniqueId('stream')
+  private startSubscription(): void {
+    this.subscriptionAbortController = new AbortController()
+    const signal = this.subscriptionAbortController.signal
 
-    // Prepare for a new assistant message (created lazily on first content)
-    this.processor.prepareAssistantMessage()
+    this.consumeSubscription(signal).catch((err) => {
+      if (err instanceof Error && err.name !== 'AbortError') {
+        this.setError(err)
+        this.setStatus('error')
+        this.callbacksRef.current.onError(err)
+      }
+      // Resolve pending processing so streamResponse doesn't hang
+      this.processingResolve?.()
+      this.processingResolve = null
+    })
+  }
 
-    // Process each chunk
-    for await (const chunk of source) {
+  /**
+   * Consume chunks from the session subscription.
+   */
+  private async consumeSubscription(signal: AbortSignal): Promise<void> {
+    const stream = this.session.subscribe(signal)
+    for await (const chunk of stream) {
+      if (signal.aborted) break
       this.callbacksRef.current.onChunk(chunk)
       this.processor.processChunk(chunk)
-
-      // Track the message ID once the processor lazily creates it
-      if (!this.currentMessageId) {
-        const newMessageId =
-          this.processor.getCurrentAssistantMessageId() ?? null
-        if (newMessageId) {
-          this.currentMessageId = newMessageId
-          // Emit message appended event now that the assistant message exists
-          const assistantMessage = this.processor
-            .getMessages()
-            .find((m: UIMessage) => m.id === newMessageId)
-          if (assistantMessage) {
-            this.events.messageAppended(
-              assistantMessage,
-              this.currentStreamId || undefined,
-            )
-          }
-        }
-      }
-
-      // Yield control back to event loop to allow UI updates
+      // Yield control back to event loop for UI updates
       await new Promise((resolve) => setTimeout(resolve, 0))
     }
+  }
 
-    // Wait for all pending tool executions to complete before finalizing
-    // This ensures client tools finish before we check for continuation
-    if (this.pendingToolExecutions.size > 0) {
-      await Promise.all(this.pendingToolExecutions.values())
-    }
-
-    // Finalize the stream
-    this.processor.finalizeStream()
-
-    // Get the message ID (may be null if no content arrived)
-    const messageId = this.processor.getCurrentAssistantMessageId()
-
-    // Clear the current stream and message IDs
-    this.currentStreamId = null
-    this.currentMessageId = null
-
-    // Return the assistant message if one was created
-    if (messageId) {
-      const messages = this.processor.getMessages()
-      return messages.find((m: UIMessage) => m.id === messageId) || null
+  /**
+   * Ensure subscription loop is running, starting it if needed.
+   */
+  private ensureSubscription(): void {
+    if (
+      !this.subscriptionAbortController ||
+      this.subscriptionAbortController.signal.aborted
+    ) {
+      this.startSubscription()
     }
+  }
 
-    return null
+  /**
+   * Create a promise that resolves when onStreamEnd fires.
+   * Used by streamResponse to await processing completion.
+   */
+  private waitForProcessing(): Promise<void> {
+    // Resolve any stale promise (e.g., from a previous aborted request)
+    this.processingResolve?.()
+    return new Promise<void>((resolve) => {
+      this.processingResolve = resolve
+    })
+  }

  /**
@@ -433,14 +440,44 @@ export class ChatClient {
     // Clear the pending message body after use
     this.pendingMessageBody = undefined
 
-    // Connect and stream
-    const stream = this.connection.connect(
-      messages,
-      mergedBody,
-      this.abortController.signal,
+    // Generate stream ID and start assistant message
+    this.currentStreamId = this.generateUniqueId('stream')
+    const messageId = this.processor.startAssistantMessage()
+    this.currentMessageId = messageId
+
+    const assistantMessage: UIMessage = {
+      id: messageId,
+      role: 'assistant',
+      parts: [],
+      createdAt: new Date(),
+    }
+    this.events.messageAppended(
+      assistantMessage,
+      this.currentStreamId || undefined,
     )
 
-    await this.processStream(stream)
+    // Ensure subscription loop is running
+    this.ensureSubscription()
+
+    // Set up promise that resolves when onStreamEnd fires
+    const processingComplete = this.waitForProcessing()
+
+    // Send through session adapter (pushes chunks to subscription queue)
+    await this.session.send(messages, mergedBody, this.abortController.signal)
+
+    // Wait for subscription loop to finish processing all chunks
+    await processingComplete
+
+    // Wait for pending client tool executions
+    if (this.pendingToolExecutions.size > 0) {
+      await Promise.all(this.pendingToolExecutions.values())
+    }
+
+    // Finalize (idempotent — may already be done by RUN_FINISHED handler)
+    this.processor.finalizeStream()
+
+    this.currentStreamId = null
+    this.currentMessageId = null
     streamCompletedSuccessfully = true
   } catch (err) {
     if (err instanceof Error) {
@@ -502,10 +539,20 @@ export class ChatClient {
   /**
    * Stop the current stream
    */
   stop(): void {
+    // Abort any in-flight send
     if (this.abortController) {
       this.abortController.abort()
       this.abortController = null
     }
+
+    // Abort the subscription loop
+    this.subscriptionAbortController?.abort()
+    this.subscriptionAbortController = null
+
+    // Resolve any pending processing promise (unblock streamResponse)
+    this.processingResolve?.()
+    this.processingResolve = null
+
    this.setIsLoading(false)
    this.setStatus('ready')
    this.events.stopped()
@@ -678,6 +725,7 @@ export class ChatClient {
   */
  updateOptions(options: {
    connection?: ConnectionAdapter
+    session?: SessionAdapter
    body?: Record<string, unknown>
    tools?: ReadonlyArray
    onResponse?: (response?: Response) => void | Promise<void>
    onChunk?: (chunk: StreamChunk) => void
    onFinish?: (message: UIMessage) => void
    onError?: (error: Error) => void
  }): void {
-    if (options.connection !== undefined) {
-      this.connection = options.connection
+    if (options.session !== undefined) {
+      this.subscriptionAbortController?.abort()
+      this.session = options.session
+    } else if (options.connection !== undefined) {
+      this.subscriptionAbortController?.abort()
+      this.session = createDefaultSession(options.connection)
    }
    if (options.body !== undefined) {
      this.body = options.body
diff --git a/packages/typescript/ai-client/src/session-adapter.ts b/packages/typescript/ai-client/src/session-adapter.ts
index 78a992b4..93805834 100644
--- a/packages/typescript/ai-client/src/session-adapter.ts
+++ b/packages/typescript/ai-client/src/session-adapter.ts
@@ -71,6 +71,9 @@ export function createDefaultSession(
         }
         if (chunk !== null) yield chunk
       }
+      // Discard any chunks buffered after abort to prevent stale data
+      // leaking into the next subscription
+      buffer.length = 0
     },
 
     async send(messages, data, signal) {
diff --git a/packages/typescript/ai-client/src/types.ts b/packages/typescript/ai-client/src/types.ts
index 98572548..818b3fd6 100644
--- a/packages/typescript/ai-client/src/types.ts
+++ b/packages/typescript/ai-client/src/types.ts
@@ -12,6 +12,7 @@ import type {
   VideoPart,
 } from '@tanstack/ai'
 import type { ConnectionAdapter } from './connection-adapters'
+import type { SessionAdapter } from './session-adapter'
 
 /**
  * Tool call states - track the lifecycle of a tool call
@@ -178,10 +179,18 @@ export interface ChatClientOptions<
   TTools extends ReadonlyArray = any,
 > {
   /**
-   * Connection adapter for streaming
-   * Use fetchServerSentEvents(), fetchHttpStream(), or stream() to create adapters
+   * Connection adapter for streaming (request-response mode).
+   * Wrapped in a DefaultSessionAdapter internally.
+   * Provide either `connection` or `session`, not both.
    */
-  connection: ConnectionAdapter
+  connection?: ConnectionAdapter
+
+  /**
+   * Session adapter for persistent stream-based sessions.
+   * When provided, takes precedence over `connection`.
+   * Provide either `connection` or `session`, not both.
+   */
+  session?: SessionAdapter
 
   /**
    * Initial messages to populate the chat
diff --git a/packages/typescript/ai-client/tests/chat-client.test.ts b/packages/typescript/ai-client/tests/chat-client.test.ts
index 27960378..eaf2a778 100644
--- a/packages/typescript/ai-client/tests/chat-client.test.ts
+++ b/packages/typescript/ai-client/tests/chat-client.test.ts
@@ -74,6 +74,12 @@ describe('ChatClient', () => {
     // Message IDs should be unique between clients
     expect(client1MessageId).not.toBe(client2MessageId)
   })
+
+  it('should throw if neither connection nor session is provided', () => {
+    expect(() => new ChatClient({} as any)).toThrow(
+      'Either connection or session must be provided',
+    )
+  })
 })
 
 describe('sendMessage', () => {
diff --git a/packages/typescript/ai-react/src/use-chat.ts b/packages/typescript/ai-react/src/use-chat.ts
index 2cdc02d1..7a4d78e5 100644
--- a/packages/typescript/ai-react/src/use-chat.ts
+++ b/packages/typescript/ai-react/src/use-chat.ts
@@ -52,6 +52,7 @@ export function useChat = any>(
 
     return new ChatClient({
       connection: optionsRef.current.connection,
+      session: optionsRef.current.session,
       id: clientId,
       initialMessages: messagesToUse,
       body: optionsRef.current.body,
diff --git a/packages/typescript/ai-solid/src/use-chat.ts b/packages/typescript/ai-solid/src/use-chat.ts
index 77d0edf9..206e27f4 100644
--- a/packages/typescript/ai-solid/src/use-chat.ts
+++ b/packages/typescript/ai-solid/src/use-chat.ts
@@ -35,6 +35,7 @@ export function useChat = any>(
   const client = createMemo(() => {
     return new ChatClient({
       connection: options.connection,
+      session: options.session,
       id: clientId,
       initialMessages: options.initialMessages,
       body: options.body,
diff --git a/packages/typescript/ai-svelte/src/create-chat.svelte.ts b/packages/typescript/ai-svelte/src/create-chat.svelte.ts
index 5354ae11..e4490483 100644
--- a/packages/typescript/ai-svelte/src/create-chat.svelte.ts
+++ b/packages/typescript/ai-svelte/src/create-chat.svelte.ts
@@ -55,6 +55,7 @@ export function createChat = any>(
   // Create ChatClient instance
   const client = new ChatClient({
     connection: options.connection,
+    session: options.session,
     id: clientId,
     initialMessages: options.initialMessages,
     body: options.body,
diff --git a/packages/typescript/ai-vue/src/use-chat.ts b/packages/typescript/ai-vue/src/use-chat.ts
index 6042fc53..80ac0bb8 100644
--- a/packages/typescript/ai-vue/src/use-chat.ts
+++ b/packages/typescript/ai-vue/src/use-chat.ts
@@ -25,6 +25,7 @@ export function useChat = any>(
   // Create ChatClient instance with callbacks to sync state
   const client = new ChatClient({
     connection: options.connection,
+    session: options.session,
     id: clientId,
     initialMessages: options.initialMessages,
     body: options.body,

From d4cc2b14ecbd10ae45b26e779d257af57b9a8814 Mon Sep 17 00:00:00 2001
From: James Arthur
Date: Mon, 9 Feb 2026 22:14:48 -0800
Subject: [PATCH 07/20] fix(ai-preact): thread session option through to ChatClient.

---
 packages/typescript/ai-preact/src/use-chat.ts | 1 +
 1 file changed, 1 insertion(+)

diff --git a/packages/typescript/ai-preact/src/use-chat.ts b/packages/typescript/ai-preact/src/use-chat.ts
index cfa9340f..10576277 100644
--- a/packages/typescript/ai-preact/src/use-chat.ts
+++ b/packages/typescript/ai-preact/src/use-chat.ts
@@ -53,6 +53,7 @@ export function useChat = any>(
 
     return new ChatClient({
       connection: optionsRef.current.connection,
+      session: optionsRef.current.session,
       id: clientId,
       initialMessages: messagesToUse,
       body: optionsRef.current.body,

From f45dd77c1cad09cb6905c09467bfd9b0a8b826dd Mon Sep 17 00:00:00 2001
From: James Arthur
Date: Mon, 9 Feb 2026 22:15:34 -0800
Subject: [PATCH 08/20] fix(ai): finalize the stream when RUN_FINISHED arrives.

---
 packages/typescript/ai/src/activities/chat/stream/processor.ts | 1 +
 1 file changed, 1 insertion(+)

diff --git a/packages/typescript/ai/src/activities/chat/stream/processor.ts b/packages/typescript/ai/src/activities/chat/stream/processor.ts
index 92cb5c52..8d946d42 100644
--- a/packages/typescript/ai/src/activities/chat/stream/processor.ts
+++ b/packages/typescript/ai/src/activities/chat/stream/processor.ts
@@ -975,6 +975,7 @@ export class StreamProcessor {
     this.finishReason = chunk.finishReason
     this.isDone = true
     this.completeAllToolCalls()
+    this.finalizeStream()
   }
 
   /**

From 6d1c7337d28aa02be099e0a51301c5b503e4e082 Mon Sep 17 00:00:00 2001
From: James Arthur
Date: Mon, 9 Feb 2026 22:54:34 -0800
Subject: [PATCH 09/20] fix(ai-client): handle reload during active stream with generation counter

reload() now cancels the active stream (abort controllers, subscription,
processing promise) before starting a new one. A stream generation counter
prevents a superseded stream's async cleanup from clobbering the new
stream's state (abortController, isLoading, processor).

Co-Authored-By: Claude Opus 4.6
---
 .../typescript/ai-client/src/chat-client.ts | 71 +++++++++++++------
 1 file changed, 51 insertions(+), 20 deletions(-)

diff --git a/packages/typescript/ai-client/src/chat-client.ts b/packages/typescript/ai-client/src/chat-client.ts
index fd496c99..650c1a2b 100644
--- a/packages/typescript/ai-client/src/chat-client.ts
+++ b/packages/typescript/ai-client/src/chat-client.ts
@@ -44,6 +44,7 @@ export class ChatClient {
   private continuationPending = false
   private subscriptionAbortController: AbortController | null = null
   private processingResolve: (() => void) | null = null
+  private streamGeneration = 0
 
   private callbacksRef: {
     current: {
@@ -414,6 +415,9 @@ export class ChatClient {
       return
     }
 
+    // Track generation so a superseded stream's cleanup doesn't clobber the new one
+    const generation = ++this.streamGeneration
+
     this.setIsLoading(true)
     this.setStatus('submitted')
     this.setError(undefined)
@@ -468,6 +472,12 @@ export class ChatClient {
       // Wait for subscription loop to finish processing all chunks
       await processingComplete
 
+      // If this stream was superseded (e.g. by reload()), bail out —
+      // the new stream owns the processor and processingResolve now.
+ if (generation !== this.streamGeneration) { + return + } + // Wait for pending client tool executions if (this.pendingToolExecutions.size > 0) { await Promise.all(this.pendingToolExecutions.values()) @@ -484,28 +494,38 @@ export class ChatClient { if (err.name === 'AbortError') { return } - this.setError(err) - this.setStatus('error') - this.callbacksRef.current.onError(err) + if (generation === this.streamGeneration) { + this.setError(err) + this.setStatus('error') + this.callbacksRef.current.onError(err) + } } } finally { - this.abortController = null - this.setIsLoading(false) - this.pendingMessageBody = undefined // Ensure it's cleared even on error - - // Drain any actions that were queued while the stream was in progress - await this.drainPostStreamActions() - - // Continue conversation if the stream ended with a tool result (server tool completed) - if (streamCompletedSuccessfully) { - const messages = this.processor.getMessages() - const lastPart = messages.at(-1)?.parts.at(-1) - - if (lastPart?.type === 'tool-result' && this.shouldAutoSend()) { - try { - await this.checkForContinuation() - } catch (error) { - console.error('Failed to continue flow after tool result:', error) + // Only clean up if this is still the active stream. + // A superseded stream (e.g. reload() started a new one) must not + // clobber the new stream's abortController or isLoading state. + if (generation === this.streamGeneration) { + this.abortController = null + this.setIsLoading(false) + this.pendingMessageBody = undefined // Ensure it's cleared even on error + + // Drain any actions that were queued while the stream was in progress + await this.drainPostStreamActions() + + // Continue conversation if the stream ended with a tool result (server tool completed) + if (streamCompletedSuccessfully) { + const messages = this.processor.getMessages() + const lastPart = messages.at(-1)?.parts.at(-1) + + if (lastPart?.type === 'tool-result' && this.shouldAutoSend()) { + try { + await this.checkForContinuation() + } catch (error) { + console.error( + 'Failed to continue flow after tool result:', + error, + ) + } } } } @@ -526,6 +546,17 @@ export class ChatClient { if (lastUserMessageIndex === -1) return + // Cancel any active stream before reloading + if (this.isLoading) { + this.abortController?.abort() + this.abortController = null + this.subscriptionAbortController?.abort() + this.subscriptionAbortController = null + this.processingResolve?.() + this.processingResolve = null + this.setIsLoading(false) + } + this.events.reloaded(lastUserMessageIndex) // Remove all messages after the last user message From a217426561774c375b2f89949ae7add617011d8e Mon Sep 17 00:00:00 2001 From: James Arthur Date: Mon, 9 Feb 2026 23:07:51 -0800 Subject: [PATCH 10/20] docs: remove proposal docs. --- docs/proposals/resumeable-session-plan.md | 1033 --------------------- docs/proposals/session-stream-support.md | 523 ----------- 2 files changed, 1556 deletions(-) delete mode 100644 docs/proposals/resumeable-session-plan.md delete mode 100644 docs/proposals/session-stream-support.md diff --git a/docs/proposals/resumeable-session-plan.md b/docs/proposals/resumeable-session-plan.md deleted file mode 100644 index 98d687bb..00000000 --- a/docs/proposals/resumeable-session-plan.md +++ /dev/null @@ -1,1033 +0,0 @@ -# Implementation Plan: Resumeable Session Support - -> **Purpose**: Step-by-step guide for implementing resumeable session support in TanStack AI. 
-> Produces a PR with code changes + a PR description markdown artifact for review.
->
-> **Approach**: Unified SessionAdapter (Approach B). The ChatClient always operates
-> through a `SessionAdapter` interface. When only a `ConnectionAdapter` is provided,
-> it is wrapped in a `DefaultSessionAdapter` internally.
->
-> **Design context**: See `docs/proposals/session-stream-support.md` for full rationale.

---

## Progress

Steps 1–3 are **complete** (with revisions noted inline). The following revisions
were made during implementation:

1. `getActiveAssistantMessageId` — iterates the Set in reverse via `Array.from()` +
   backward loop, returning on first assistant match
2. `onStreamEnd` — fires for every message on `TEXT_MESSAGE_END`, not just the last one
3. `STATE_SNAPSHOT` — handler and tests removed. Event falls through to the default
   no-op case. Replaced by `MESSAGES_SNAPSHOT` (see Step 3e)
4. `ensureAssistantMessage` — calls `onStreamStart` and `emitMessagesChange` when it
   auto-creates a message

### PR Boundary

**PR 1 (this PR)**: Steps 1–3 — StreamProcessor per-message state refactor +
AG-UI type alignment + `MESSAGES_SNAPSHOT` support.

**PR 2 (follow-up)**: Steps 4–7 — SessionAdapter interface, DefaultSessionAdapter,
ChatClient refactor, framework hook updates.

**Future PR**: `STATE_SNAPSHOT` / `STATE_DELTA` handling with managed `sessionState`
container (see "Extensibility: Session State" section at the end of this document).

---

## Pre-flight

Before starting, verify the baseline:

```bash
cd packages/typescript/ai && pnpm test:lib && cd ../../..
cd packages/typescript/ai-client && pnpm test:lib && cd ../../..
cd packages/typescript/ai-react && pnpm test:lib && cd ../../..
```

All existing tests must pass before any changes.

---

## Step 1: AG-UI Type Alignment [DONE]

**Files**: `packages/typescript/ai/src/types.ts`

### 1a. Expand `TextMessageStartEvent.role`

```typescript
// Before
export interface TextMessageStartEvent extends BaseAGUIEvent {
  type: 'TEXT_MESSAGE_START'
  messageId: string
  role: 'assistant' // hardcoded
}

// After
export interface TextMessageStartEvent extends BaseAGUIEvent {
  type: 'TEXT_MESSAGE_START'
  messageId: string
  role: 'user' | 'assistant' | 'system' | 'tool'
}
```

### 1b. Add `parentMessageId` to `ToolCallStartEvent`

```typescript
export interface ToolCallStartEvent extends BaseAGUIEvent {
  type: 'TOOL_CALL_START'
  toolCallId: string
  toolName: string
  parentMessageId?: string // NEW
  index?: number
}
```

### 1c. Add `MessagesSnapshotEvent`

AG-UI defines `MessagesSnapshot` as a first-class event type, distinct from
`StateSnapshot`. It delivers a complete history of messages in the current
conversation — used for initializing chat history, synchronizing after connection
interruptions, or hydrating state when a user joins an ongoing conversation.

```typescript
/**
 * Emitted to provide a snapshot of all messages in a conversation.
 *
 * Unlike StateSnapshot (which carries arbitrary application state),
 * MessagesSnapshot specifically delivers the conversation transcript.
 * This is a first-class AG-UI event type.
 */
export interface MessagesSnapshotEvent extends BaseAGUIEvent {
  type: 'MESSAGES_SNAPSHOT'
  /** Complete array of messages in the conversation */
  messages: Array<UIMessage>
}
```

Add `'MESSAGES_SNAPSHOT'` to the `AGUIEventType` union and `MessagesSnapshotEvent`
to the `AGUIEvent` union (and therefore `StreamChunk`).

### 1d. Verify

Run `pnpm test:types` in the `ai` package. These are purely additive type changes
with no behavioral impact. Existing code that sets `role: 'assistant'` still compiles.

---

## Step 2: StreamProcessor — Per-Message State Types [DONE]

**Files**: `packages/typescript/ai/src/activities/chat/stream/types.ts`

### 2a. Add `MessageStreamState` interface

```typescript
/**
 * Per-message streaming state.
 * Tracks the accumulation of text, tool calls, and thinking content
 * for a single message in the stream.
 */
export interface MessageStreamState {
  id: string
  role: 'user' | 'assistant' | 'system' | 'tool'
  totalTextContent: string
  currentSegmentText: string
  lastEmittedText: string
  thinkingContent: string
  toolCalls: Map<string, InternalToolCallState>
  toolCallOrder: Array<string>
  hasToolCallsSinceTextStart: boolean
  isComplete: boolean
}
```

---

## Step 3: StreamProcessor — Refactor to Per-Message State [DONE]

**Files**: `packages/typescript/ai/src/activities/chat/stream/processor.ts`

This is the largest change. The existing single-message state variables are replaced
with a `Map` keyed by messageId.

### 3a. Replace instance variables

Remove:
```
- currentAssistantMessageId: string | null
- totalTextContent: string
- currentSegmentText: string
- lastEmittedText: string
- thinkingContent: string
- toolCalls: Map<string, InternalToolCallState>
- toolCallOrder: Array<string>
- hasToolCallsSinceTextStart: boolean
```

Add:
```
- messageStates: Map<string, MessageStreamState>
- activeMessageIds: Set<string> // messages currently streaming
- toolCallToMessage: Map<string, string> // toolCallId → messageId
- pendingManualMessageId: string | null // from startAssistantMessage() for compat
```

Keep shared:
```
- finishReason: string | null
- isDone: boolean
```

### 3b. Add helper methods

- `createMessageState(messageId, role): MessageStreamState` — creates and stores state
- `getMessageState(messageId): MessageStreamState | undefined` — lookup by messageId
- `getActiveAssistantMessageId(): string | null` — returns the most recent active
  assistant messageId. Iterates `activeMessageIds` in reverse (Set is insertion-order;
  convert to array and search backward). Used as fallback for events without messageId
  routing.
- `ensureAssistantMessage(preferredId?): { messageId, state }` — finds or auto-creates
  an assistant message. Fires `onStreamStart` and `emitMessagesChange` when it
  auto-creates (backward compat for streams without `TEXT_MESSAGE_START`).

### 3c. Handle `TEXT_MESSAGE_START` in `processChunk`

Currently in the `default:` case (ignored). Move to explicit handler:

```typescript
case 'TEXT_MESSAGE_START':
  this.handleTextMessageStartEvent(chunk)
  break
```

Handler logic:
1. If `pendingManualMessageId` is set (from `startAssistantMessage()`):
   - Associate the manual message with this event's messageId
   - Update the message's ID in the messages array if they differ
   - Clear `pendingManualMessageId`
   - Create `MessageStreamState` for the (now-resolved) messageId
2. If a message with this messageId already exists in messages (dedup):
   - Just add to `activeMessageIds` and create state if missing
3. Otherwise:
   - Create a new `UIMessage` with the given `messageId` and `role`
   - Add to messages array
   - Create `MessageStreamState`
   - Add to `activeMessageIds`
   - Emit `onStreamStart` and `onMessagesChange`

### 3d. Handle `TEXT_MESSAGE_END` in `processChunk`

Currently in the `default:` case (ignored). Move to explicit handler:

```typescript
case 'TEXT_MESSAGE_END':
  this.handleTextMessageEndEvent(chunk)
  break
```

Handler logic:
1. Get the `MessageStreamState` for `chunk.messageId`
2. Emit any pending text for this message
3. Complete all tool calls for this message
4. Mark state as `isComplete = true`
5. Remove from `activeMessageIds`
6. Emit `onStreamEnd` for this message (fires per-message, not only on last)

### 3e. Handle `MESSAGES_SNAPSHOT` in `processChunk`

Add explicit handler for the AG-UI `MESSAGES_SNAPSHOT` event:

```typescript
case 'MESSAGES_SNAPSHOT':
  this.handleMessagesSnapshotEvent(chunk)
  break
```

Handler logic:
1. Set `this.messages` to the snapshot messages (normalize with `normalizeToUIMessage`
   if needed, or accept as-is if already in `UIMessage` format)
2. Emit `onMessagesChange`

This is deliberately minimal. `MESSAGES_SNAPSHOT` is a first-class AG-UI event
for conversation hydration. It does NOT handle arbitrary application state —
that's `STATE_SNAPSHOT` / `STATE_DELTA`, which remain in the default no-op case
and are deferred to a future PR (see "Extensibility: Session State" below).

**Why this isn't a special case**: `MESSAGES_SNAPSHOT` is a distinct AG-UI event
type with its own shape (`{ messages: Array<...> }`), separate from `StateSnapshot`
(`{ snapshot: Record<string, unknown> }`). The SessionAdapter returns
`AsyncIterable<StreamChunk>` where `StreamChunk = AGUIEvent`. Adding event handlers
is purely additive — each new `case` branch in `processChunk()` handles one more
event type. The adapter interface doesn't change.

### 3f. Update `TEXT_MESSAGE_CONTENT` handler

Route by `chunk.messageId`:
1. Get state via `ensureAssistantMessage(chunk.messageId)` — falls back to active
   assistant message, or auto-creates one (backward compat)
2. All text accumulation logic stays the same, but operates on the per-message state
3. `emitTextUpdate` receives the messageId to update the correct message

### 3g. Update `TOOL_CALL_START` handler

Route by `parentMessageId` or active message:
1. Determine messageId: `chunk.parentMessageId ?? getActiveAssistantMessageId()`
2. Store mapping: `toolCallToMessage.set(chunk.toolCallId, messageId)`
3. Get state for that messageId
4. Rest of logic (create InternalToolCallState, update UIMessage) stays the same
   but uses `messageId` from the mapping instead of `currentAssistantMessageId`

### 3h. Update `TOOL_CALL_ARGS`, `TOOL_CALL_END` handlers

Route via `toolCallToMessage.get(chunk.toolCallId)` to get the messageId.
Logic stays the same but uses per-message state.

### 3i. Update `STEP_FINISHED`, `CUSTOM` handlers

Route to `getActiveAssistantMessageId()`. Logic stays the same.

### 3j. Update `startAssistantMessage()` for backwards compatibility

```typescript
startAssistantMessage(messageId?: string): string {
  this.resetStreamState()
  const id = messageId ?? generateMessageId()

  const assistantMessage: UIMessage = {
    id, role: 'assistant', parts: [], createdAt: new Date()
  }

  this.messages = [...this.messages, assistantMessage]
  this.createMessageState(id, 'assistant')
  this.activeMessageIds.add(id)

  // Mark as manually created — TEXT_MESSAGE_START will associate with this
  this.pendingManualMessageId = id

  this.events.onStreamStart?.()
  this.emitMessagesChange()
  return id
}
```

### 3k. Update `resetStreamState()`

Clear `messageStates`, `activeMessageIds`, `toolCallToMessage`, `pendingManualMessageId`.

### 3l. Update `finalizeStream()`

Finalize ALL active messages (emit pending text, complete tool calls for each).
Clear `activeMessageIds`. Emit `onStreamEnd` for the last assistant message.

### 3m. Update `areAllToolsComplete()`

No change needed — it already looks at the last assistant message's parts in the
messages array, not at internal state.

### 3n. Verify

Run existing `stream-processor.test.ts`. All existing tests should pass because
they use `startAssistantMessage()` which creates the per-message state via the
backwards-compat path.

Add new tests:
- `TEXT_MESSAGE_START` creates a message with correct role and messageId
- `TEXT_MESSAGE_START` with `role: 'user'` creates a user message
- `TEXT_MESSAGE_END` finalizes the message and emits `onStreamEnd`
- `TEXT_MESSAGE_END` emits pending text that was buffered by chunk strategy
- Two interleaved assistant messages (TEXT_MESSAGE_START for msg-a, TEXT_MESSAGE_START
  for msg-b, content for msg-a, content for msg-b, END for msg-a, END for msg-b)
- `onStreamEnd` fires for each message that ends (two messages = two calls)
- Dedup: `startAssistantMessage()` followed by `TEXT_MESSAGE_START` with different ID
  associates them correctly (single message, not duplicate)
- Dedup: `startAssistantMessage('id')` followed by `TEXT_MESSAGE_START` with same ID
- `TEXT_MESSAGE_START` without prior `startAssistantMessage()` works and fires `onStreamStart`
- `ensureAssistantMessage` auto-creates message and fires `onStreamStart` when content
  arrives without prior `TEXT_MESSAGE_START`
- Backward compat: `startAssistantMessage()` without `TEXT_MESSAGE_START` still works
- Tool calls routed via `parentMessageId`
- `MESSAGES_SNAPSHOT` hydrates messages and emits `onMessagesChange`
- `MESSAGES_SNAPSHOT` replaces existing messages (not appends)

---

## Step 4: SessionAdapter Interface + DefaultSessionAdapter

**Files**: `packages/typescript/ai-client/src/session-adapter.ts` (new file)

### 4a. Define the `SessionAdapter` interface

```typescript
import type { StreamChunk, UIMessage } from '@tanstack/ai'
import type { ConnectionAdapter } from './connection-adapters'

/**
 * Session adapter interface for persistent stream-based chat sessions.
 *
 * Unlike ConnectionAdapter (which creates a new stream per request),
 * a SessionAdapter maintains a persistent subscription. Responses from
 * send() arrive through subscribe(), not as a return value.
 *
 * The subscribe() stream yields standard AG-UI events (StreamChunk).
 * The processor handles whichever event types it supports — currently
 * text message lifecycle, tool calls, and MESSAGES_SNAPSHOT. Future
 * event handlers (STATE_SNAPSHOT, STATE_DELTA, etc.) are purely additive.
 */
export interface SessionAdapter {
  /**
   * Subscribe to the session stream.
   * Returns an async iterable that yields chunks continuously.
   * For durable sessions, this may first yield a MESSAGES_SNAPSHOT
   * to hydrate the conversation, then subscribe to the live stream
   * from the appropriate offset.
   */
  subscribe(signal?: AbortSignal): AsyncIterable<StreamChunk>

  /**
   * Send messages to the session.
   * For durable sessions, the proxy writes to the stream and forwards to the API.
   * The response arrives through subscribe(), not as a return value.
   */
  send(
    messages: Array<UIMessage>,
    data?: Record<string, unknown>,
    signal?: AbortSignal,
  ): Promise<void>
}
```

### 4b. Implement `createDefaultSession()`

Wraps a `ConnectionAdapter` into a `SessionAdapter` using an async queue pattern.
-`send()` calls `connection.connect()` and pushes chunks to the queue. -`subscribe()` yields chunks from the queue. - -```typescript -export function createDefaultSession( - connection: ConnectionAdapter, -): SessionAdapter { - // Async queue: send() pushes chunks, subscribe() yields them - const buffer: Array = [] - const waiters: Array<(chunk: StreamChunk | null) => void> = [] - - function push(chunk: StreamChunk): void { - const waiter = waiters.shift() - if (waiter) { - waiter(chunk) - } else { - buffer.push(chunk) - } - } - - return { - async *subscribe(signal?: AbortSignal) { - while (!signal?.aborted) { - let chunk: StreamChunk | null - if (buffer.length > 0) { - chunk = buffer.shift()! - } else { - chunk = await new Promise((resolve) => { - waiters.push(resolve) - signal?.addEventListener('abort', () => resolve(null), { once: true }) - }) - } - if (chunk !== null) yield chunk - } - }, - - async send(messages, data, signal) { - const stream = connection.connect(messages, data, signal) - for await (const chunk of stream) { - push(chunk) - } - }, - } -} -``` - -### 4c. Add tests for DefaultSessionAdapter - -- Basic: send text chunks -> subscribe yields them -- Multiple sends: chunks from send #1 then send #2 arrive in order -- Abort: aborting the subscribe signal stops the iterator -- Error: errors in connection.connect() propagate through send() - ---- - -## Step 5: ChatClient Refactor - -**Files**: `packages/typescript/ai-client/src/chat-client.ts` - -This is the second-largest change. The `streamResponse()` and `processStream()` -methods are removed. All chunk consumption goes through the subscription loop. - -### 5a. Update `ChatClientOptions` - -In `packages/typescript/ai-client/src/types.ts`: - -```typescript -export interface ChatClientOptions = any> { - /** - * Connection adapter for streaming (request-response mode). - * Wrapped in a DefaultSessionAdapter internally. - * Provide either `connection` or `session`, not both. - */ - connection?: ConnectionAdapter - - /** - * Session adapter for persistent stream-based sessions. - * When provided, takes over from connection. - * Provide either `connection` or `session`, not both. - */ - session?: SessionAdapter - - // ... rest unchanged -} -``` - -Note: `connection` changes from required to optional. This is a breaking type -change but existing code providing `connection` still compiles. - -### 5b. Update ChatClient constructor - -```typescript -constructor(options: ChatClientOptions) { - // Resolve session adapter - if (options.session) { - this.session = options.session - } else if (options.connection) { - this.session = createDefaultSession(options.connection) - } else { - throw new Error('Either connection or session must be provided') - } - - // ... existing setup (processor, callbacks, tools) ... - - // Start subscription - this.startSubscription() -} -``` - -New instance variables: -```typescript -private session: SessionAdapter -private subscriptionAbortController: AbortController | null = null -``` - -Remove: -```typescript -private connection: ConnectionAdapter // replaced by session -``` - -### 5c. 
Add `startSubscription()` method - -```typescript -private startSubscription(): void { - this.subscriptionAbortController = new AbortController() - const signal = this.subscriptionAbortController.signal - - // Run subscription in background (don't await in constructor) - this.consumeSubscription(signal).catch((err) => { - if (err instanceof Error && err.name !== 'AbortError') { - this.setError(err) - this.setStatus('error') - this.callbacksRef.current.onError(err) - } - }) -} - -private async consumeSubscription(signal: AbortSignal): Promise { - const stream = this.session.subscribe(signal) - for await (const chunk of stream) { - if (signal.aborted) break - this.callbacksRef.current.onChunk(chunk) - this.processor.processChunk(chunk) - await new Promise((resolve) => setTimeout(resolve, 0)) - } -} -``` - -### 5d. Rewrite `sendMessage()` - -Remove the `streamResponse()` call. Instead, send through the session adapter: - -```typescript -async sendMessage(content: string | MultimodalContent, body?: Record): Promise { - const emptyMessage = typeof content === 'string' && !content.trim() - if (emptyMessage || this.isLoading) return - - const normalizedContent = this.normalizeMessageInput(content) - this.pendingMessageBody = body - - // Add user message optimistically - const userMessage = this.processor.addUserMessage( - normalizedContent.content, - normalizedContent.id, - ) - this.events.messageSent(userMessage.id, normalizedContent.content) - - // Send through session adapter - this.setIsLoading(true) - this.setStatus('submitted') - this.setError(undefined) - - try { - const mergedBody = { - ...this.body, - ...this.pendingMessageBody, - conversationId: this.uniqueId, - } - this.pendingMessageBody = undefined - - await this.session.send(this.processor.getMessages(), mergedBody) - } catch (err) { - if (err instanceof Error) { - if (err.name === 'AbortError') return - this.setError(err) - this.setStatus('error') - this.callbacksRef.current.onError(err) - } - this.setIsLoading(false) - } -} -``` - -**Key difference**: `sendMessage` resolves when `session.send()` completes -(HTTP request done), not when the response finishes streaming. The response -arrives through the subscription. `isLoading` is set to false by the processor's -`onStreamEnd` event (wired up in the constructor callbacks). - -### 5e. Wire processor events to isLoading - -Update the processor event wiring in the constructor: - -```typescript -onStreamStart: () => { - this.setStatus('streaming') - // In session mode, streaming status already set via sendMessage -}, -onStreamEnd: (message: UIMessage) => { - this.callbacksRef.current.onFinish(message) - this.setIsLoading(false) // NEW: reset loading when generation ends - this.setStatus('ready') - - // Check for continuation (agent loop) - this.checkForContinuation().catch(console.error) -}, -``` - -### 5f. Rewrite `checkForContinuation()` - -```typescript -private async checkForContinuation(): Promise { - if (this.continuationPending) return - if (!this.shouldAutoSend()) return - - this.continuationPending = true - try { - this.setIsLoading(true) - this.setStatus('submitted') - await this.session.send(this.processor.getMessages(), { - ...this.body, - conversationId: this.uniqueId, - }) - } catch (err) { - if (err instanceof Error && err.name !== 'AbortError') { - this.setError(err) - this.setStatus('error') - this.callbacksRef.current.onError(err) - } - this.setIsLoading(false) - } finally { - this.continuationPending = false - } -} -``` - -### 5g. 
Simplify `stop()` - -```typescript -stop(): void { - this.subscriptionAbortController?.abort() - this.subscriptionAbortController = null - this.setIsLoading(false) - this.setStatus('ready') - this.events.stopped() -} -``` - -### 5h. Update `reload()` - -```typescript -async reload(): Promise { - const messages = this.processor.getMessages() - if (messages.length === 0) return - - const lastUserMessageIndex = messages.findLastIndex(m => m.role === 'user') - if (lastUserMessageIndex === -1) return - - this.events.reloaded(lastUserMessageIndex) - this.processor.removeMessagesAfter(lastUserMessageIndex) - - // Send through session adapter - this.setIsLoading(true) - this.setStatus('submitted') - try { - await this.session.send(this.processor.getMessages(), { - ...this.body, - conversationId: this.uniqueId, - }) - } catch (err) { - if (err instanceof Error && err.name !== 'AbortError') { - this.setError(err) - this.setStatus('error') - this.callbacksRef.current.onError(err) - } - this.setIsLoading(false) - } -} -``` - -### 5i. Update `append()` - -```typescript -async append(message: UIMessage | ModelMessage): Promise { - const normalizedMessage = normalizeToUIMessage(message, generateMessageId) - if (normalizedMessage.role === 'system') return - - const uiMessage = normalizedMessage as UIMessage - this.events.messageAppended(uiMessage) - - const messages = this.processor.getMessages() - this.processor.setMessages([...messages, uiMessage]) - - this.setIsLoading(true) - this.setStatus('submitted') - try { - await this.session.send(this.processor.getMessages(), { - ...this.body, - conversationId: this.uniqueId, - }) - } catch (err) { - if (err instanceof Error && err.name !== 'AbortError') { - this.setError(err) - this.setStatus('error') - this.callbacksRef.current.onError(err) - } - this.setIsLoading(false) - } -} -``` - -### 5j. Update `updateOptions()` - -Replace `connection` with `session`: - -```typescript -updateOptions(options: { - connection?: ConnectionAdapter - session?: SessionAdapter - body?: Record - tools?: ReadonlyArray - // ... callbacks -}): void { - if (options.session !== undefined) { - // Stop current subscription, update adapter, restart - this.subscriptionAbortController?.abort() - this.session = options.session - this.startSubscription() - } else if (options.connection !== undefined) { - this.subscriptionAbortController?.abort() - this.session = createDefaultSession(options.connection) - this.startSubscription() - } - // ... rest unchanged -} -``` - -### 5k. Remove dead code - -Delete: -- `streamResponse()` method -- `processStream()` method -- `private connection: ConnectionAdapter` field -- `private abortController: AbortController | null` field (replaced by subscriptionAbortController) -- `private currentStreamId: string | null` field -- `private currentMessageId: string | null` field -- `private postStreamActions` and `drainPostStreamActions()` (no longer needed — - the subscription loop processes events continuously) -- `queuePostStreamAction()` method - -### 5l. Verify - -Update `chat-client.test.ts`: -- Existing tests use `createMockConnectionAdapter()` which returns a `ConnectionAdapter`. - These should still work because the ChatClient wraps it in `createDefaultSession()`. -- The test assertions about message content, callbacks, loading state should mostly - still pass. Some timing-sensitive tests may need adjustment because `sendMessage` - now resolves at a different point. 
-- Add new tests: - - ChatClient with explicit `session` option - - Session mode: chunks arrive through subscription - - Agent loop continuation in session mode - -Also update `test-utils.ts`: -- The `createTextChunks` helper should include `TEXT_MESSAGE_START` and - `TEXT_MESSAGE_END` events to match what real servers emit. This is needed - because the processor now relies on these events for message creation. - **Important**: existing tests call `startAssistantMessage()` externally, - so these events need to work with the dedup logic. - ---- - -## Step 6: Export Updates - -**Files**: `packages/typescript/ai-client/src/index.ts` - -Add exports: - -```typescript -export { - createDefaultSession, - type SessionAdapter, -} from './session-adapter' -``` - ---- - -## Step 7: Framework Hook Updates - -**Files**: -- `packages/typescript/ai-react/src/types.ts` -- `packages/typescript/ai-react/src/use-chat.ts` -- Similarly for `ai-solid`, `ai-vue`, `ai-svelte`, `ai-preact` - -### 7a. Update `UseChatOptions` - -The `UseChatOptions` type derives from `ChatClientOptions` via `Omit`. Since -`ChatClientOptions` now includes `session?: SessionAdapter`, it flows through -automatically. No change needed in the React types file. - -### 7b. Update `useChat` hook - -The `useChat` hook passes options to `ChatClient`. Since `session` is now part -of `ChatClientOptions`, it flows through automatically. Verify that: -- `optionsRef.current.session` is passed to the ChatClient constructor -- `updateOptions` propagates session changes - -The existing code passes `connection: optionsRef.current.connection` to the -constructor. Update to also pass `session: optionsRef.current.session`. - -### 7c. Verify - -Run `pnpm test:lib` in `ai-react` and other framework packages. The hook tests -use mock connection adapters, which should still work through the default adapter. - ---- - -## Step 8: Full Test Suite - -Run the complete test suite: - -```bash -pnpm test:lib # Unit tests -pnpm test:types # Type checking -pnpm test:eslint # Linting -pnpm test:build # Build verification -pnpm format # Format code -``` - -Fix any failures before proceeding. - ---- - -## Step 9: PR Description Artifact - -Write the PR description to `docs/proposals/session-stream-pr.md`. - -### PR 1: StreamProcessor per-message state + AG-UI alignment - -#### Title -`feat(ai): per-message stream state and AG-UI type alignment` - -#### Summary -- Refactors StreamProcessor from single-message to per-message state tracking -- Handles `TEXT_MESSAGE_START` / `TEXT_MESSAGE_END` as first-class events -- Adds `MESSAGES_SNAPSHOT` event type and handler for conversation hydration -- Expands `TextMessageStartEvent.role` to support all AG-UI roles -- Adds `parentMessageId` to `ToolCallStartEvent` for message correlation - -#### Motivation -Foundation for durable session support. The StreamProcessor needs to: -- Track multiple concurrent messages (interleaved streams) -- Use messageId from incoming events (not generate its own) -- Hydrate conversation state from snapshots (reconnect/resume) - -#### Breaking Changes -None. All changes are additive. `startAssistantMessage()` continues to work -via backwards-compatibility dedup logic. 
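
#### Compatibility sketch

To make the dedup path concrete, here is an illustrative sketch. It assumes `StreamProcessor` is exported from `@tanstack/ai` (as the client package's imports suggest), abbreviates the event fields, and uses the `as StreamChunk` cast that the test suite itself uses:

```typescript
import { StreamProcessor } from '@tanstack/ai'
import type { StreamChunk } from '@tanstack/ai'

// Legacy caller eagerly creates the assistant message.
const processor = new StreamProcessor()
processor.startAssistantMessage()

// The server then emits TEXT_MESSAGE_START with its own ID. The handler
// associates it with the pending manual message instead of duplicating it
// (Step 3c), updating the message's ID in place if the two IDs differ.
processor.processChunk({
  type: 'TEXT_MESSAGE_START',
  messageId: 'srv-msg-1',
  role: 'assistant',
  timestamp: Date.now(),
} as StreamChunk)

// Still a single message, now carrying the server-assigned ID.
console.assert(processor.getMessages().length === 1)
console.assert(processor.getMessages()[0]?.id === 'srv-msg-1')
```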
- -### PR 2: SessionAdapter + ChatClient refactor - -#### Title -`feat: session adapter support for durable chat sessions` - -#### Summary -- SessionAdapter interface (`subscribe()` / `send()`) for persistent sessions -- DefaultSessionAdapter wraps ConnectionAdapter via async queue -- ChatClient unified refactor: all chunk consumption via subscription loop -- Framework hook updates pass through `session` option - -#### Breaking Changes -- `ChatClientOptions.connection` is now optional (was required) -- `sendMessage()` promise resolves when send completes, not when response finishes -- `processStream()` and `streamResponse()` are removed (internal) - -#### Migration Guide -- Existing code using `connection:` continues to work unchanged -- To use session mode: provide `session:` instead of `connection:` - -#### DX Example - -```tsx -import { useChat, fetchServerSentEvents } from '@tanstack/ai-react' - -// Existing usage — unchanged -function BasicChat() { - const { messages } = useChat({ - connection: fetchServerSentEvents('/api/chat'), - }) -} - -// New: with a durable session adapter -import { createDurableSession } from '@durable-streams/tanstack' - -function DurableChat() { - const { messages } = useChat({ - session: createDurableSession({ - proxyUrl: PROXY_URL, - sessionId: 'session-123', - sendUrl: '/api/chat', - connectUrl: '/api/connect', - }), - }) -} -``` - ---- - -## Implementation Notes - -### Backwards Compatibility — `startAssistantMessage()` + `TEXT_MESSAGE_START` - -In the current connection mode, the server (TextEngine) emits `TEXT_MESSAGE_START` -events. Previously these were ignored. Now the processor handles them. But the -`DefaultSessionAdapter` feeds chunks from `connection.connect()` through the -subscription, and the ChatClient no longer calls `startAssistantMessage()`. - -For direct `StreamProcessor` users who still call `startAssistantMessage()`: -- `startAssistantMessage()` creates a message and sets `pendingManualMessageId` -- When `TEXT_MESSAGE_START` arrives, the handler checks `pendingManualMessageId` -- If set, it associates the event with the existing message (no duplicate) -- If the messageId differs, update the message's ID - -### The DefaultSessionAdapter Async Queue - -The queue is a simple producer-consumer pattern (~30 lines). It must handle: -- **Backpressure**: Buffer chunks when subscriber is slower than producer -- **Multiple sends**: Queue chunks from sequential `send()` calls correctly -- **Abort**: Resolve waiting promises with null on abort signal -- **No memory leaks**: Don't accumulate waiters after abort - -### `sendMessage` Promise Semantics - -In connection mode (via DefaultSessionAdapter), `session.send()` awaits the full -`connection.connect()` iteration. So `sendMessage()` resolves when all chunks -have been pushed to the queue (similar to current timing, though finalization -happens asynchronously in the subscription loop). - -In durable session mode, `session.send()` resolves when the HTTP request to the -proxy completes. The actual response streams through the subscription. - -### `isLoading` Management - -`isLoading` is set true in `sendMessage()` / `checkForContinuation()` / `reload()`. -It is set false in the processor's `onStreamEnd` callback, which fires when -`TEXT_MESSAGE_END` or `RUN_FINISHED` is processed. - -Edge case: if `send()` fails (error thrown), `isLoading` is set false in the -catch block. 
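
### Decoupled Send/Receive in Practice

The notes above describe the queue contract in the abstract; a small usage sketch shows the decoupling end to end. This is illustrative only: the endpoint URL and message content are placeholders, and it assumes `fetchServerSentEvents` is re-exported from `@tanstack/ai-client` alongside the other connection adapter factories:

```typescript
import {
  createDefaultSession,
  fetchServerSentEvents,
} from '@tanstack/ai-client'

async function demo(): Promise<void> {
  // Wrap a request-response connection in the default session adapter.
  const session = createDefaultSession(fetchServerSentEvents('/api/chat'))
  const abort = new AbortController()

  // Reader: consumes the queue independently of any send.
  const reader = (async () => {
    for await (const chunk of session.subscribe(abort.signal)) {
      console.log(chunk.type)
    }
  })()

  // Writer: resolves once every chunk has been pushed to the queue,
  // not when the subscriber has finished processing them.
  await session.send([
    { id: 'u1', role: 'user', parts: [{ type: 'text', content: 'hi' }] },
  ])

  abort.abort() // unblocks a waiting reader and ends its loop
  await reader
}
```

Note that `send()` resolving is a queue-level guarantee; UI state such as `isLoading` is driven by the processor's `onStreamEnd`, as described above.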
### DevTools Events

The current devtools integration uses `currentStreamId` and `currentMessageId`
which are set in `processStream()`. Since `processStream()` is removed, devtools
events need to be wired differently — either from the subscription loop or from
processor events. This may need a follow-up if devtools integration breaks.

---

## Extensibility: Session State

The SessionAdapter interface returns `AsyncIterable<StreamChunk>`, where
`StreamChunk = AGUIEvent`. This already includes `STATE_SNAPSHOT`, `STATE_DELTA`,
and `CUSTOM` events in the union type. The transport layer is fully extensible —
any session implementation can emit these events and they flow through the
subscription.

Adding support for new event types is purely additive: a new `case` branch in
`processChunk()`. The SessionAdapter interface does not change.

### Planned: Managed `sessionState` container (future PR)

Based on analysis of the AG-UI spec and real-world use cases (user presence,
agent registration, typing indicators, session metadata), the recommended
approach is a managed state container in the ChatClient:

1. **`STATE_SNAPSHOT` handler** — store full state object, extract messages if present
2. **`STATE_DELTA` handler** — apply delta (shallow merge initially, JSON Patch later)
3. **`onSessionStateChange` callback** — in `ChatClientOptions` and `StreamProcessorEvents`
4. **`getSessionState()` getter** — on ChatClient
5. **`sessionState` in framework hooks** — as reactive state

This is "Proposal A" from the session state extensibility analysis. It adds:
- One new callback (`onSessionStateChange`)
- One new getter (`getSessionState()`)
- One new piece of reactive state in hooks (`sessionState`)

Users who don't use session state pay zero cost. The `Record<string, unknown>`
type can later be made generic if demand warrants it.

Additionally, an `onCustomEvent` callback can forward `CUSTOM` events for
application-specific functionality (typing indicators, participant events, etc.)
without overloading `STATE_SNAPSHOT`.
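
### Sketch: snapshot and delta handlers

Nothing below is implemented in this PR. It is a minimal sketch of what Proposal A's handlers could look like, assuming the shallow-merge semantics and the `onSessionStateChange` callback proposed above (both are assumptions of the sketch, not shipped API):

```typescript
// Hypothetical sketch only: mirrors the managed-container proposal above.
type SessionState = Record<string, unknown>

let sessionState: SessionState = {}
let onSessionStateChange: ((state: SessionState) => void) | undefined

function handleStateSnapshot(snapshot: SessionState): void {
  // STATE_SNAPSHOT replaces the container wholesale
  sessionState = snapshot
  onSessionStateChange?.(sessionState)
}

function handleStateDelta(delta: SessionState): void {
  // STATE_DELTA shallow-merges initially; JSON Patch support can come later
  sessionState = { ...sessionState, ...delta }
  onSessionStateChange?.(sessionState)
}
```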
-
-### Event support roadmap
-
-| Event | PR 1 (this PR) | PR 2 (SessionAdapter) | Future PR |
-|-------|----------------|----------------------|-----------|
-| `TEXT_MESSAGE_START/CONTENT/END` | Handled | — | — |
-| `TOOL_CALL_START/ARGS/END` | Handled | — | — |
-| `RUN_STARTED/FINISHED/ERROR` | Handled | — | — |
-| `STEP_STARTED/FINISHED` | Handled | — | — |
-| `CUSTOM` (tool-input, approval) | Handled | — | — |
-| `MESSAGES_SNAPSHOT` | Handled | — | — |
-| `STATE_SNAPSHOT` | No-op (falls through) | No-op | Managed sessionState |
-| `STATE_DELTA` | No-op (falls through) | No-op | Managed sessionState |
-| `CUSTOM` (general callback) | — | — | `onCustomEvent` |
-
----
-
-## Out of Scope (Follow-up PRs)
-
-- `@durable-streams/tanstack` package (the `createDurableSession()` implementation)
-- Server-side changes to TextEngine for session mode
-- Per-message `isLoading` tracking (currently global)
-- `connectUrl` / snapshot / offset mechanics (lives in durable streams package)
-- `STATE_SNAPSHOT` / `STATE_DELTA` processing (managed sessionState — see above)
-- `onSessionStateChange` / `onCustomEvent` callbacks
-- `sessionState` reactive state in framework hooks
-- Documentation updates
-- Example app using session mode
diff --git a/docs/proposals/session-stream-support.md b/docs/proposals/session-stream-support.md
deleted file mode 100644
index 24f7cec5..00000000
--- a/docs/proposals/session-stream-support.md
+++ /dev/null
@@ -1,523 +0,0 @@
-# RFC: Durable Session Support for TanStack AI
-
-> **Status**: Draft
-> **Authors**: thruflo
-> **Created**: 2026-02-09
-> **Last Updated**: 2026-02-09
-
-
-## Summary
-
-This proposal outlines changes to TanStack AI that would enable **durable sessions** - a pattern where a persistent, append-only stream serves as the source of truth for a conversation.
-
-This enables:
-
-- **resilience**: tolerate patchy connectivity and tab backgrounding
-- **resumability**: reconnect and resume active generations; survives page refreshes and re-renders
-- **persistence**: full conversation history lives in the stream
-- **multi-user/agent/tab/device**: messages from any source appear in real-time
-
-The proposed changes are backwards compatible and align TanStack AI more closely with the AG-UI protocol specification.
-
-
-## Motivation
-
-### Current Architecture
-
-TanStack AI's `ChatClient` and `useChat` hook are built around a **request-response model**:
-
-1. User calls `sendMessage(content)`
-2. Client calls `connection.connect(messages)`
-3. Connection adapter makes HTTP request, returns `AsyncIterable<StreamChunk>`
-4. Client processes chunks, builds assistant message
-5. Stream ends, client returns to ready state
-
-This model works well for simple chat UIs but breaks down in several scenarios.
-
-### Problem 1: No resumability
-
-If the user refreshes the page mid-generation, or the network drops, the response is lost. There's no way to resume where the stream left off.
-
-### Problem 2: Single-user assumption
-
-The current model assumes one user sends a message and receives a response. It doesn't support:
-
-- multiple users in the same conversation
-- multiple AI agents responding
-- messages arriving from other tabs/devices
-- background agents adding messages asynchronously
-
-### Problem 3: Tight coupling of send and receive
-
-`sendMessage()` both writes a user message AND waits for the response stream. 
For session streams, these should be decoupled: - -- **write**: add message to session (proxy writes to stream) -- **read**: continuously consume from stream (independent of writes) - -### Problem 4: Type limitations - -Current types don't fully align with AG-UI: - -- `TextMessageStartEvent.role` is hardcoded to `'assistant'` -- tool events lack `parentMessageId` for message correlation -- no support for user messages as stream events - - -## Background: Durable session pattern - -### What is a Session Stream? - -A session stream is a durable, append-only log of conversation events. All participants (users, agents) write to the same stream, and all clients consume from it. - -``` -┌──────────────────────────────────────────────────────────────┐ -│ Session Stream │ -│ [user-msg-1] [assistant-chunk] [chunk] [chunk] [run-end] │ -│ [user-msg-2] [assistant-chunk] [tool-call] [chunk] ... │ -└──────────────────────────────────────────────────────────────┘ - ▲ │ - │ write │ consume - │ ▼ - ┌─────────┐ ┌─────────────┐ - │ Client │ │ Client │ - │ A │ │ A, B, C │ - └─────────┘ └─────────────┘ -``` - -### How it differs from request-response - -| Aspect | Request-Response | Durable Session | -|--------|------------------|-----------------| -| Connection | Per-request | Persistent | -| Message source | Only "my" responses | Any participant | -| History | Loaded separately | Consumed from stream | -| Resume | Not possible | Natural (just consume) | -| Send/Receive | Coupled | Decoupled | - -### Durable Streams integration - -The [Durable Streams](https://electric-sql.com/products/durable-streams) project provides infrastructure for this pattern: - -- persistent, addressable streams with reliable delivery -- resumable consumption with offset tracking -- URL-based access with signature renewal -- proxy service that forwards requests and captures responses - - -## Key Insights from analysis - -### 1. StreamProcessor Ignores `messageId` - -The current `StreamProcessor` generates its own message IDs and tracks only a single `currentAssistantMessageId`. It doesn't use the `messageId` from incoming chunks. - -```typescript -// Current behavior - ignores chunk.messageId -startAssistantMessage(): string { - const assistantMessage: UIMessage = { - id: generateMessageId(), // Always generates new ID - role: 'assistant', - // ... - } - this.currentAssistantMessageId = assistantMessage.id -} -``` - -**Impact**: Cannot correlate messages across reconnects or deduplicate. - -### 2. Tool Events Lack Message Correlation - -AG-UI specifies `parentMessageId` on tool call events, but TanStack AI doesn't implement this: - -| Event | AG-UI Spec | TanStack AI | -|-------|-----------|-------------| -| `ToolCallStart.parentMessageId` | Optional | Missing | -| `ToolCallStart.toolCallId` | Required | Implemented | - -**Mitigation**: We can track `toolCallId → messageId` mapping ourselves based on stream order. The `toolCallId` is sufficient to correlate tool call chunks together. When `parentMessageId` is available, we use it; otherwise, we associate tool calls with the most recently active message. - -### 3. AG-UI Supports User Messages - -The AG-UI protocol's `TextMessageStart` event has a `role` field supporting `'user' | 'assistant' | 'system' | 'tool'`. User messages can use standard `TEXT_MESSAGE_*` events - they don't need a custom format. - -```typescript -// AG-UI supports this -{ type: 'TEXT_MESSAGE_START', messageId: 'msg-1', role: 'user', timestamp: ... 
}
-{ type: 'TEXT_MESSAGE_CONTENT', messageId: 'msg-1', delta: 'Hello!', ... }
-{ type: 'TEXT_MESSAGE_END', messageId: 'msg-1', timestamp: ... }
-```
-
-### 4. Multi-Message Interleaving is Manageable
-
-While true chunk-level interleaving of multiple assistant responses could be complex, in practice:
-
-- Each LLM response streams sequentially (text, then tool calls)
-- Interleaving happens at the response level, not chunk level
-- We can track "active message" and associate chunks correctly
-
-### 5. Message Ordering Should Use Position, Not Timestamps
-
-Timestamps can have clock skew across clients. The stream itself is ordered. A simple counter based on first-seen order is more reliable:
-
-```typescript
-private messageOrder: Map<string, number> = new Map()
-private orderCounter = 0
-
-getOrAssignOrder(messageId: string): number {
-  if (!this.messageOrder.has(messageId)) {
-    this.messageOrder.set(messageId, this.orderCounter++)
-  }
-  return this.messageOrder.get(messageId)!
-}
-```
-
-### 6. User Message Deduplication
-
-When a client sends a message optimistically, then sees it echoed back from the stream, we need deduplication:
-
-1. Client generates `messageId`, adds to local state as "pending"
-2. Client sends to proxy with that `messageId`
-3. Proxy writes `TEXT_MESSAGE_*` events with that `messageId` to stream
-4. Client sees message from stream - if `messageId` matches pending, confirm; else add new
-
-
-## Proposed Changes
-
-### Phase 1: Type Alignment with AG-UI
-
-**File**: `packages/typescript/ai/src/types.ts`
-
-#### 1.1 TextMessageStartEvent Role
-
-```typescript
-// Before
-export interface TextMessageStartEvent extends BaseAGUIEvent {
-  type: 'TEXT_MESSAGE_START'
-  messageId: string
-  role: 'assistant' // Hardcoded
-}
-
-// After
-export interface TextMessageStartEvent extends BaseAGUIEvent {
-  type: 'TEXT_MESSAGE_START'
-  messageId: string
-  role: 'user' | 'assistant' | 'system' | 'tool' // Align with AG-UI
-}
-```
-
-#### 1.2 Tool Events - Add parentMessageId
-
-```typescript
-// Add to ToolCallStartEvent
-export interface ToolCallStartEvent extends BaseAGUIEvent {
-  type: 'TOOL_CALL_START'
-  toolCallId: string
-  toolName: string
-  parentMessageId?: string // NEW - optional, for message correlation
-  index?: number
-}
-
-// Similarly for ToolCallArgsEvent and ToolCallEndEvent
-```
-
-**Rationale**: Aligns with AG-UI specification. Fully backwards compatible.
-
----
-
-### Phase 2: StreamProcessor Enhancements
-
-**File**: `packages/typescript/ai/src/activities/chat/stream/processor.ts`
-
-#### 2.1 Use messageId from Chunks
-
-```typescript
-// Before
-startAssistantMessage(): string {
-  const assistantMessage: UIMessage = {
-    id: generateMessageId(),
-    role: 'assistant',
-    // ...
-  }
-}
-
-// After - accept optional parameters
-startMessage(options?: {
-  messageId?: string
-  role?: 'user' | 'assistant'
-}): string {
-  const {
-    messageId = generateMessageId(),
-    role = 'assistant'
-  } = options ?? {}
-
-  const message: UIMessage = {
-    id: messageId,
-    role,
-    parts: [],
-    createdAt: new Date(),
-  }
-  // ...
-}
-```
-
-When processing `TEXT_MESSAGE_START`:
-
-```typescript
-case 'TEXT_MESSAGE_START':
-  this.startMessage({
-    messageId: chunk.messageId,
-    role: chunk.role
-  })
-  break
-```
-
-**Rationale**: Enables deduplication and correlation with external systems. Backwards compatible - generates ID if none provided.
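-
-One payoff of honoring `chunk.messageId` is the deduplication flow from Key
-Insight 6. A rough sketch of the reconciliation check (hypothetical helper —
-`reconcileIncoming` and `pending` are illustrative names, not current code):
-
-```typescript
-// Decide whether an incoming TEXT_MESSAGE_START confirms one of our own
-// optimistic messages or introduces a message from another participant.
-function reconcileIncoming(
-  pending: Set<string>,
-  incomingMessageId: string,
-): 'confirmed' | 'new' {
-  if (pending.has(incomingMessageId)) {
-    pending.delete(incomingMessageId) // echo of our optimistic send
-    return 'confirmed'
-  }
-  return 'new' // from another user/agent/tab — append it
-}
-```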
-
-#### 2.2 Multi-Message State Tracking
-
-```typescript
-// Before - single message state
-private currentAssistantMessageId: string | null = null
-private textContent = ''
-private toolCalls: Map<string, InternalToolCallState> = new Map()
-
-// After - per-message state
-private messageStates: Map<string, MessageStreamState> = new Map()
-private activeMessageId: string | null = null // Most recently seen
-private toolCallToMessage: Map<string, string> = new Map() // toolCallId -> messageId
-
-interface MessageStreamState {
-  textContent: string
-  currentSegmentText: string
-  toolCalls: Map<string, InternalToolCallState>
-  isComplete: boolean
-}
-```
-
-Chunk processing routes to correct message:
-
-```typescript
-case 'TEXT_MESSAGE_CONTENT': {
-  const state = this.getOrCreateMessageState(chunk.messageId)
-  state.textContent += chunk.delta
-  this.activeMessageId = chunk.messageId
-  this.updateMessageParts(chunk.messageId)
-  break
-}
-
-case 'TOOL_CALL_START': {
-  // Prefer explicit parentMessageId, fall back to active message
-  const messageId = (chunk as any).parentMessageId ?? this.activeMessageId
-  if (messageId) {
-    this.toolCallToMessage.set(chunk.toolCallId, messageId)
-    this.addToolCallToMessage(messageId, chunk)
-  }
-  break
-}
-
-case 'TOOL_CALL_ARGS': {
-  const msgId = this.toolCallToMessage.get(chunk.toolCallId)
-  if (msgId) {
-    this.updateToolCallArgs(msgId, chunk)
-  }
-  break
-}
-```
-
-**Rationale**: Enables multi-agent scenarios. Backwards compatible - single message case works identically.
-
----
-
-### Phase 3: ChatClient Session Support
-
-**File**: `packages/typescript/ai-client/src/chat-client.ts`
-
-This phase requires further DX exploration. The core need is to decouple:
-
-1. **Consuming a stream** (continuous, independent of sends)
-2. **Sending messages** (writes to proxy, doesn't wait for response)
-
-#### Areas to Explore
-
-##### Option A: Explicit Methods
-
-```typescript
-// New method - consume an external stream
-async consumeStream(stream: AsyncIterable<StreamChunk>): Promise<void> {
-  for await (const chunk of stream) {
-    this.callbacksRef.current.onChunk(chunk)
-    this.processor.processChunk(chunk)
-  }
-}
-
-// New method - send without expecting response via connection
-async sendMessageToStream(content: string): Promise<UIMessage> {
-  const userMessage = this.processor.addUserMessage(content)
-  this.events.messageSent(userMessage.id, content)
-  return userMessage
-}
-```
-
-##### Option B: Session-Aware Connection Adapter
-
-Extend the connection adapter interface:
-
-```typescript
-interface SessionConnectionAdapter extends ConnectionAdapter {
-  // Subscribe to persistent stream
-  subscribe(): AsyncIterable<StreamChunk>
-
-  // Write message (doesn't return response stream)
-  write(message: UserMessage): Promise<void>
-
-  // URL renewal support
-  renewUrl?(): Promise<string>
-}
-```
-
-ChatClient detects session adapter and behaves accordingly.
-
-##### Option C: Always Session-Compatible with Extension Hooks
-
-Make ChatClient always work in a session-compatible way, with hooks for customization:
-
-```typescript
-interface ChatClientOptions {
-  // ... 
existing options
-
-  // Hook: customize how user messages are added (e.g., optimistic + pending state)
-  onUserMessageCreated?: (message: UIMessage) => UIMessage
-
-  // Hook: customize send behavior (e.g., write to proxy instead of connect)
-  sendHandler?: (message: UIMessage, messages: UIMessage[]) => Promise<void>
-
-  // Hook: customize how incoming messages are reconciled
-  reconcileMessage?: (incoming: UIMessage, existing: UIMessage | undefined) => UIMessage
-}
-```
-
-##### Integration with Durable Fetch
-
-The existing `fetchServerSentEvents` adapter supports a `fetchClient` option for custom fetch implementations. A durable fetch client can intercept requests and route through the proxy:
-
-```typescript
-const durableFetch = createDurableFetch({ proxyUrl: PROXY_URL })
-
-const connection = fetchServerSentEvents('/api/chat', {
-  fetchClient: durableFetch
-})
-```
-
-The question is how this integrates with session streams:
-
-1. Does `durableFetch` return a stream URL that the client then subscribes to?
-2. How does the subscription lifecycle map to the connection adapter interface?
-3. How are user messages written vs. responses consumed?
-
-**These integration patterns need further exploration.**
-
----
-
-### Phase 4: useChat Hook Updates
-
-**File**: `packages/typescript/ai-react/src/use-chat.ts`
-
-```typescript
-interface UseChatOptions {
-  // ... existing options
-
-  /**
-   * Session stream to consume. When provided, messages are consumed from
-   * this stream instead of using request-response flow.
-   */
-  sessionStream?: AsyncIterable<StreamChunk>
-
-  /**
-   * Handler for writing messages in session mode.
-   * Called instead of connection.connect() when sessionStream is provided.
-   */
-  onSendMessage?: (message: UIMessage) => Promise<void>
-
-  /**
-   * Called when initial history has been loaded from the stream.
-   */
-  onConnected?: (messageCount: number) => void
-}
-```
-
-**Rationale**: Clean session support at hook level. Fully backwards compatible.
-
----
-
-## Migration Path
-
-### For Existing Users
-
-No changes required. All existing code continues to work:
-
-- `useChat` with `connection` adapter works identically
-- Request-response flow unchanged
-- Types are backwards compatible (existing `'assistant'` role still valid)
-
-### For Session Stream Adopters
-
-1. Update to new version
-2. Provide `sessionStream` and `onSendMessage` options
-3. Set up durable stream infrastructure (proxy, stream client)
-
----
-
-## Open Questions
-
-### DX Design
-
-1. **consumeStream vs sessionStream option**: Should stream consumption be a method call or a configuration option?
-
-2. **Integration with connection adapters**: How should session streams integrate with the existing `fetchServerSentEvents` / durable fetch pattern? Is a new adapter type needed?
-
-3. **Optimistic updates**: How should the client handle optimistic user message display while waiting for stream confirmation? Extension hooks? Built-in pending state?
-
-4. **Loading state semantics**: With multiple concurrent messages, what does `isLoading` mean? Per-message loading state?
-
-### Protocol Alignment
-
-1. **MessagesSnapshot**: Should we support AG-UI's `MessagesSnapshot` event for initial state sync? How does this interact with streaming history?
-
-2. **Tool execution gating**: In multi-user streams, should we gate auto-tool-execution to only "our" runs? How do we identify message/run origin?
-
-### Infrastructure
-
-1. **URL renewal**: How should stream URL renewal (for expiring signatures) be exposed? Callback? Automatic retry?
-
-2. 
**Reconnection**: Should the client handle reconnection, or delegate to the stream client library? - ---- - -## Appendix: Current Code References - -### Key Files - -| File | Purpose | -|------|---------| -| `packages/typescript/ai/src/types.ts` | Core types including StreamChunk, AG-UI events | -| `packages/typescript/ai/src/activities/chat/stream/processor.ts` | StreamProcessor - chunk processing state machine | -| `packages/typescript/ai-client/src/chat-client.ts` | ChatClient - orchestrates connection and processor | -| `packages/typescript/ai-client/src/connection-adapters.ts` | Connection adapter implementations | -| `packages/typescript/ai-react/src/use-chat.ts` | React hook integration | - -### Current StreamProcessor Limitations - -1. **Single message tracking**: `currentAssistantMessageId` is singular -2. **ID generation**: Always generates new IDs, ignores chunk messageId -3. **Role assumption**: Hardcodes `'assistant'` role -4. **Tool correlation**: No explicit message-to-tool mapping (relies on temporal ordering) - -### Current ChatClient Limitations - -1. **Coupled send/receive**: `sendMessage` → `streamResponse` → `connect` → `processStream` -2. **Blocking sends**: `isLoading` prevents concurrent sends -3. **No external chunk injection**: Can't feed chunks from external stream - ---- - -## References - -- [AG-UI Protocol Events](https://docs.ag-ui.com/concepts/events) -- [Durable Streams](https://electric-sql.com/products/durable-streams) -- [TanStack AI Connection Adapters](https://tanstack.com/ai/latest/docs/guides/connection-adapters) From fd1c50caea6da8a709528b9635e9aa800ec5ded1 Mon Sep 17 00:00:00 2001 From: James Arthur Date: Tue, 10 Feb 2026 11:39:18 -0800 Subject: [PATCH 11/20] fix(ai, ai-client): address stream lifecycle edge cases from PR review - Guard against double onStreamEnd when RUN_FINISHED arrives before TEXT_MESSAGE_END - Clear dead waiters on subscribe exit to prevent chunk loss on reconnection - Reset transient processor state (messageStates, activeMessageIds, etc.) 
on MESSAGES_SNAPSHOT - Remove optimistic startAssistantMessage() from streamResponse(); let stream events create the message naturally via TEXT_MESSAGE_START or ensureAssistantMessage() - Clean up abort listeners on normal waiter resolution to prevent listener accumulation - Make handleStepFinishedEvent use ensureAssistantMessage() for backward compat with streams that lack TEXT_MESSAGE_START Co-Authored-By: Claude Opus 4.6 --- .../typescript/ai-client/src/chat-client.ts | 33 +++-- .../ai-client/src/session-adapter.ts | 9 +- .../ai-client/tests/session-adapter.test.ts | 58 ++++++++ .../src/activities/chat/stream/processor.ts | 9 +- .../ai/tests/stream-processor.test.ts | 124 ++++++++++++++++++ 5 files changed, 215 insertions(+), 18 deletions(-) diff --git a/packages/typescript/ai-client/src/chat-client.ts b/packages/typescript/ai-client/src/chat-client.ts index 650c1a2b..c5d7a7d1 100644 --- a/packages/typescript/ai-client/src/chat-client.ts +++ b/packages/typescript/ai-client/src/chat-client.ts @@ -104,6 +104,17 @@ export class ChatClient { }, onStreamStart: () => { this.setStatus('streaming') + const messages = this.processor.getMessages() + const lastAssistant = messages.findLast( + (m: UIMessage) => m.role === 'assistant', + ) + if (lastAssistant) { + this.currentMessageId = lastAssistant.id + this.events.messageAppended( + lastAssistant, + this.currentStreamId || undefined, + ) + } }, onStreamEnd: (message: UIMessage) => { this.callbacksRef.current.onFinish(message) @@ -269,6 +280,12 @@ export class ChatClient { if (signal.aborted) break this.callbacksRef.current.onChunk(chunk) this.processor.processChunk(chunk) + // RUN_FINISHED signals run completion — resolve processing + // (redundant if onStreamEnd already resolved it, harmless) + if (chunk.type === 'RUN_FINISHED') { + this.processingResolve?.() + this.processingResolve = null + } // Yield control back to event loop for UI updates await new Promise((resolve) => setTimeout(resolve, 0)) } @@ -444,21 +461,9 @@ export class ChatClient { // Clear the pending message body after use this.pendingMessageBody = undefined - // Generate stream ID and start assistant message + // Generate stream ID — assistant message will be created by stream events this.currentStreamId = this.generateUniqueId('stream') - const messageId = this.processor.startAssistantMessage() - this.currentMessageId = messageId - - const assistantMessage: UIMessage = { - id: messageId, - role: 'assistant', - parts: [], - createdAt: new Date(), - } - this.events.messageAppended( - assistantMessage, - this.currentStreamId || undefined, - ) + this.currentMessageId = null // Ensure subscription loop is running this.ensureSubscription() diff --git a/packages/typescript/ai-client/src/session-adapter.ts b/packages/typescript/ai-client/src/session-adapter.ts index 93805834..40b98361 100644 --- a/packages/typescript/ai-client/src/session-adapter.ts +++ b/packages/typescript/ai-client/src/session-adapter.ts @@ -63,10 +63,12 @@ export function createDefaultSession( chunk = buffer.shift()! 
} else {
           chunk = await new Promise<StreamChunk | null>((resolve) => {
-            waiters.push(resolve)
-            signal?.addEventListener('abort', () => resolve(null), {
-              once: true,
+            const onAbort = () => resolve(null)
+            waiters.push((c) => {
+              signal?.removeEventListener('abort', onAbort)
+              resolve(c)
             })
+            signal?.addEventListener('abort', onAbort, { once: true })
           })
         }
         if (chunk !== null) yield chunk
       }
       // Discard any chunks buffered after abort to prevent stale data
       // leaking into the next subscription
       buffer.length = 0
+      waiters.length = 0
     },
 
     async send(messages, data, signal) {
diff --git a/packages/typescript/ai-client/tests/session-adapter.test.ts b/packages/typescript/ai-client/tests/session-adapter.test.ts
index 11046766..65e8b4aa 100644
--- a/packages/typescript/ai-client/tests/session-adapter.test.ts
+++ b/packages/typescript/ai-client/tests/session-adapter.test.ts
@@ -211,4 +211,62 @@ describe('createDefaultSession', () => {
 
     expect(onConnect).toHaveBeenCalledWith([], undefined, abortController.signal)
   })
+
+  it('should not lose chunks after stop-then-resume subscription cycle', async () => {
+    const connection = createMockConnectionAdapter({ chunks: [] })
+    const session = createDefaultSession(connection)
+
+    // First subscription — abort while waiting (simulates stop)
+    const ac1 = new AbortController()
+    const iter1 = session.subscribe(ac1.signal)
+
+    // Start consuming — will block waiting for chunks
+    const result1Promise = (async () => {
+      const received: Array<StreamChunk> = []
+      for await (const chunk of iter1) {
+        received.push(chunk)
+      }
+      return received
+    })()
+
+    // Let the subscriber enter the wait path
+    await new Promise((resolve) => setTimeout(resolve, 10))
+
+    // Abort — this resolves the dead waiter with null
+    ac1.abort()
+    const received1 = await result1Promise
+    expect(received1).toEqual([])
+
+    // Second subscription — should work correctly
+    const ac2 = new AbortController()
+    const iter2 = session.subscribe(ac2.signal)
+
+    // Send a chunk — it should be delivered to the new subscriber
+    const testChunk: StreamChunk = {
+      type: 'TEXT_MESSAGE_CONTENT',
+      messageId: 'msg-1',
+      model: 'test',
+      timestamp: Date.now(),
+      delta: 'Hello',
+      content: 'Hello',
+    }
+
+    // Override connect to yield the test chunk
+    connection.connect = function* () {
+      yield testChunk
+    } as any
+
+    await session.send([], undefined)
+
+    const received2: Array<StreamChunk> = []
+    for await (const chunk of iter2) {
+      received2.push(chunk)
+      if (received2.length === 1) {
+        ac2.abort()
+      }
+    }
+
+    // The chunk should NOT be lost
+    expect(received2).toEqual([testChunk])
+  })
 })
diff --git a/packages/typescript/ai/src/activities/chat/stream/processor.ts b/packages/typescript/ai/src/activities/chat/stream/processor.ts
index 8d946d42..981ce194 100644
--- a/packages/typescript/ai/src/activities/chat/stream/processor.ts
+++ b/packages/typescript/ai/src/activities/chat/stream/processor.ts
@@ -678,6 +678,7 @@
     const { messageId } = chunk
     const state = this.getMessageState(messageId)
     if (!state) return
+    if (state.isComplete) return
 
     // Emit any pending text for this message
     if (state.currentSegmentText !== state.lastEmittedText) {
@@ -695,6 +696,10 @@
   private handleMessagesSnapshotEvent(
     chunk: Extract<StreamChunk, { type: 'MESSAGES_SNAPSHOT' }>,
   ): void {
     this.messages = [...chunk.messages]
+    this.messageStates.clear()
+    this.activeMessageIds.clear()
+    this.toolCallToMessage.clear()
+    this.pendingManualMessageId = null
     this.emitMessagesChange()
   }
@@ -1001,7 +1006,9 @@
   private 
handleStepFinishedEvent(
     chunk: Extract<StreamChunk, { type: 'STEP_FINISHED' }>,
   ): void {
-    const { messageId, state } = this.ensureAssistantMessage()
+    const { messageId, state } = this.ensureAssistantMessage(
+      this.getActiveAssistantMessageId() ?? undefined,
+    )
 
     const previous = state.thinkingContent
     let nextThinking = previous
diff --git a/packages/typescript/ai/tests/stream-processor.test.ts b/packages/typescript/ai/tests/stream-processor.test.ts
index 23912974..033afabc 100644
--- a/packages/typescript/ai/tests/stream-processor.test.ts
+++ b/packages/typescript/ai/tests/stream-processor.test.ts
@@ -2328,4 +2328,128 @@ describe('StreamProcessor', () => {
       }
     })
   })
+
+  describe('double onStreamEnd guard', () => {
+    it('should fire onStreamEnd exactly once when RUN_FINISHED arrives before TEXT_MESSAGE_END', () => {
+      const onStreamEnd = vi.fn()
+      const processor = new StreamProcessor({ events: { onStreamEnd } })
+
+      processor.processChunk({
+        type: 'TEXT_MESSAGE_START',
+        messageId: 'msg-1',
+        role: 'assistant',
+        timestamp: Date.now(),
+      } as StreamChunk)
+
+      processor.processChunk({
+        type: 'TEXT_MESSAGE_CONTENT',
+        messageId: 'msg-1',
+        delta: 'Hello',
+        timestamp: Date.now(),
+      } as StreamChunk)
+
+      // RUN_FINISHED fires first — calls finalizeStream which sets isComplete and fires onStreamEnd
+      processor.processChunk({
+        type: 'RUN_FINISHED',
+        model: 'test',
+        timestamp: Date.now(),
+        finishReason: 'stop',
+      } as StreamChunk)
+
+      expect(onStreamEnd).toHaveBeenCalledTimes(1)
+
+      // TEXT_MESSAGE_END arrives after — should NOT fire onStreamEnd again
+      processor.processChunk({
+        type: 'TEXT_MESSAGE_END',
+        messageId: 'msg-1',
+        timestamp: Date.now(),
+      } as StreamChunk)
+
+      expect(onStreamEnd).toHaveBeenCalledTimes(1)
+    })
+  })
+
+  describe('MESSAGES_SNAPSHOT resets transient state', () => {
+    it('should reset stale state and process subsequent stream events correctly', () => {
+      const onStreamEnd = vi.fn()
+      const processor = new StreamProcessor({ events: { onStreamEnd } })
+
+      // Simulate an active streaming session
+      processor.processChunk({
+        type: 'TEXT_MESSAGE_START',
+        messageId: 'msg-old',
+        role: 'assistant',
+        timestamp: Date.now(),
+      } as StreamChunk)
+
+      processor.processChunk({
+        type: 'TEXT_MESSAGE_CONTENT',
+        messageId: 'msg-old',
+        delta: 'Old content',
+        timestamp: Date.now(),
+      } as StreamChunk)
+
+      processor.processChunk({
+        type: 'TOOL_CALL_START',
+        toolCallId: 'tc-old',
+        toolName: 'oldTool',
+        parentMessageId: 'msg-old',
+        timestamp: Date.now(),
+      } as StreamChunk)
+
+      // MESSAGES_SNAPSHOT replaces everything (e.g., on reconnection)
+      processor.processChunk({
+        type: 'MESSAGES_SNAPSHOT',
+        messages: [
+          {
+            id: 'snap-user',
+            role: 'user',
+            parts: [{ type: 'text', content: 'Hello' }],
+            createdAt: new Date(),
+          },
+        ],
+        timestamp: Date.now(),
+      } as StreamChunk)
+
+      // Verify old messages are replaced
+      const messagesAfterSnapshot = processor.getMessages()
+      expect(messagesAfterSnapshot).toHaveLength(1)
+      expect(messagesAfterSnapshot[0]?.id).toBe('snap-user')
+
+      // New stream events should be processed correctly without stale state
+      processor.processChunk({
+        type: 'TEXT_MESSAGE_START',
+        messageId: 'msg-new',
+        role: 'assistant',
+        timestamp: Date.now(),
+      } as StreamChunk)
+
+      processor.processChunk({
+        type: 'TEXT_MESSAGE_CONTENT',
+        messageId: 'msg-new',
+        delta: 'New content',
+        timestamp: Date.now(),
+      } as StreamChunk)
+
+      processor.processChunk({
+        type: 'TEXT_MESSAGE_END',
+        messageId: 'msg-new',
+        timestamp: Date.now(),
+      } as StreamChunk)
+
+      const finalMessages = processor.getMessages() 
+ expect(finalMessages).toHaveLength(2) + expect(finalMessages[1]?.id).toBe('msg-new') + expect(finalMessages[1]?.parts[0]).toEqual({ + type: 'text', + content: 'New content', + }) + + // onStreamEnd fires from finalizeStream, not TEXT_MESSAGE_END + expect(onStreamEnd).not.toHaveBeenCalled() + processor.finalizeStream() + expect(onStreamEnd).toHaveBeenCalledTimes(1) + expect(onStreamEnd.mock.calls[0]![0].id).toBe('msg-new') + }) + }) }) From 8c628eea361754c7c125f2b59664ebd060373463 Mon Sep 17 00:00:00 2001 From: James Arthur Date: Tue, 10 Feb 2026 17:59:17 -0800 Subject: [PATCH 12/20] fix(ai-client): fix reload failures from stale stream state and waiter race Reset processor stream state (prepareAssistantMessage) in streamResponse() before the subscription loop, preventing stale messageStates from blocking new assistant message creation on reload. Rewrite createDefaultSession with per-subscribe queue isolation: each subscribe() synchronously installs fresh buffer/waiters, drains pre-buffered chunks via splice(0), and removes async cleanup that raced with new subscription cycles. Co-Authored-By: Claude Opus 4.6 --- .../typescript/ai-client/src/chat-client.ts | 5 ++ .../ai-client/src/session-adapter.ts | 61 ++++++++++++------- 2 files changed, 43 insertions(+), 23 deletions(-) diff --git a/packages/typescript/ai-client/src/chat-client.ts b/packages/typescript/ai-client/src/chat-client.ts index c5d7a7d1..2b7aa6ba 100644 --- a/packages/typescript/ai-client/src/chat-client.ts +++ b/packages/typescript/ai-client/src/chat-client.ts @@ -465,6 +465,11 @@ export class ChatClient { this.currentStreamId = this.generateUniqueId('stream') this.currentMessageId = null + // Reset processor stream state for new response — prevents stale + // messageStates entries (from a previous stream) from blocking + // creation of a new assistant message (e.g. after reload). + this.processor.prepareAssistantMessage() + // Ensure subscription loop is running this.ensureSubscription() diff --git a/packages/typescript/ai-client/src/session-adapter.ts b/packages/typescript/ai-client/src/session-adapter.ts index 40b98361..6272d46a 100644 --- a/packages/typescript/ai-client/src/session-adapter.ts +++ b/packages/typescript/ai-client/src/session-adapter.ts @@ -39,44 +39,59 @@ export interface SessionAdapter { * Wraps a ConnectionAdapter into a SessionAdapter using an async queue pattern. * send() calls connection.connect() and pushes chunks to the queue. * subscribe() yields chunks from the queue. + * + * Each subscribe() call synchronously replaces the active buffer/waiters + * so that concurrent send() calls write to the current subscription's queue. + * This prevents a race condition where an old subscription's async cleanup + * (clearing the shared buffer after abort) could destroy chunks intended + * for a new subscription. 
 */
 export function createDefaultSession(
   connection: ConnectionAdapter,
 ): SessionAdapter {
-  const buffer: Array<StreamChunk> = []
-  const waiters: Array<(chunk: StreamChunk | null) => void> = []
+  // Active buffer and waiters — replaced synchronously on each subscribe() call
+  let activeBuffer: Array<StreamChunk> = []
+  let activeWaiters: Array<(chunk: StreamChunk | null) => void> = []
 
   function push(chunk: StreamChunk): void {
-    const waiter = waiters.shift()
+    const waiter = activeWaiters.shift()
     if (waiter) {
       waiter(chunk)
     } else {
-      buffer.push(chunk)
+      activeBuffer.push(chunk)
     }
   }
 
   return {
-    async *subscribe(signal?: AbortSignal) {
-      while (!signal?.aborted) {
-        let chunk: StreamChunk | null
-        if (buffer.length > 0) {
-          chunk = buffer.shift()!
-        } else {
-          chunk = await new Promise<StreamChunk | null>((resolve) => {
-            const onAbort = () => resolve(null)
-            waiters.push((c) => {
-              signal?.removeEventListener('abort', onAbort)
-              resolve(c)
+    subscribe(signal?: AbortSignal): AsyncIterable<StreamChunk> {
+      // Drain any buffered chunks (e.g. from send() before subscribe()) into
+      // a fresh per-subscription buffer. splice(0) atomically empties the old
+      // array, so a previous subscription's local reference becomes empty.
+      const myBuffer: Array<StreamChunk> = activeBuffer.splice(0)
+      const myWaiters: Array<(chunk: StreamChunk | null) => void> = []
+      activeBuffer = myBuffer
+      activeWaiters = myWaiters
+
+      return (async function* () {
+        while (!signal?.aborted) {
+          let chunk: StreamChunk | null
+          if (myBuffer.length > 0) {
+            chunk = myBuffer.shift()!
+          } else {
+            chunk = await new Promise<StreamChunk | null>((resolve) => {
+              const onAbort = () => resolve(null)
+              myWaiters.push((c) => {
+                signal?.removeEventListener('abort', onAbort)
+                resolve(c)
+              })
+              signal?.addEventListener('abort', onAbort, { once: true })
             })
-            signal?.addEventListener('abort', onAbort, { once: true })
-          })
+          }
+          if (chunk !== null) yield chunk
         }
-        if (chunk !== null) yield chunk
-      }
-      // Discard any chunks buffered after abort to prevent stale data
-      // leaking into the next subscription
-      buffer.length = 0
-      waiters.length = 0
+        // No shared-state cleanup needed — myBuffer/myWaiters are local
+        // and will be garbage collected when this generator is released. 
+ })() }, async send(messages, data, signal) { From 36d6a933ddc05562508e43e5c3f170d95a3c7b96 Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Wed, 11 Feb 2026 02:29:27 +0000 Subject: [PATCH 13/20] ci: apply automated fixes --- packages/typescript/ai-client/src/chat-client.ts | 5 +---- packages/typescript/ai-client/src/index.ts | 5 +---- .../ai-client/tests/session-adapter.test.ts | 6 +++++- .../ai/src/activities/chat/stream/processor.ts | 13 +++++-------- 4 files changed, 12 insertions(+), 17 deletions(-) diff --git a/packages/typescript/ai-client/src/chat-client.ts b/packages/typescript/ai-client/src/chat-client.ts index 2b7aa6ba..e3621fc6 100644 --- a/packages/typescript/ai-client/src/chat-client.ts +++ b/packages/typescript/ai-client/src/chat-client.ts @@ -531,10 +531,7 @@ export class ChatClient { try { await this.checkForContinuation() } catch (error) { - console.error( - 'Failed to continue flow after tool result:', - error, - ) + console.error('Failed to continue flow after tool result:', error) } } } diff --git a/packages/typescript/ai-client/src/index.ts b/packages/typescript/ai-client/src/index.ts index b7a77a47..0ad066a9 100644 --- a/packages/typescript/ai-client/src/index.ts +++ b/packages/typescript/ai-client/src/index.ts @@ -30,10 +30,7 @@ export { type ConnectionAdapter, type FetchConnectionOptions, } from './connection-adapters' -export { - createDefaultSession, - type SessionAdapter, -} from './session-adapter' +export { createDefaultSession, type SessionAdapter } from './session-adapter' // Re-export message converters from @tanstack/ai export { diff --git a/packages/typescript/ai-client/tests/session-adapter.test.ts b/packages/typescript/ai-client/tests/session-adapter.test.ts index 65e8b4aa..0fed006a 100644 --- a/packages/typescript/ai-client/tests/session-adapter.test.ts +++ b/packages/typescript/ai-client/tests/session-adapter.test.ts @@ -209,7 +209,11 @@ describe('createDefaultSession', () => { const abortController = new AbortController() await session.send([], undefined, abortController.signal) - expect(onConnect).toHaveBeenCalledWith([], undefined, abortController.signal) + expect(onConnect).toHaveBeenCalledWith( + [], + undefined, + abortController.signal, + ) }) it('should not lose chunks after stop-then-resume subscription cycle', async () => { diff --git a/packages/typescript/ai/src/activities/chat/stream/processor.ts b/packages/typescript/ai/src/activities/chat/stream/processor.ts index 981ce194..8b1388c8 100644 --- a/packages/typescript/ai/src/activities/chat/stream/processor.ts +++ b/packages/typescript/ai/src/activities/chat/stream/processor.ts @@ -17,10 +17,7 @@ * @see docs/chat-architecture.md — Canonical reference for AG-UI chunk ordering, * adapter contract, single-shot flows, and expected UIMessage output. 
*/ -import { - generateMessageId, - uiMessageToModelMessages, -} from '../messages.js' +import { generateMessageId, uiMessageToModelMessages } from '../messages.js' import { defaultJSONParser } from './json-parser' import { updateTextPart, @@ -520,9 +517,7 @@ export class StreamProcessor { /** * Get the MessageStreamState for a message */ - private getMessageState( - messageId: string, - ): MessageStreamState | undefined { + private getMessageState(messageId: string): MessageStreamState | undefined { return this.messageStates.get(messageId) } @@ -638,7 +633,9 @@ export class StreamProcessor { // If tool calls happened since last text, this TEXT_MESSAGE_START // signals a new text segment — reset segment accumulation if (existingState.hasToolCallsSinceTextStart) { - if (existingState.currentSegmentText !== existingState.lastEmittedText) { + if ( + existingState.currentSegmentText !== existingState.lastEmittedText + ) { this.emitTextUpdateForMessage(messageId) } existingState.currentSegmentText = '' From 2abc6c7ba78b7c32b6347e4260ed949c523c2a71 Mon Sep 17 00:00:00 2001 From: James Arthur Date: Tue, 10 Feb 2026 19:02:36 -0800 Subject: [PATCH 14/20] fix(ai): resolve eslint errors in stream processor Remove unnecessary `chunk.delta !== undefined` condition (delta is always a string on TextMessageContentEvent) and remove redundant `!` non-null assertion inside an already-narrowed `if` block. Co-Authored-By: Claude Opus 4.6 --- .../typescript/ai/src/activities/chat/stream/processor.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/typescript/ai/src/activities/chat/stream/processor.ts b/packages/typescript/ai/src/activities/chat/stream/processor.ts index 8b1388c8..3c2eb36f 100644 --- a/packages/typescript/ai/src/activities/chat/stream/processor.ts +++ b/packages/typescript/ai/src/activities/chat/stream/processor.ts @@ -743,7 +743,7 @@ export class StreamProcessor { // Prefer delta over content - delta is the incremental change // Check for both undefined and empty string to avoid "undefined" string concatenation - if (chunk.delta !== undefined && chunk.delta !== '') { + if (chunk.delta !== '') { nextText = currentText + chunk.delta } else if (chunk.content !== undefined && chunk.content !== '') { // Fallback: use content if delta is not provided @@ -1260,7 +1260,7 @@ export class StreamProcessor { if (lastAssistantMessage && !this.hasError) { if (this.isWhitespaceOnlyMessage(lastAssistantMessage)) { this.messages = this.messages.filter( - (m) => m.id !== lastAssistantMessage!.id, + (m) => m.id !== lastAssistantMessage.id, ) this.emitMessagesChange() return From c5e1aa3e5cdd4c366f188cea59e2cbc2f06a7508 Mon Sep 17 00:00:00 2001 From: James Arthur Date: Tue, 10 Feb 2026 19:05:24 -0800 Subject: [PATCH 15/20] fix(ai-client): resolve eslint errors in chat-client and session-adapter Fix import ordering: move value import `createDefaultSession` above type-only imports. Convert shorthand method signatures to function property style in the SessionAdapter interface. 
Co-Authored-By: Claude Opus 4.6
---
 packages/typescript/ai-client/src/chat-client.ts     | 4 ++--
 packages/typescript/ai-client/src/session-adapter.ts | 6 +++---
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/packages/typescript/ai-client/src/chat-client.ts b/packages/typescript/ai-client/src/chat-client.ts
index e3621fc6..b89fde97 100644
--- a/packages/typescript/ai-client/src/chat-client.ts
+++ b/packages/typescript/ai-client/src/chat-client.ts
@@ -4,6 +4,7 @@ import {
   normalizeToUIMessage,
 } from '@tanstack/ai'
 import { DefaultChatClientEventEmitter } from './events'
+import { createDefaultSession } from './session-adapter'
 import type {
   AnyClientTool,
   ContentPart,
@@ -11,9 +12,8 @@ import type {
   StreamChunk,
 } from '@tanstack/ai'
 import type { ConnectionAdapter } from './connection-adapters'
-import type { SessionAdapter } from './session-adapter'
-import { createDefaultSession } from './session-adapter'
 import type { ChatClientEventEmitter } from './events'
+import type { SessionAdapter } from './session-adapter'
 import type {
   ChatClientOptions,
   ChatClientState,
diff --git a/packages/typescript/ai-client/src/session-adapter.ts b/packages/typescript/ai-client/src/session-adapter.ts
index 6272d46a..eefc5c80 100644
--- a/packages/typescript/ai-client/src/session-adapter.ts
+++ b/packages/typescript/ai-client/src/session-adapter.ts
@@ -21,18 +21,18 @@ export interface SessionAdapter {
    * to hydrate the conversation, then subscribe to the live stream
    * from the appropriate offset.
    */
-  subscribe(signal?: AbortSignal): AsyncIterable<StreamChunk>
+  subscribe: (signal?: AbortSignal) => AsyncIterable<StreamChunk>
 
   /**
    * Send messages to the session.
    * For durable sessions, the proxy writes to the stream and forwards to the API.
    * The response arrives through subscribe(), not as a return value.
    */
-  send(
-    messages: Array<UIMessage>,
-    data?: Record<string, unknown>,
-    signal?: AbortSignal,
-  ): Promise<void>
+  send: (
+    messages: Array<UIMessage>,
+    data?: Record<string, unknown>,
+    signal?: AbortSignal,
+  ) => Promise<void>
 }

From 537b73ca814481d0e74cf05841dde4670dfb6080 Mon Sep 17 00:00:00 2001
From: James Arthur
Date: Tue, 10 Feb 2026 19:05:46 -0800
Subject: [PATCH 16/20] fix(ai-client): propagate send() errors to subscribe() consumers

Wrap createDefaultSession's send() in try/catch and push a RUN_ERROR
AG-UI event to the queue before re-throwing, so subscribe() consumers
learn about connection failures through the standard protocol.

Also resolve processingResolve on RUN_ERROR in consumeSubscription
(same as RUN_FINISHED) to prevent hangs.

Tests updated: error assertions now check message content rather than
referential identity, since errors flowing through RUN_ERROR create new
Error instances from the message string.
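
For illustration (not part of this patch's diff), a subscribe() consumer now
observes the failure as a protocol event rather than a dangling stream;
`showError` below is a hypothetical app callback:

```typescript
for await (const chunk of session.subscribe(signal)) {
  if (chunk.type === 'RUN_ERROR') {
    // Pushed by send()'s catch block before it re-throws
    showError(chunk.error.message)
    continue
  }
  processor.processChunk(chunk)
}
```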
Co-Authored-By: Claude Opus 4.6 --- .../typescript/ai-client/src/chat-client.ts | 4 ++-- .../ai-client/src/session-adapter.ts | 21 ++++++++++++++++--- .../ai-client/tests/chat-client.test.ts | 10 ++++++--- 3 files changed, 27 insertions(+), 8 deletions(-) diff --git a/packages/typescript/ai-client/src/chat-client.ts b/packages/typescript/ai-client/src/chat-client.ts index b89fde97..1575d6e5 100644 --- a/packages/typescript/ai-client/src/chat-client.ts +++ b/packages/typescript/ai-client/src/chat-client.ts @@ -280,9 +280,9 @@ export class ChatClient { if (signal.aborted) break this.callbacksRef.current.onChunk(chunk) this.processor.processChunk(chunk) - // RUN_FINISHED signals run completion — resolve processing + // RUN_FINISHED / RUN_ERROR signal run completion — resolve processing // (redundant if onStreamEnd already resolved it, harmless) - if (chunk.type === 'RUN_FINISHED') { + if (chunk.type === 'RUN_FINISHED' || chunk.type === 'RUN_ERROR') { this.processingResolve?.() this.processingResolve = null } diff --git a/packages/typescript/ai-client/src/session-adapter.ts b/packages/typescript/ai-client/src/session-adapter.ts index eefc5c80..5fda2aa0 100644 --- a/packages/typescript/ai-client/src/session-adapter.ts +++ b/packages/typescript/ai-client/src/session-adapter.ts @@ -95,9 +95,24 @@ export function createDefaultSession( }, async send(messages, data, signal) { - const stream = connection.connect(messages, data, signal) - for await (const chunk of stream) { - push(chunk) + try { + const stream = connection.connect(messages, data, signal) + for await (const chunk of stream) { + push(chunk) + } + } catch (err) { + // Push a RUN_ERROR event so subscribe() consumers learn about the + // failure through the standard AG-UI protocol, then re-throw so + // send() callers (e.g. streamResponse) can also handle it. + push({ + type: 'RUN_ERROR', + timestamp: Date.now(), + error: { + message: + err instanceof Error ? err.message : 'Unknown error in send()', + }, + }) + throw err } }, } diff --git a/packages/typescript/ai-client/tests/chat-client.test.ts b/packages/typescript/ai-client/tests/chat-client.test.ts index eaf2a778..ede3e2da 100644 --- a/packages/typescript/ai-client/tests/chat-client.test.ts +++ b/packages/typescript/ai-client/tests/chat-client.test.ts @@ -393,8 +393,11 @@ describe('ChatClient', () => { await client.sendMessage('Hello') - expect(onError).toHaveBeenCalledWith(error) - expect(client.getError()).toBe(error) + expect(onError).toHaveBeenCalled() + expect(onError.mock.calls[0]![0]).toBeInstanceOf(Error) + expect(onError.mock.calls[0]![0].message).toBe('Connection failed') + expect(client.getError()).toBeInstanceOf(Error) + expect(client.getError()?.message).toBe('Connection failed') }) }) @@ -506,7 +509,8 @@ describe('ChatClient', () => { await client.sendMessage('Hello') - expect(client.getError()).toBe(error) + expect(client.getError()).toBeInstanceOf(Error) + expect(client.getError()?.message).toBe('Network error') expect(client.getStatus()).toBe('error') }) From fc52ef7ffdee589fa7e1b306775fdb395fa02954 Mon Sep 17 00:00:00 2001 From: James Arthur Date: Wed, 11 Feb 2026 12:38:11 -0800 Subject: [PATCH 17/20] fix(ai): map 'tool' role to 'assistant' in message state to fix lookups The stream processor mapped 'tool' to 'assistant' for UIMessage but stored the raw 'tool' role in MessageStreamState. This caused getActiveAssistantMessageId() and getCurrentAssistantMessageId() to miss tool-role messages, so subsequent stream events couldn't attach to the existing message. 
Now the uiRole mapping is applied consistently across all three cases in handleTextMessageStartEvent. Co-Authored-By: Claude Opus 4.6 --- .../ai/src/activities/chat/stream/processor.ts | 18 ++++++++++-------- .../ai/src/activities/chat/stream/types.ts | 2 +- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/packages/typescript/ai/src/activities/chat/stream/processor.ts b/packages/typescript/ai/src/activities/chat/stream/processor.ts index 3c2eb36f..e87010e5 100644 --- a/packages/typescript/ai/src/activities/chat/stream/processor.ts +++ b/packages/typescript/ai/src/activities/chat/stream/processor.ts @@ -496,7 +496,7 @@ export class StreamProcessor { */ private createMessageState( messageId: string, - role: 'user' | 'assistant' | 'system' | 'tool', + role: 'user' | 'assistant' | 'system', ): MessageStreamState { const state: MessageStreamState = { id: messageId, @@ -588,6 +588,12 @@ export class StreamProcessor { ): void { const { messageId, role } = chunk + // Map 'tool' role to 'assistant' for both UIMessage and MessageStreamState + // (UIMessage doesn't support 'tool' role, and lookups like + // getActiveAssistantMessageId() check state.role === 'assistant') + const uiRole: 'system' | 'user' | 'assistant' = + role === 'tool' ? 'assistant' : role + // Case 1: A manual message was created via startAssistantMessage() if (this.pendingManualMessageId) { const pendingId = this.pendingManualMessageId @@ -614,7 +620,7 @@ export class StreamProcessor { // Ensure state exists if (!this.messageStates.has(messageId)) { - this.createMessageState(messageId, role) + this.createMessageState(messageId, uiRole) this.activeMessageIds.add(messageId) } @@ -627,7 +633,7 @@ export class StreamProcessor { if (existingMsg) { this.activeMessageIds.add(messageId) if (!this.messageStates.has(messageId)) { - this.createMessageState(messageId, role) + this.createMessageState(messageId, uiRole) } else { const existingState = this.messageStates.get(messageId)! // If tool calls happened since last text, this TEXT_MESSAGE_START @@ -647,10 +653,6 @@ export class StreamProcessor { } // Case 3: New message from the stream - // Map 'tool' role to 'assistant' for UIMessage (UIMessage doesn't support 'tool' role) - const uiRole: 'system' | 'user' | 'assistant' = - role === 'tool' ? 
'assistant' : role - const newMessage: UIMessage = { id: messageId, role: uiRole, @@ -659,7 +661,7 @@ export class StreamProcessor { } this.messages = [...this.messages, newMessage] - this.createMessageState(messageId, role) + this.createMessageState(messageId, uiRole) this.activeMessageIds.add(messageId) this.events.onStreamStart?.() diff --git a/packages/typescript/ai/src/activities/chat/stream/types.ts b/packages/typescript/ai/src/activities/chat/stream/types.ts index 8bde07b7..c1806238 100644 --- a/packages/typescript/ai/src/activities/chat/stream/types.ts +++ b/packages/typescript/ai/src/activities/chat/stream/types.ts @@ -52,7 +52,7 @@ export interface ChunkStrategy { */ export interface MessageStreamState { id: string - role: 'user' | 'assistant' | 'system' | 'tool' + role: 'user' | 'assistant' | 'system' totalTextContent: string currentSegmentText: string lastEmittedText: string From ed1cddb066856d630759aa464429aafaedc7135e Mon Sep 17 00:00:00 2001 From: James Arthur Date: Thu, 12 Feb 2026 12:08:05 -0800 Subject: [PATCH 18/20] fix(ai): normalize chunk.delta to avoid "undefined" string concatenation When chunk.delta was undefined, the check `chunk.delta !== ''` evaluated to true, causing "undefined" to be concatenated into nextText. Use `chunk.delta ?? ''` to normalize before comparison, matching the safe pattern already used in handleToolCallArgsEvent. Co-Authored-By: Claude Opus 4.6 --- .../typescript/ai/src/activities/chat/stream/processor.ts | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/packages/typescript/ai/src/activities/chat/stream/processor.ts b/packages/typescript/ai/src/activities/chat/stream/processor.ts index e87010e5..0aa09f87 100644 --- a/packages/typescript/ai/src/activities/chat/stream/processor.ts +++ b/packages/typescript/ai/src/activities/chat/stream/processor.ts @@ -744,9 +744,10 @@ export class StreamProcessor { let nextText = currentText // Prefer delta over content - delta is the incremental change - // Check for both undefined and empty string to avoid "undefined" string concatenation - if (chunk.delta !== '') { - nextText = currentText + chunk.delta + // Normalize to empty string to avoid "undefined" string concatenation + const delta = chunk.delta ?? '' + if (delta !== '') { + nextText = currentText + delta } else if (chunk.content !== undefined && chunk.content !== '') { // Fallback: use content if delta is not provided if (chunk.content.startsWith(currentText)) { From fd7c22634fefffea5bece09633cc90cf1889c9f9 Mon Sep 17 00:00:00 2001 From: James Arthur Date: Thu, 12 Feb 2026 12:41:13 -0800 Subject: [PATCH 19/20] fix(ai): use || instead of ?? for chunk.delta fallback to satisfy eslint The no-unnecessary-condition rule flags ?? since TypeScript types delta as string. Using || preserves runtime safety and matches existing patterns. Co-Authored-By: Claude Opus 4.6 --- packages/typescript/ai/src/activities/chat/stream/processor.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/typescript/ai/src/activities/chat/stream/processor.ts b/packages/typescript/ai/src/activities/chat/stream/processor.ts index 0aa09f87..5a7adaac 100644 --- a/packages/typescript/ai/src/activities/chat/stream/processor.ts +++ b/packages/typescript/ai/src/activities/chat/stream/processor.ts @@ -745,7 +745,7 @@ export class StreamProcessor { // Prefer delta over content - delta is the incremental change // Normalize to empty string to avoid "undefined" string concatenation - const delta = chunk.delta ?? 
''
+    const delta = chunk.delta || ''
     if (delta !== '') {
       nextText = currentText + delta
     } else if (chunk.content !== undefined && chunk.content !== '') {

From 64f55172c73838c60ac40ee2fc15e3e484660a9b Mon Sep 17 00:00:00 2001
From: James Arthur
Date: Thu, 12 Feb 2026 12:55:13 -0800
Subject: [PATCH 20/20] fix(ai): reset stream flags on MESSAGES_SNAPSHOT to avoid stale state

handleMessagesSnapshotEvent was clearing maps but not resetting isDone,
hasError, and finishReason. Use resetStreamState() which handles all of
these, ensuring finalizeStream() sees fresh state after a snapshot.

Co-Authored-By: Claude Opus 4.6
---
 .../typescript/ai/src/activities/chat/stream/processor.ts | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/packages/typescript/ai/src/activities/chat/stream/processor.ts b/packages/typescript/ai/src/activities/chat/stream/processor.ts
index 5a7adaac..555c46d4 100644
--- a/packages/typescript/ai/src/activities/chat/stream/processor.ts
+++ b/packages/typescript/ai/src/activities/chat/stream/processor.ts
@@ -694,11 +694,8 @@
   private handleMessagesSnapshotEvent(
     chunk: Extract<StreamChunk, { type: 'MESSAGES_SNAPSHOT' }>,
   ): void {
+    this.resetStreamState()
     this.messages = [...chunk.messages]
-    this.messageStates.clear()
-    this.activeMessageIds.clear()
-    this.toolCallToMessage.clear()
-    this.pendingManualMessageId = null
     this.emitMessagesChange()
   }