From 10b8eba4be04769dfc3e4533d30db543cfbd0040 Mon Sep 17 00:00:00 2001 From: jeremyphilemon Date: Fri, 25 Apr 2025 09:22:32 -0700 Subject: [PATCH 01/20] feat (ui/react): support resuming an ongoing stream --- .changeset/nine-pillows-hug.md | 6 + content/docs/04-ai-sdk-ui/02-chatbot.mdx | 71 +++++++ examples/next-openai/.gitignore | 3 +- .../app/api/use-chat-resume/route.ts | 102 +++++++++ .../app/use-chat-resume/[id]/page.tsx | 14 ++ .../next-openai/app/use-chat-resume/chat.tsx | 109 ++++++++++ .../next-openai/app/use-chat-resume/page.tsx | 8 + examples/next-openai/package.json | 12 +- examples/next-openai/util/chat-store.ts | 50 ++++- packages/react/src/use-chat.ts | 106 ++++++++++ packages/react/src/use-chat.ui.test.tsx | 50 +++++ packages/ui-utils/src/call-chat-api.ts | 88 ++++++++ packages/ui-utils/src/index.ts | 2 +- pnpm-lock.yaml | 196 +++++++++++++++--- turbo.json | 40 +++- 15 files changed, 816 insertions(+), 41 deletions(-) create mode 100644 .changeset/nine-pillows-hug.md create mode 100644 examples/next-openai/app/api/use-chat-resume/route.ts create mode 100644 examples/next-openai/app/use-chat-resume/[id]/page.tsx create mode 100644 examples/next-openai/app/use-chat-resume/chat.tsx create mode 100644 examples/next-openai/app/use-chat-resume/page.tsx diff --git a/.changeset/nine-pillows-hug.md b/.changeset/nine-pillows-hug.md new file mode 100644 index 000000000000..5ff42392b2e0 --- /dev/null +++ b/.changeset/nine-pillows-hug.md @@ -0,0 +1,6 @@ +--- +'@ai-sdk/ui-utils': patch +'@ai-sdk/react': patch +--- + +feat (ui/react): support resuming an ongoing stream diff --git a/content/docs/04-ai-sdk-ui/02-chatbot.mdx b/content/docs/04-ai-sdk-ui/02-chatbot.mdx index 8611213b06ba..6494a2d06c13 100644 --- a/content/docs/04-ai-sdk-ui/02-chatbot.mdx +++ b/content/docs/04-ai-sdk-ui/02-chatbot.mdx @@ -634,6 +634,77 @@ messages.map(message => ( )); ``` +## Resumable Streams (Experimental) + +The `useChat` hook has experimental support for resuming an ongoing chat 
generation stream by any client, either after a network disconnect or reloading the chat. This can be useful for building applications that involve long-running conversations or for ensuring that messages are not lost in case of network failures. + +The following are the pre-requisites for your chat application to support resumable streams: + +- Installing the [`resumable-stream`](https://www.npmjs.com/package/resumable-stream) package that helps create and manage the publisher/subscriber mechanism of the streams. +- A [Redis](https://vercel.com/marketplace/redis) instance to store the stream state. +- A table that tracks the stream IDs associated with a chat. + +To resume a chat stream, you can use the `experimental_resume` function returned by the `useChat` hook. You will typically call this function during the initial mount of the hook. + +``` +'use client' + +export function Chat() { + const { experimental_resume } = useChat({id}); + + useEffect(() => { + experimental_resume(); + + // we use an empty dependency array to + // ensure this effect runs only once + }, []) + + return ( +
+ + +
+ ) +} +``` + +The `experimental_resume` function makes a `GET` request to the api endpoint you've initialized the hook with (or `/api/chat` by default) and streams the contents of the stream if it is active or fails silently if it has ended. + +The `GET` request automatically appends the `chatId` query parameter to the URL to help identify the chat the request belongs to. Using the `chatId`, you can look up the most recent stream ID from the database and resume the stream. As a result, it is important to specify the `id` parameter in the `useChat` hook. + +Earlier, you must've implemented the `POST` method for `/api/chat` route to create new chat generations. When using `experimental_resume`, you must also implement the `GET` method for `/api/chat` route to resume a stream if it is active or fails silently if it has ended. + +``` +export async function GET(request: Request) { + const { searchParams } = new URL(request.url); + const chatId = searchParams.get('chatId'); + + if (!chatId) { + return new Response('id is required', { status: 400 }); + } + + const streamIds = await loadStreams(chatId); + + if (!streamIds.length) { + return new Response('No streams found', { status: 404 }); + } + + const recentStreamId = streamIds.at(-1); + + if (!recentStreamId) { + return new Response('No recent stream found', { status: 404 }); + } + + const emptyDataStream = createDataStream({ + execute: () => {}, + }); + + return new Response( + await streamContext.resumableStream(recentStreamId, () => emptyDataStream), + ); +} +``` + ## Attachments (Experimental) The `useChat` hook supports sending attachments along with a message as well as rendering them on the client. This can be useful for building applications that involve sending images, files, or other media content to the AI provider. 
diff --git a/examples/next-openai/.gitignore b/examples/next-openai/.gitignore index 704a855f25d4..d186e8809dfc 100644 --- a/examples/next-openai/.gitignore +++ b/examples/next-openai/.gitignore @@ -34,5 +34,6 @@ yarn-error.log* *.tsbuildinfo next-env.d.ts -# chat persistence +# persistence .chats +.streams diff --git a/examples/next-openai/app/api/use-chat-resume/route.ts b/examples/next-openai/app/api/use-chat-resume/route.ts new file mode 100644 index 000000000000..0afbf26f9557 --- /dev/null +++ b/examples/next-openai/app/api/use-chat-resume/route.ts @@ -0,0 +1,102 @@ +import { + appendMessageToChat, + appendStreamId, + createChat, + loadStreams, + saveChat, +} from '@/util/chat-store'; +import { openai } from '@ai-sdk/openai'; +import { + appendResponseMessages, + createDataStream, + generateId, + Message, + streamText, +} from 'ai'; +import { after } from 'next/server'; +import { createResumableStreamContext } from 'resumable-stream'; + +// Allow streaming responses up to 30 seconds +export const maxDuration = 30; + +const redisUrl = process.env.KV_URL; + +if (!redisUrl) { + throw new Error('KV_URL environment variable is not set'); +} + +const streamContext = createResumableStreamContext({ + waitUntil: after, +}); + +export async function POST(req: Request) { + const { id, messages }: { id: string; messages: Message[] } = + await req.json(); + + const streamId = generateId(); + + const recentUserMessage = messages + .filter(message => message.role === 'user') + .at(-1); + + if (!recentUserMessage) { + throw new Error('No recent user message found'); + } + + await appendMessageToChat({ chatId: id, message: recentUserMessage }); + + await appendStreamId({ chatId: id, streamId }); + + const stream = createDataStream({ + execute: dataStream => { + const result = streamText({ + model: openai('gpt-4o'), + messages, + onFinish: async ({ response }) => { + await saveChat({ + id, + messages: appendResponseMessages({ + messages, + responseMessages: response.messages, + 
}), + }); + }, + }); + + result.mergeIntoDataStream(dataStream); + }, + }); + + return new Response( + await streamContext.resumableStream(streamId, () => stream), + ); +} + +export async function GET(request: Request) { + const { searchParams } = new URL(request.url); + const chatId = searchParams.get('chatId'); + + if (!chatId) { + return new Response('id is required', { status: 400 }); + } + + const streamIds = await loadStreams(chatId); + + if (!streamIds.length) { + return new Response('No streams found', { status: 404 }); + } + + const recentStreamId = streamIds.at(-1); + + if (!recentStreamId) { + return new Response('No recent stream found', { status: 404 }); + } + + const emptyDataStream = createDataStream({ + execute: () => {}, + }); + + return new Response( + await streamContext.resumableStream(recentStreamId, () => emptyDataStream), + ); +} diff --git a/examples/next-openai/app/use-chat-resume/[id]/page.tsx b/examples/next-openai/app/use-chat-resume/[id]/page.tsx new file mode 100644 index 000000000000..85ac133795d7 --- /dev/null +++ b/examples/next-openai/app/use-chat-resume/[id]/page.tsx @@ -0,0 +1,14 @@ +import { loadChat } from '@/util/chat-store'; +import { Chat } from '../chat'; + +export default async function Page({ + params, +}: { + params: Promise<{ id: string }>; +}) { + const { id } = await params; + + const messages = await loadChat(id); + + return ; +} diff --git a/examples/next-openai/app/use-chat-resume/chat.tsx b/examples/next-openai/app/use-chat-resume/chat.tsx new file mode 100644 index 000000000000..e920eae6c807 --- /dev/null +++ b/examples/next-openai/app/use-chat-resume/chat.tsx @@ -0,0 +1,109 @@ +'use client'; + +import { useChat } from '@ai-sdk/react'; +import { Message } from 'ai'; +import Link from 'next/link'; +import { useEffect } from 'react'; + +export function Chat({ + chatId, + autoResume, + initialMessages = [], +}: { + chatId: string; + autoResume: boolean; + initialMessages: Message[]; +}) { + const { + error, + input, 
+ status, + handleInputChange, + handleSubmit, + messages, + reload, + stop, + experimental_resume, + } = useChat({ + id: chatId, + api: '/api/use-chat-resume', + initialMessages, + sendExtraMessageFields: true, + onError: error => { + console.error('Error streaming text:', error); + }, + }); + + useEffect(() => { + if (autoResume) { + experimental_resume(); + } + }, []); + + return ( +
+ + Chat Id: {chatId} + + +
Status: {status}
+ + {messages.map(message => ( +
+
+ {message.role === 'user' ? 'User: ' : 'AI: '} +
+ +
+
{message.id}
+ {message.parts + .filter(part => part.type !== 'source') + .map((part, partIndex) => { + if (part.type === 'text') { + return ( +
{part.text}
+ ); + } + })} +
+
+ ))} + + {(status === 'submitted' || status === 'streaming') && ( +
+ {status === 'submitted' &&
Loading...
} + +
+ )} + + {error && ( +
+
An error occurred.
+ +
+ )} + +
+ +
+
+ ); +} diff --git a/examples/next-openai/app/use-chat-resume/page.tsx b/examples/next-openai/app/use-chat-resume/page.tsx new file mode 100644 index 000000000000..dc60255f6769 --- /dev/null +++ b/examples/next-openai/app/use-chat-resume/page.tsx @@ -0,0 +1,8 @@ +import { Chat } from './chat'; +import { generateId } from 'ai'; + +export default function Page() { + const chatId = generateId(32); + + return ; +} diff --git a/examples/next-openai/package.json b/examples/next-openai/package.json index a673f7596fb4..792620118761 100644 --- a/examples/next-openai/package.json +++ b/examples/next-openai/package.json @@ -10,14 +10,22 @@ }, "dependencies": { "@ai-sdk/anthropic": "1.2.10", +<<<<<<< Updated upstream "@ai-sdk/deepseek": "0.2.13", "@ai-sdk/fireworks": "0.2.13", "@ai-sdk/openai": "1.3.19", "@ai-sdk/google": "1.2.13", "@ai-sdk/google-vertex": "2.2.17", +======= + "@ai-sdk/deepseek": "0.2.12", + "@ai-sdk/fireworks": "0.2.12", + "@ai-sdk/google": "1.2.12", + "@ai-sdk/google-vertex": "2.2.16", + "@ai-sdk/openai": "1.3.16", +>>>>>>> Stashed changes "@ai-sdk/perplexity": "1.1.7", - "@ai-sdk/ui-utils": "1.2.8", "@ai-sdk/react": "1.2.9", + "@ai-sdk/ui-utils": "1.2.8", "@vercel/blob": "^0.26.0", "ai": "4.3.10", "next": "latest", @@ -25,6 +33,8 @@ "react": "^18", "react-dom": "^18", "react-markdown": "9.0.1", + "redis": "^4.7.0", + "resumable-stream": "^2.0.0", "zod": "3.23.8" }, "devDependencies": { diff --git a/examples/next-openai/util/chat-store.ts b/examples/next-openai/util/chat-store.ts index 6f58a23edcba..319ab3e299c4 100644 --- a/examples/next-openai/util/chat-store.ts +++ b/examples/next-openai/util/chat-store.ts @@ -1,5 +1,5 @@ import { generateId, Message } from 'ai'; -import { existsSync, mkdirSync } from 'fs'; +import { existsSync, mkdirSync, writeFileSync } from 'fs'; import { readFile, writeFile } from 'fs/promises'; import path from 'path'; @@ -23,12 +23,58 @@ export async function saveChat({ await writeFile(getChatFile(id), JSON.stringify(messages, null, 
2)); } +export async function appendMessageToChat({ + chatId, + message, +}: { + chatId: string; + message: Message; +}): Promise { + const file = getChatFile(chatId); + const messages = await loadChat(chatId); + messages.push(message); + await writeFile(file, JSON.stringify(messages, null, 2)); +} + export async function loadChat(id: string): Promise { return JSON.parse(await readFile(getChatFile(id), 'utf8')); } function getChatFile(id: string): string { const chatDir = path.join(process.cwd(), '.chats'); + + if (!existsSync(chatDir)) mkdirSync(chatDir, { recursive: true }); + + const chatFile = path.join(chatDir, `${id}.json`); + + if (!existsSync(chatFile)) { + writeFileSync(chatFile, '[]'); + } + + return chatFile; +} + +export async function appendStreamId({ + chatId, + streamId, +}: { + chatId: string; + streamId: string; +}) { + const file = getStreamsFile(chatId); + const streams = await loadStreams(chatId); + streams.push(streamId); + await writeFile(file, JSON.stringify(streams, null, 2)); +} + +export async function loadStreams(chatId: string): Promise { + const file = getStreamsFile(chatId); + if (!existsSync(file)) return []; + return JSON.parse(await readFile(file, 'utf8')); +} + +function getStreamsFile(chatId: string): string { + const chatDir = path.join(process.cwd(), '.streams'); if (!existsSync(chatDir)) mkdirSync(chatDir, { recursive: true }); - return path.join(chatDir, `${id}.json`); + return path.join(chatDir, `${chatId}.json`); } diff --git a/packages/react/src/use-chat.ts b/packages/react/src/use-chat.ts index 86b0720fbf86..ef0bda759d84 100644 --- a/packages/react/src/use-chat.ts +++ b/packages/react/src/use-chat.ts @@ -9,6 +9,7 @@ import type { } from '@ai-sdk/ui-utils'; import { callChatApi, + resumeChatApi, extractMaxToolInvocationStep, fillMessageParts, generateId as generateIdFunc, @@ -52,6 +53,12 @@ export type UseChatHelpers = { * Abort the current request immediately, keep the generated tokens if any. 
*/ stop: () => void; + + /** + * Resume an ongoing chat generation stream. This does not resume an aborted generation. + */ + experimental_resume: () => Promise; + /** * Update the `messages` state locally. This is useful when you want to * edit the messages on the client, and then trigger the `reload` method @@ -400,6 +407,100 @@ By default, it's set to 1, which means that only a single LLM call is made. ], ); + const triggerResumeRequest = useCallback(async () => { + const body = { + id: chatId, + messages: messagesRef.current, + }; + + try { + const abortController = new AbortController(); + abortControllerRef.current = abortController; + + const throttledMutate = throttle(mutate, throttleWaitMs); + const throttledMutateStreamData = throttle( + mutateStreamData, + throttleWaitMs, + ); + + const previousMessages = messagesRef.current; + const chatMessages = fillMessageParts(previousMessages); + + const existingData = streamDataRef.current; + + await resumeChatApi({ + api, + body, + fetch, + onResponse, + restoreMessagesOnFailure() { + if (!keepLastMessageOnError) { + throttledMutate(previousMessages, false); + } + }, + streamProtocol, + onUpdate({ message, data, replaceLastMessage }) { + mutateStatus('streaming'); + + throttledMutate( + [ + ...(replaceLastMessage + ? chatMessages.slice(0, chatMessages.length - 1) + : chatMessages), + message, + ], + false, + ); + + if (data?.length) { + throttledMutateStreamData( + [...(existingData ?? []), ...data], + false, + ); + } + }, + onFinish, + onToolCall, + generateId, + lastMessage: chatMessages[chatMessages.length - 1], + }); + + abortControllerRef.current = null; + + mutateStatus('ready'); + } catch (error) { + // Ignore abort errors as they are expected. 
+ if ((error as any).name === 'AbortError') { + abortControllerRef.current = null; + mutateStatus('ready'); + return null; + } + + if (onError && error instanceof Error) { + onError(error); + } + + setError(error as Error); + mutateStatus('error'); + } + }, [ + api, + chatId, + fetch, + generateId, + keepLastMessageOnError, + mutate, + mutateStatus, + mutateStreamData, + onFinish, + onResponse, + onToolCall, + streamProtocol, + throttleWaitMs, + onError, + setError, + ]); + const append = useCallback( async ( message: Message | CreateMessage, @@ -456,6 +557,10 @@ By default, it's set to 1, which means that only a single LLM call is made. } }, []); + const experimental_resume = useCallback(async () => { + return triggerResumeRequest(); + }, [triggerResumeRequest]); + const setMessages = useCallback( (messages: Message[] | ((messages: Message[]) => Message[])) => { if (typeof messages === 'function') { @@ -581,6 +686,7 @@ By default, it's set to 1, which means that only a single LLM call is made. append, reload, stop, + experimental_resume, input, setInput, handleInputChange, diff --git a/packages/react/src/use-chat.ui.test.tsx b/packages/react/src/use-chat.ui.test.tsx index 60823b960082..3b9413c75bc2 100644 --- a/packages/react/src/use-chat.ui.test.tsx +++ b/packages/react/src/use-chat.ui.test.tsx @@ -1855,3 +1855,53 @@ describe('initialMessages', () => { }); }); }); + +describe('resume ongoing stream and return assistant message', () => { + setupTestComponent( + () => { + const { messages, status, experimental_resume } = useChat({ + id: '123', + initialMessages: [{ id: 'msg_123', role: 'user', content: 'hi' }], + }); + + useEffect(() => { + experimental_resume(); + }, []); + + return ( +
+ {messages.map((m, idx) => ( +
+ {m.role === 'user' ? 'User: ' : 'AI: '} + {m.content} +
+ ))} + +
{status}
+
+ ); + }, + { + init: TestComponent => { + server.urls['/api/chat'].response = [ + { + type: 'stream-chunks', + chunks: ['0:"Hello"\n', '0:"," \n', '0:" world"\n', '0:"."\n'], + }, + ]; + + return ; + }, + }, + ); + + it('construct messages from resumed stream', async () => { + await screen.findByTestId('message-0'); + expect(screen.getByTestId('message-0')).toHaveTextContent('User: hi'); + + await screen.findByTestId('message-1'); + expect(screen.getByTestId('message-1')).toHaveTextContent( + 'AI: Hello, world.', + ); + }); +}); diff --git a/packages/ui-utils/src/call-chat-api.ts b/packages/ui-utils/src/call-chat-api.ts index 80fc1636f8eb..188f62540e77 100644 --- a/packages/ui-utils/src/call-chat-api.ts +++ b/packages/ui-utils/src/call-chat-api.ts @@ -106,3 +106,91 @@ export async function callChatApi({ } } } + +export async function resumeChatApi({ + api, + body, + fetch = getOriginalFetch(), + onResponse, + restoreMessagesOnFailure, + streamProtocol = 'data', + onUpdate, + onFinish, + onToolCall, + generateId, + lastMessage, +}: { + api: string; + body: Record; + fetch: ReturnType | undefined; + onResponse: ((response: Response) => void | Promise) | undefined; + restoreMessagesOnFailure: () => void; + streamProtocol: 'data' | 'text' | undefined; + onUpdate: (options: { + message: UIMessage; + data: JSONValue[] | undefined; + replaceLastMessage: boolean; + }) => void; + onFinish: UseChatOptions['onFinish']; + onToolCall: UseChatOptions['onToolCall']; + generateId: IdGenerator; + lastMessage: UIMessage | undefined; +}) { + const { id } = body; + + const response = await fetch(`${api}?chatId=${id}`, { + method: 'GET', + }); + + if (onResponse) { + try { + await onResponse(response); + } catch (err) { + throw err; + } + } + + if (!response.ok) { + restoreMessagesOnFailure(); + throw new Error( + (await response.text()) ?? 
'Failed to fetch the chat response.', + ); + } + + if (!response.body) { + throw new Error('The response body is empty.'); + } + + switch (streamProtocol) { + case 'text': { + await processChatTextResponse({ + stream: response.body, + update: onUpdate, + onFinish, + generateId, + }); + return; + } + + case 'data': { + await processChatResponse({ + stream: response.body, + update: onUpdate, + lastMessage, + onToolCall, + onFinish({ message, finishReason, usage }) { + if (onFinish && message != null) { + onFinish(message, { usage, finishReason }); + } + }, + generateId, + }); + return; + } + + default: { + const exhaustiveCheck: never = streamProtocol; + throw new Error(`Unknown stream protocol: ${exhaustiveCheck}`); + } + } +} diff --git a/packages/ui-utils/src/index.ts b/packages/ui-utils/src/index.ts index 12ba2071ccb6..de388ff3cdf3 100644 --- a/packages/ui-utils/src/index.ts +++ b/packages/ui-utils/src/index.ts @@ -13,7 +13,7 @@ export type { AssistantStreamPart, AssistantStreamString, } from './assistant-stream-parts'; -export { callChatApi } from './call-chat-api'; +export { callChatApi, resumeChatApi } from './call-chat-api'; export { callCompletionApi } from './call-completion-api'; export { formatDataStreamPart, parseDataStreamPart } from './data-stream-parts'; export type { DataStreamPart, DataStreamString } from './data-stream-parts'; diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index bfabe5a8e274..0bfa34f793b7 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -343,7 +343,7 @@ importers: version: 10.2.3(chokidar@3.6.0)(typescript@5.5.4) '@nestjs/testing': specifier: ^10.4.12 - version: 10.4.12(@nestjs/common@10.4.15(reflect-metadata@0.2.2)(rxjs@7.8.1))(@nestjs/core@10.4.2)(@nestjs/platform-express@10.4.9) + version: 
10.4.12(@nestjs/common@10.4.15(reflect-metadata@0.2.2)(rxjs@7.8.1))(@nestjs/core@10.4.2(@nestjs/common@10.4.15(reflect-metadata@0.2.2)(rxjs@7.8.1))(@nestjs/platform-express@10.4.9)(reflect-metadata@0.2.2)(rxjs@7.8.1))(@nestjs/platform-express@10.4.9(@nestjs/common@10.4.15(reflect-metadata@0.2.2)(rxjs@7.8.1))(@nestjs/core@10.4.2)) '@types/express': specifier: ^5.0.0 version: 5.0.0 @@ -513,7 +513,7 @@ importers: version: link:../../packages/ai langchain: specifier: 0.1.36 - version: 0.1.36(@aws-crypto/sha256-js@5.2.0)(@aws-sdk/client-bedrock-runtime@3.663.0)(@aws-sdk/credential-provider-node@3.662.0(@aws-sdk/client-sso-oidc@3.662.0(@aws-sdk/client-sts@3.662.0))(@aws-sdk/client-sts@3.662.0))(@smithy/util-utf8@2.3.0)(@upstash/redis@1.34.3)(@vercel/kv@0.2.4)(fast-xml-parser@4.4.1)(ignore@5.3.2)(ioredis@5.4.1)(jsdom@26.0.0)(lodash@4.17.21)(openai@4.52.6)(playwright@1.50.1)(ws@8.18.0) + version: 0.1.36(@aws-crypto/sha256-js@5.2.0)(@aws-sdk/client-bedrock-runtime@3.663.0)(@aws-sdk/credential-provider-node@3.662.0)(@smithy/util-utf8@2.3.0)(@upstash/redis@1.34.3)(@vercel/kv@0.2.4)(fast-xml-parser@4.4.1)(ignore@5.3.2)(ioredis@5.4.1)(jsdom@26.0.0)(lodash@4.17.21)(openai@4.52.6)(playwright@1.50.1)(redis@4.7.0)(ws@8.18.0) next: specifier: latest version: 15.2.2(@opentelemetry/api@1.9.0)(@playwright/test@1.50.1)(react-dom@18.2.0(react@18.2.0))(react@18.2.0) @@ -602,6 +602,12 @@ importers: react-markdown: specifier: 9.0.1 version: 9.0.1(@types/react@18.3.3)(react@18.3.1) + redis: + specifier: ^4.7.0 + version: 4.7.0 + resumable-stream: + specifier: ^2.0.0 + version: 2.0.0 zod: specifier: 3.23.8 version: 3.23.8 @@ -644,7 +650,7 @@ importers: version: link:../../packages/react '@vercel/functions': specifier: latest - version: 2.0.0(@aws-sdk/credential-provider-web-identity@3.662.0(@aws-sdk/client-sts@3.662.0)) + version: 2.0.0(@aws-sdk/credential-provider-web-identity@3.662.0) ai: specifier: 4.3.10 version: link:../../packages/ai @@ -1011,7 +1017,7 @@ importers: version: 3.5.12 
nuxt: specifier: 3.14.159 - version: 3.14.159(@parcel/watcher@2.4.1)(@types/node@20.17.24)(@upstash/redis@1.34.3)(eslint@9.21.0(jiti@2.4.0))(ioredis@5.4.1)(magicast@0.3.5)(optionator@0.9.4)(rollup@4.34.9)(terser@5.31.3)(typescript@5.6.3)(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))(webpack-sources@3.2.3) + version: 3.14.159(@parcel/watcher@2.4.1)(@types/node@20.17.24)(@upstash/redis@1.34.3)(eslint@9.21.0)(ioredis@5.4.1)(magicast@0.3.5)(optionator@0.9.4)(rollup@4.34.9)(terser@5.31.3)(typescript@5.6.3)(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))(webpack-sources@3.2.3) tailwindcss: specifier: 3.4.15 version: 3.4.15(ts-node@10.9.2(@types/node@20.17.24)(typescript@5.6.3)) @@ -2090,7 +2096,7 @@ importers: version: link:../../tools/tsconfig '@vitejs/plugin-vue': specifier: 5.2.0 - version: 5.2.0(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))(vue@3.5.13(typescript@5.6.3)) + version: 5.2.0(vite@5.4.11(@types/node@22.7.4)(terser@5.31.3))(vue@3.5.13(typescript@5.6.3)) eslint: specifier: 8.57.1 version: 8.57.1 @@ -2111,7 +2117,7 @@ importers: version: 5.6.3 vite-plugin-solid: specifier: 2.7.2 - version: 2.7.2(solid-js@1.8.7) + version: 2.7.2(solid-js@1.8.7)(vite@5.4.11(@types/node@22.7.4)(terser@5.31.3)) vitest: specifier: 2.1.4 version: 2.1.4(@edge-runtime/vm@5.0.0)(@types/node@20.17.24)(jsdom@24.0.0)(msw@2.6.4(@types/node@20.17.24)(typescript@5.6.3))(terser@5.31.3) @@ -6441,6 +6447,35 @@ packages: resolution: {integrity: sha512-S+9ANAvUmjutrshV4jZjaiG8XQyuJIZ8a4utWmN/vW1sgQ9IfBnPndwkmQYw53QmouOIytT874u65HEmu6H5jw==} engines: {node: '>=18'} + '@redis/bloom@1.2.0': + resolution: {integrity: sha512-HG2DFjYKbpNmVXsa0keLHp/3leGJz1mjh09f2RLGGLQZzSHpkmZWuwJbAvo3QcRY8p80m5+ZdXZdYOSBLlp7Cg==} + peerDependencies: + '@redis/client': ^1.0.0 + + '@redis/client@1.6.0': + resolution: {integrity: sha512-aR0uffYI700OEEH4gYnitAnv3vzVGXCFvYfdpu/CJKvk4pHfLPEy/JSZyrpQ+15WhXe1yJRXLtfQ84s4mEXnPg==} + engines: {node: '>=14'} + + '@redis/graph@1.1.1': + resolution: {integrity: 
sha512-FEMTcTHZozZciLRl6GiiIB4zGm5z5F3F6a6FZCyrfxdKOhFlGkiAqlexWMBzCi4DcRoyiOsuLfW+cjlGWyExOw==} + peerDependencies: + '@redis/client': ^1.0.0 + + '@redis/json@1.0.7': + resolution: {integrity: sha512-6UyXfjVaTBTJtKNG4/9Z8PSpKE6XgSyEb8iwaqDcy+uKrd/DGYHTWkUdnQDyzm727V7p21WUMhsqz5oy65kPcQ==} + peerDependencies: + '@redis/client': ^1.0.0 + + '@redis/search@1.2.0': + resolution: {integrity: sha512-tYoDBbtqOVigEDMAcTGsRlMycIIjwMCgD8eR2t0NANeQmgK/lvxNAvYyb6bZDD4frHRhIHkJu2TBRvB0ERkOmw==} + peerDependencies: + '@redis/client': ^1.0.0 + + '@redis/time-series@1.1.0': + resolution: {integrity: sha512-c1Q99M5ljsIuc4YdaCwfUEXsofakb9c8+Zse2qxTadu8TalLXuAESzLvFAvNVbkmSlvlzIQOLpBCmWI9wTOt+g==} + peerDependencies: + '@redis/client': ^1.0.0 + '@redocly/ajv@8.11.2': resolution: {integrity: sha512-io1JpnwtIcvojV7QKDUSIuMN/ikdOUd1ReEnUnMKGfDVridQZ31J0MmIuqwuRjWDZfmvr+Q0MqCcfHM2gTivOg==} @@ -10213,6 +10248,10 @@ packages: peerDependencies: next: '>=13.2.0' + generic-pool@3.9.0: + resolution: {integrity: sha512-hymDOu5B53XvN4QT9dBmZxPX4CWhBPPLguTZ9MMFeFa/Kg0xWVfylOVNlJji/E7yTZWFd/q9GO5TxDLq156D7g==} + engines: {node: '>= 4'} + gensync@1.0.0-beta.2: resolution: {integrity: sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==} engines: {node: '>=6.9.0'} @@ -13270,6 +13309,9 @@ packages: resolution: {integrity: sha512-DJnGAeenTdpMEH6uAJRK/uiyEIH9WVsUmoLwzudwGJUwZPp80PDBWPHXSAGNPwNvIXAbe7MSUB1zQFugFml66A==} engines: {node: '>=4'} + redis@4.7.0: + resolution: {integrity: sha512-zvmkHEAdGMn+hMRXuMBtu4Vo5P6rHQjLoHftu+lBqq8ZTA3RCVC/WzD790bkKKiNFp7d5/9PcSD19fJyyRvOdQ==} + reflect-metadata@0.2.2: resolution: {integrity: sha512-urBwgfrvVP/eAyXx4hluJivBKzuEbSQs9rKWCrCkbSxNv8mxPcUZKeuoF3Uy4mJl3Lwprp6yy5/39VWigZ4K6Q==} @@ -13360,6 +13402,9 @@ packages: resolution: {integrity: sha512-oMA2dcrw6u0YfxJQXm342bFKX/E4sG9rbTzO9ptUcR/e8A33cHuvStiYOwH7fszkZlZ1z/ta9AAoPk2F4qIOHA==} engines: {node: '>=18'} + resumable-stream@2.0.0: + resolution: {integrity: 
sha512-D7E0wDUnfoy+Lerba/gyuD44OG3G0APqDcQ9soMSerujaVujPLWc5sSCLXf/ZFQPreLb3MKDjSm3TOPXpNtpZw==} + ret@0.5.0: resolution: {integrity: sha512-I1XxrZSQ+oErkRR4jYbAyEEu2I0avBvvMM5JN+6EBprOGRCs63ENqZ3vjavq8fBw2+62G5LF5XelKwuJpcvcxw==} engines: {node: '>=10'} @@ -17120,6 +17165,12 @@ snapshots: eslint: 9.21.0(jiti@2.4.0) eslint-visitor-keys: 3.4.3 + '@eslint-community/eslint-utils@4.4.1(eslint@9.21.0)': + dependencies: + eslint: 9.21.0 + eslint-visitor-keys: 3.4.3 + optional: true + '@eslint-community/regexpp@4.11.0': {} '@eslint-community/regexpp@4.12.1': {} @@ -18119,7 +18170,7 @@ snapshots: '@kwsites/promise-deferred@1.1.1': {} - '@langchain/community@0.0.57(@aws-crypto/sha256-js@5.2.0)(@aws-sdk/client-bedrock-runtime@3.663.0)(@aws-sdk/credential-provider-node@3.662.0(@aws-sdk/client-sso-oidc@3.662.0(@aws-sdk/client-sts@3.662.0))(@aws-sdk/client-sts@3.662.0))(@smithy/util-utf8@2.3.0)(@upstash/redis@1.34.3)(@vercel/kv@0.2.4)(ioredis@5.4.1)(jsdom@26.0.0)(lodash@4.17.21)(openai@4.52.6)(ws@8.18.0)': + '@langchain/community@0.0.57(@aws-crypto/sha256-js@5.2.0)(@aws-sdk/client-bedrock-runtime@3.663.0)(@aws-sdk/credential-provider-node@3.662.0)(@smithy/util-utf8@2.3.0)(@upstash/redis@1.34.3)(@vercel/kv@0.2.4)(ioredis@5.4.1)(jsdom@26.0.0)(lodash@4.17.21)(openai@4.52.6)(redis@4.7.0)(ws@8.18.0)': dependencies: '@langchain/core': 0.1.63(openai@4.52.6) '@langchain/openai': 0.0.28 @@ -18139,6 +18190,7 @@ snapshots: ioredis: 5.4.1 jsdom: 26.0.0 lodash: 4.17.21 + redis: 4.7.0 ws: 8.18.0 transitivePeerDependencies: - encoding @@ -18361,7 +18413,7 @@ snapshots: transitivePeerDependencies: - chokidar - '@nestjs/testing@10.4.12(@nestjs/common@10.4.15(reflect-metadata@0.2.2)(rxjs@7.8.1))(@nestjs/core@10.4.2)(@nestjs/platform-express@10.4.9)': + 
'@nestjs/testing@10.4.12(@nestjs/common@10.4.15(reflect-metadata@0.2.2)(rxjs@7.8.1))(@nestjs/core@10.4.2(@nestjs/common@10.4.15(reflect-metadata@0.2.2)(rxjs@7.8.1))(@nestjs/platform-express@10.4.9)(reflect-metadata@0.2.2)(rxjs@7.8.1))(@nestjs/platform-express@10.4.9(@nestjs/common@10.4.15(reflect-metadata@0.2.2)(rxjs@7.8.1))(@nestjs/core@10.4.2))': dependencies: '@nestjs/common': 10.4.15(reflect-metadata@0.2.2)(rxjs@7.8.1) '@nestjs/core': 10.4.2(@nestjs/common@10.4.15(reflect-metadata@0.2.2)(rxjs@7.8.1))(@nestjs/platform-express@10.4.9)(reflect-metadata@0.2.2)(rxjs@7.8.1) @@ -18671,7 +18723,7 @@ snapshots: '@nuxt/ui-templates@1.3.4': {} - '@nuxt/vite-builder@3.14.159(@types/node@20.17.24)(eslint@9.21.0(jiti@2.4.0))(magicast@0.3.5)(optionator@0.9.4)(rollup@4.34.9)(terser@5.31.3)(typescript@5.6.3)(vue@3.5.13(typescript@5.6.3))(webpack-sources@3.2.3)': + '@nuxt/vite-builder@3.14.159(@types/node@20.17.24)(eslint@9.21.0)(magicast@0.3.5)(optionator@0.9.4)(rollup@4.34.9)(terser@5.31.3)(typescript@5.6.3)(vue@3.5.13(typescript@5.6.3))(webpack-sources@3.2.3)': dependencies: '@nuxt/kit': 3.14.159(magicast@0.3.5)(rollup@4.34.9) '@rollup/plugin-replace': 6.0.1(rollup@4.34.9) @@ -18705,7 +18757,7 @@ snapshots: unplugin: 1.15.0(webpack-sources@3.2.3) vite: 5.4.11(@types/node@20.17.24)(terser@5.31.3) vite-node: 2.1.4(@types/node@20.17.24)(terser@5.31.3) - vite-plugin-checker: 0.8.0(eslint@9.21.0(jiti@2.4.0))(optionator@0.9.4)(typescript@5.6.3)(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3)) + vite-plugin-checker: 0.8.0(eslint@9.21.0)(optionator@0.9.4)(typescript@5.6.3)(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3)) vue: 3.5.13(typescript@5.6.3) vue-bundle-renderer: 2.1.1 transitivePeerDependencies: @@ -19993,6 +20045,32 @@ snapshots: '@publint/pack@0.1.2': {} + '@redis/bloom@1.2.0(@redis/client@1.6.0)': + dependencies: + '@redis/client': 1.6.0 + + '@redis/client@1.6.0': + dependencies: + cluster-key-slot: 1.1.2 + generic-pool: 3.9.0 + yallist: 4.0.0 + + 
'@redis/graph@1.1.1(@redis/client@1.6.0)': + dependencies: + '@redis/client': 1.6.0 + + '@redis/json@1.0.7(@redis/client@1.6.0)': + dependencies: + '@redis/client': 1.6.0 + + '@redis/search@1.2.0(@redis/client@1.6.0)': + dependencies: + '@redis/client': 1.6.0 + + '@redis/time-series@1.1.0(@redis/client@1.6.0)': + dependencies: + '@redis/client': 1.6.0 + '@redocly/ajv@8.11.2': dependencies: fast-deep-equal: 3.1.3 @@ -21637,7 +21715,7 @@ snapshots: throttleit: 2.1.0 undici: 5.28.4 - '@vercel/functions@2.0.0(@aws-sdk/credential-provider-web-identity@3.662.0(@aws-sdk/client-sts@3.662.0))': + '@vercel/functions@2.0.0(@aws-sdk/credential-provider-web-identity@3.662.0)': optionalDependencies: '@aws-sdk/credential-provider-web-identity': 3.662.0(@aws-sdk/client-sts@3.662.0) @@ -21783,6 +21861,11 @@ snapshots: vite: 5.4.11(@types/node@20.17.24)(terser@5.31.3) vue: 3.5.13(typescript@5.6.3) + '@vitejs/plugin-vue@5.2.0(vite@5.4.11(@types/node@22.7.4)(terser@5.31.3))(vue@3.5.13(typescript@5.6.3))': + dependencies: + vite: 5.4.11(@types/node@22.7.4)(terser@5.31.3) + vue: 3.5.13(typescript@5.6.3) + '@vitejs/plugin-vue@5.2.0(vite@6.0.3(@types/node@20.17.24)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0))(vue@3.3.8(typescript@5.6.3))': dependencies: vite: 6.0.3(@types/node@20.17.24)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0) @@ -24096,7 +24179,7 @@ snapshots: debug: 4.4.0(supports-color@9.4.0) enhanced-resolve: 5.17.1 eslint: 8.57.1 - eslint-module-utils: 2.8.0(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.4.5))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.1) + eslint-module-utils: 2.8.0(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.4.5))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.4.5))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1)(eslint@8.57.1))(eslint@8.57.1) eslint-plugin-import: 
2.29.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.4.5))(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.1) fast-glob: 3.3.2 get-tsconfig: 4.7.2 @@ -24130,7 +24213,7 @@ snapshots: debug: 4.4.0(supports-color@9.4.0) enhanced-resolve: 5.17.1 eslint: 9.21.0(jiti@2.4.0) - eslint-module-utils: 2.8.0(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@9.21.0(jiti@2.4.0)) + eslint-module-utils: 2.8.0(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1)(eslint@9.21.0(jiti@2.4.0)))(eslint@9.21.0(jiti@2.4.0)) eslint-plugin-import: 2.29.1(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3))(eslint-import-resolver-typescript@3.6.1)(eslint@9.21.0(jiti@2.4.0)) fast-glob: 3.3.2 get-tsconfig: 4.7.2 @@ -24142,7 +24225,7 @@ snapshots: - eslint-import-resolver-webpack - supports-color - eslint-module-utils@2.8.0(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.4.5))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.1): + eslint-module-utils@2.8.0(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.4.5))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.4.5))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1)(eslint@8.57.1))(eslint@8.57.1): dependencies: debug: 3.2.7 optionalDependencies: @@ -24164,7 +24247,7 @@ snapshots: transitivePeerDependencies: - supports-color - 
eslint-module-utils@2.8.0(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@9.21.0(jiti@2.4.0)): + eslint-module-utils@2.8.0(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1)(eslint@9.21.0(jiti@2.4.0)))(eslint@9.21.0(jiti@2.4.0)): dependencies: debug: 3.2.7 optionalDependencies: @@ -24175,7 +24258,7 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-module-utils@2.8.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.4.5))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.1): + eslint-module-utils@2.8.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.4.5))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.4.5))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1)(eslint@8.57.1))(eslint@8.57.1): dependencies: debug: 3.2.7 optionalDependencies: @@ -24197,7 +24280,7 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-module-utils@2.8.1(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@9.21.0(jiti@2.4.0)): + eslint-module-utils@2.8.1(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1)(eslint@9.21.0(jiti@2.4.0)))(eslint@9.21.0(jiti@2.4.0)): dependencies: debug: 3.2.7 optionalDependencies: 
@@ -24218,7 +24301,7 @@ snapshots: doctrine: 2.1.0 eslint: 8.57.1 eslint-import-resolver-node: 0.3.9 - eslint-module-utils: 2.8.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.4.5))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.1) + eslint-module-utils: 2.8.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.4.5))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.1)(typescript@5.4.5))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1)(eslint@8.57.1))(eslint@8.57.1) hasown: 2.0.2 is-core-module: 2.15.0 is-glob: 4.0.3 @@ -24272,7 +24355,7 @@ snapshots: doctrine: 2.1.0 eslint: 9.21.0(jiti@2.4.0) eslint-import-resolver-node: 0.3.9 - eslint-module-utils: 2.8.1(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@9.21.0(jiti@2.4.0)) + eslint-module-utils: 2.8.1(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@9.21.0(jiti@2.4.0))(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1)(eslint@9.21.0(jiti@2.4.0)))(eslint@9.21.0(jiti@2.4.0)) hasown: 2.0.2 is-core-module: 2.15.0 is-glob: 4.0.3 @@ -24498,6 +24581,46 @@ snapshots: transitivePeerDependencies: - supports-color + eslint@9.21.0: + dependencies: + '@eslint-community/eslint-utils': 4.4.1(eslint@9.21.0) + '@eslint-community/regexpp': 4.12.1 + '@eslint/config-array': 0.19.2 + '@eslint/core': 0.12.0 + '@eslint/eslintrc': 3.3.0 + '@eslint/js': 9.21.0 + '@eslint/plugin-kit': 0.2.7 + '@humanfs/node': 0.16.6 + '@humanwhocodes/module-importer': 1.0.1 + '@humanwhocodes/retry': 0.4.2 + '@types/estree': 1.0.6 + '@types/json-schema': 7.0.15 + ajv: 6.12.6 + chalk: 4.1.2 + cross-spawn: 7.0.6 + debug: 
4.4.0(supports-color@9.4.0) + escape-string-regexp: 4.0.0 + eslint-scope: 8.2.0 + eslint-visitor-keys: 4.2.0 + espree: 10.3.0 + esquery: 1.6.0 + esutils: 2.0.3 + fast-deep-equal: 3.1.3 + file-entry-cache: 8.0.0 + find-up: 5.0.0 + glob-parent: 6.0.2 + ignore: 5.3.2 + imurmurhash: 0.1.4 + is-glob: 4.0.3 + json-stable-stringify-without-jsonify: 1.0.1 + lodash.merge: 4.6.2 + minimatch: 3.1.2 + natural-compare: 1.4.0 + optionator: 0.9.4 + transitivePeerDependencies: + - supports-color + optional: true + eslint@9.21.0(jiti@2.4.0): dependencies: '@eslint-community/eslint-utils': 4.4.1(eslint@9.21.0(jiti@2.4.0)) @@ -25099,6 +25222,8 @@ snapshots: dependencies: next: 15.2.2(@opentelemetry/api@1.9.0)(@playwright/test@1.50.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + generic-pool@3.9.0: {} + gensync@1.0.0-beta.2: {} get-caller-file@2.0.5: {} @@ -26557,10 +26682,10 @@ snapshots: kolorist@1.8.0: {} - langchain@0.1.36(@aws-crypto/sha256-js@5.2.0)(@aws-sdk/client-bedrock-runtime@3.663.0)(@aws-sdk/credential-provider-node@3.662.0(@aws-sdk/client-sso-oidc@3.662.0(@aws-sdk/client-sts@3.662.0))(@aws-sdk/client-sts@3.662.0))(@smithy/util-utf8@2.3.0)(@upstash/redis@1.34.3)(@vercel/kv@0.2.4)(fast-xml-parser@4.4.1)(ignore@5.3.2)(ioredis@5.4.1)(jsdom@26.0.0)(lodash@4.17.21)(openai@4.52.6)(playwright@1.50.1)(ws@8.18.0): + langchain@0.1.36(@aws-crypto/sha256-js@5.2.0)(@aws-sdk/client-bedrock-runtime@3.663.0)(@aws-sdk/credential-provider-node@3.662.0)(@smithy/util-utf8@2.3.0)(@upstash/redis@1.34.3)(@vercel/kv@0.2.4)(fast-xml-parser@4.4.1)(ignore@5.3.2)(ioredis@5.4.1)(jsdom@26.0.0)(lodash@4.17.21)(openai@4.52.6)(playwright@1.50.1)(redis@4.7.0)(ws@8.18.0): dependencies: '@anthropic-ai/sdk': 0.9.1 - '@langchain/community': 
0.0.57(@aws-crypto/sha256-js@5.2.0)(@aws-sdk/client-bedrock-runtime@3.663.0)(@aws-sdk/credential-provider-node@3.662.0(@aws-sdk/client-sso-oidc@3.662.0(@aws-sdk/client-sts@3.662.0))(@aws-sdk/client-sts@3.662.0))(@smithy/util-utf8@2.3.0)(@upstash/redis@1.34.3)(@vercel/kv@0.2.4)(ioredis@5.4.1)(jsdom@26.0.0)(lodash@4.17.21)(openai@4.52.6)(ws@8.18.0) + '@langchain/community': 0.0.57(@aws-crypto/sha256-js@5.2.0)(@aws-sdk/client-bedrock-runtime@3.663.0)(@aws-sdk/credential-provider-node@3.662.0)(@smithy/util-utf8@2.3.0)(@upstash/redis@1.34.3)(@vercel/kv@0.2.4)(ioredis@5.4.1)(jsdom@26.0.0)(lodash@4.17.21)(openai@4.52.6)(redis@4.7.0)(ws@8.18.0) '@langchain/core': 0.1.63(openai@4.52.6) '@langchain/openai': 0.0.28 '@langchain/textsplitters': 0.0.2(openai@4.52.6) @@ -26585,6 +26710,7 @@ snapshots: ioredis: 5.4.1 jsdom: 26.0.0 playwright: 1.50.1 + redis: 4.7.0 ws: 8.18.0 transitivePeerDependencies: - '@aws-crypto/sha256-js' @@ -27726,14 +27852,14 @@ snapshots: nuxi@3.15.0: {} - nuxt@3.14.159(@parcel/watcher@2.4.1)(@types/node@20.17.24)(@upstash/redis@1.34.3)(eslint@9.21.0(jiti@2.4.0))(ioredis@5.4.1)(magicast@0.3.5)(optionator@0.9.4)(rollup@4.34.9)(terser@5.31.3)(typescript@5.6.3)(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))(webpack-sources@3.2.3): + nuxt@3.14.159(@parcel/watcher@2.4.1)(@types/node@20.17.24)(@upstash/redis@1.34.3)(eslint@9.21.0)(ioredis@5.4.1)(magicast@0.3.5)(optionator@0.9.4)(rollup@4.34.9)(terser@5.31.3)(typescript@5.6.3)(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))(webpack-sources@3.2.3): dependencies: '@nuxt/devalue': 2.0.2 '@nuxt/devtools': 1.6.3(rollup@4.34.9)(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3))(vue@3.5.13(typescript@5.6.3)) '@nuxt/kit': 3.14.159(magicast@0.3.5)(rollup@4.34.9) '@nuxt/schema': 3.14.159(magicast@0.3.5)(rollup@4.34.9) '@nuxt/telemetry': 2.6.0(magicast@0.3.5)(rollup@4.34.9) - '@nuxt/vite-builder': 
3.14.159(@types/node@20.17.24)(eslint@9.21.0(jiti@2.4.0))(magicast@0.3.5)(optionator@0.9.4)(rollup@4.34.9)(terser@5.31.3)(typescript@5.6.3)(vue@3.5.13(typescript@5.6.3))(webpack-sources@3.2.3) + '@nuxt/vite-builder': 3.14.159(@types/node@20.17.24)(eslint@9.21.0)(magicast@0.3.5)(optionator@0.9.4)(rollup@4.34.9)(terser@5.31.3)(typescript@5.6.3)(vue@3.5.13(typescript@5.6.3))(webpack-sources@3.2.3) '@unhead/dom': 1.11.11 '@unhead/shared': 1.11.11 '@unhead/ssr': 1.11.11 @@ -28927,6 +29053,15 @@ snapshots: dependencies: redis-errors: 1.2.0 + redis@4.7.0: + dependencies: + '@redis/bloom': 1.2.0(@redis/client@1.6.0) + '@redis/client': 1.6.0 + '@redis/graph': 1.1.1(@redis/client@1.6.0) + '@redis/json': 1.0.7(@redis/client@1.6.0) + '@redis/search': 1.2.0(@redis/client@1.6.0) + '@redis/time-series': 1.1.0(@redis/client@1.6.0) + reflect-metadata@0.2.2: {} reflect.getprototypeof@1.0.4: @@ -29036,6 +29171,8 @@ snapshots: onetime: 7.0.0 signal-exit: 4.1.0 + resumable-stream@2.0.0: {} + ret@0.5.0: {} retry@0.13.1: {} @@ -30977,7 +31114,7 @@ snapshots: - tsx - yaml - vite-plugin-checker@0.8.0(eslint@9.21.0(jiti@2.4.0))(optionator@0.9.4)(typescript@5.6.3)(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3)): + vite-plugin-checker@0.8.0(eslint@9.21.0)(optionator@0.9.4)(typescript@5.6.3)(vite@5.4.11(@types/node@20.17.24)(terser@5.31.3)): dependencies: '@babel/code-frame': 7.26.2 ansi-escapes: 4.3.2 @@ -30995,7 +31132,7 @@ snapshots: vscode-languageserver-textdocument: 1.0.11 vscode-uri: 3.0.8 optionalDependencies: - eslint: 9.21.0(jiti@2.4.0) + eslint: 9.21.0 optionator: 0.9.4 typescript: 5.6.3 @@ -31032,7 +31169,7 @@ snapshots: transitivePeerDependencies: - supports-color - vite-plugin-solid@2.7.2(solid-js@1.8.7): + vite-plugin-solid@2.7.2(solid-js@1.8.7)(vite@5.4.11(@types/node@22.7.4)(terser@5.31.3)): dependencies: '@babel/core': 7.26.0 '@babel/preset-typescript': 7.26.0(@babel/core@7.26.0) @@ -31041,7 +31178,8 @@ snapshots: merge-anything: 5.1.7 solid-js: 1.8.7 solid-refresh: 
0.5.3(solid-js@1.8.7) - vitefu: 0.2.5(vite@6.0.3(@types/node@22.7.4)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0)) + vite: 5.4.11(@types/node@22.7.4)(terser@5.31.3) + vitefu: 0.2.5(vite@5.4.11(@types/node@22.7.4)(terser@5.31.3)) transitivePeerDependencies: - supports-color @@ -31106,6 +31244,10 @@ snapshots: tsx: 4.19.2 yaml: 2.7.0 + vitefu@0.2.5(vite@5.4.11(@types/node@22.7.4)(terser@5.31.3)): + optionalDependencies: + vite: 5.4.11(@types/node@22.7.4)(terser@5.31.3) + vitefu@0.2.5(vite@6.0.3(@types/node@22.7.4)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0)): optionalDependencies: vite: 6.0.3(@types/node@22.7.4)(jiti@2.4.0)(terser@5.31.3)(tsx@4.19.2)(yaml@2.7.0) diff --git a/turbo.json b/turbo.json index a3b932e805d1..f314f3aa1b40 100644 --- a/turbo.json +++ b/turbo.json @@ -1,9 +1,14 @@ { "$schema": "https://turbo.build/schema.json", - "globalEnv": ["CI", "PORT"], + "globalEnv": [ + "CI", + "PORT" + ], "tasks": { "build": { - "dependsOn": ["^build"], + "dependsOn": [ + "^build" + ], "env": [ "ANTHROPIC_API_KEY", "ASSISTANT_ID", @@ -42,7 +47,8 @@ "SENTRY_PROJECT", "TOGETHER_AI_API_KEY", "VERCEL_URL", - "XAI_API_KEY" + "XAI_API_KEY", + "KV_URL" ], "outputs": [ "dist/**", @@ -54,19 +60,32 @@ ] }, "lint": { - "dependsOn": ["^lint"] + "dependsOn": [ + "^lint" + ] }, "type-check": { - "dependsOn": ["^build", "build"] + "dependsOn": [ + "^build", + "build" + ] }, "test": { - "dependsOn": ["^build", "build"] + "dependsOn": [ + "^build", + "build" + ] }, "publint": { - "dependsOn": ["^build", "build"] + "dependsOn": [ + "^build", + "build" + ] }, "clean": { - "dependsOn": ["^clean"] + "dependsOn": [ + "^clean" + ] }, "dev": { "cache": false, @@ -74,7 +93,10 @@ }, "prettier-check": {}, "integration-test": { - "dependsOn": ["^build", "build"] + "dependsOn": [ + "^build", + "build" + ] } } } From 6e15bffb798909dccef800d59f58c99b77144f9b Mon Sep 17 00:00:00 2001 From: jeremyphilemon Date: Fri, 25 Apr 2025 09:35:11 -0700 Subject: [PATCH 02/20] resolve merge conflicts 
--- examples/next-openai/package.json | 8 -------- 1 file changed, 8 deletions(-) diff --git a/examples/next-openai/package.json b/examples/next-openai/package.json index 792620118761..c23f630f1fee 100644 --- a/examples/next-openai/package.json +++ b/examples/next-openai/package.json @@ -10,19 +10,11 @@ }, "dependencies": { "@ai-sdk/anthropic": "1.2.10", -<<<<<<< Updated upstream "@ai-sdk/deepseek": "0.2.13", "@ai-sdk/fireworks": "0.2.13", "@ai-sdk/openai": "1.3.19", "@ai-sdk/google": "1.2.13", "@ai-sdk/google-vertex": "2.2.17", -======= - "@ai-sdk/deepseek": "0.2.12", - "@ai-sdk/fireworks": "0.2.12", - "@ai-sdk/google": "1.2.12", - "@ai-sdk/google-vertex": "2.2.16", - "@ai-sdk/openai": "1.3.16", ->>>>>>> Stashed changes "@ai-sdk/perplexity": "1.1.7", "@ai-sdk/react": "1.2.9", "@ai-sdk/ui-utils": "1.2.8", From fd753e4a973d8dfad830982d53d1e0d1e09fa6b5 Mon Sep 17 00:00:00 2001 From: jeremyphilemon Date: Fri, 25 Apr 2025 09:47:32 -0700 Subject: [PATCH 03/20] remove KV_URL --- examples/next-openai/app/api/use-chat-resume/route.ts | 6 ------ turbo.json | 3 +-- 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/examples/next-openai/app/api/use-chat-resume/route.ts b/examples/next-openai/app/api/use-chat-resume/route.ts index 0afbf26f9557..55823bbf444c 100644 --- a/examples/next-openai/app/api/use-chat-resume/route.ts +++ b/examples/next-openai/app/api/use-chat-resume/route.ts @@ -19,12 +19,6 @@ import { createResumableStreamContext } from 'resumable-stream'; // Allow streaming responses up to 30 seconds export const maxDuration = 30; -const redisUrl = process.env.KV_URL; - -if (!redisUrl) { - throw new Error('KV_URL environment variable is not set'); -} - const streamContext = createResumableStreamContext({ waitUntil: after, }); diff --git a/turbo.json b/turbo.json index f314f3aa1b40..771281cd8df5 100644 --- a/turbo.json +++ b/turbo.json @@ -47,8 +47,7 @@ "SENTRY_PROJECT", "TOGETHER_AI_API_KEY", "VERCEL_URL", - "XAI_API_KEY", - "KV_URL" + "XAI_API_KEY" ], 
"outputs": [ "dist/**", From 7dad719a1f1ae4da13154ba402daeafb4a0fa593 Mon Sep 17 00:00:00 2001 From: jeremyphilemon Date: Fri, 25 Apr 2025 10:45:11 -0700 Subject: [PATCH 04/20] move stream context creation inside handler --- examples/next-openai/.env.local.example | 2 ++ .../next-openai/app/api/use-chat-resume/route.ts | 12 ++++++++---- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/examples/next-openai/.env.local.example b/examples/next-openai/.env.local.example index 7de83b351115..bd742868814f 100644 --- a/examples/next-openai/.env.local.example +++ b/examples/next-openai/.env.local.example @@ -13,3 +13,5 @@ BLOB_READ_WRITE_TOKEN=xxxxxxx # Required for reasoning example DEEPSEEK_API_KEY=xxxxxxx +# Required for resumable streams. You can create a Redis store here: https://vercel.com/marketplace/redis +REDIS_URL=xxxxxx diff --git a/examples/next-openai/app/api/use-chat-resume/route.ts b/examples/next-openai/app/api/use-chat-resume/route.ts index 55823bbf444c..81ec76632265 100644 --- a/examples/next-openai/app/api/use-chat-resume/route.ts +++ b/examples/next-openai/app/api/use-chat-resume/route.ts @@ -19,11 +19,11 @@ import { createResumableStreamContext } from 'resumable-stream'; // Allow streaming responses up to 30 seconds export const maxDuration = 30; -const streamContext = createResumableStreamContext({ - waitUntil: after, -}); - export async function POST(req: Request) { + const streamContext = createResumableStreamContext({ + waitUntil: after, + }); + const { id, messages }: { id: string; messages: Message[] } = await req.json(); @@ -67,6 +67,10 @@ export async function POST(req: Request) { } export async function GET(request: Request) { + const streamContext = createResumableStreamContext({ + waitUntil: after, + }); + const { searchParams } = new URL(request.url); const chatId = searchParams.get('chatId'); From af232eea5d0a29ae55d660b2ec933dd72a5f5466 Mon Sep 17 00:00:00 2001 From: jeremyphilemon Date: Fri, 25 Apr 2025 11:05:30 -0700 
Subject: [PATCH 05/20] update test --- packages/react/src/use-chat.ui.test.tsx | 32 ++++++++++++++++++++----- 1 file changed, 26 insertions(+), 6 deletions(-) diff --git a/packages/react/src/use-chat.ui.test.tsx b/packages/react/src/use-chat.ui.test.tsx index 3b9413c75bc2..e400abf244c2 100644 --- a/packages/react/src/use-chat.ui.test.tsx +++ b/packages/react/src/use-chat.ui.test.tsx @@ -1857,6 +1857,8 @@ describe('initialMessages', () => { }); describe('resume ongoing stream and return assistant message', () => { + const controller = new TestResponseController(); + setupTestComponent( () => { const { messages, status, experimental_resume } = useChat({ @@ -1883,12 +1885,10 @@ describe('resume ongoing stream and return assistant message', () => { }, { init: TestComponent => { - server.urls['/api/chat'].response = [ - { - type: 'stream-chunks', - chunks: ['0:"Hello"\n', '0:"," \n', '0:" world"\n', '0:"."\n'], - }, - ]; + server.urls['/api/chat'].response = { + type: 'controlled-stream', + controller, + }; return ; }, @@ -1899,9 +1899,29 @@ describe('resume ongoing stream and return assistant message', () => { await screen.findByTestId('message-0'); expect(screen.getByTestId('message-0')).toHaveTextContent('User: hi'); + await waitFor(() => { + expect(screen.getByTestId('status')).toHaveTextContent('ready'); + }); + + controller.write('0:"Hello"\n'); + + await waitFor(() => { + expect(screen.getByTestId('status')).toHaveTextContent('streaming'); + }); + + controller.write('0:"," \n'); + controller.write('0:" world"\n') + controller.write('0:"."\n') + + controller.close() + await screen.findByTestId('message-1'); expect(screen.getByTestId('message-1')).toHaveTextContent( 'AI: Hello, world.', ); + + await waitFor(() => { + expect(screen.getByTestId('status')).toHaveTextContent('ready'); + }); }); }); From 89b2f752299fde919a386e7ac6703df98703fab5 Mon Sep 17 00:00:00 2001 From: jeremyphilemon Date: Fri, 25 Apr 2025 11:06:38 -0700 Subject: [PATCH 06/20] format --- 
packages/react/src/use-chat.ui.test.tsx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/react/src/use-chat.ui.test.tsx b/packages/react/src/use-chat.ui.test.tsx index e400abf244c2..af84aff708fc 100644 --- a/packages/react/src/use-chat.ui.test.tsx +++ b/packages/react/src/use-chat.ui.test.tsx @@ -1910,10 +1910,10 @@ describe('resume ongoing stream and return assistant message', () => { }); controller.write('0:"," \n'); - controller.write('0:" world"\n') - controller.write('0:"."\n') + controller.write('0:" world"\n'); + controller.write('0:"."\n'); - controller.close() + controller.close(); await screen.findByTestId('message-1'); expect(screen.getByTestId('message-1')).toHaveTextContent( From 283eea4c9be5ff808952d7d983e79ece22fc34dc Mon Sep 17 00:00:00 2001 From: jeremyphilemon Date: Fri, 25 Apr 2025 11:07:54 -0700 Subject: [PATCH 07/20] restore turbo.json --- turbo.json | 37 ++++++++----------------------------- 1 file changed, 8 insertions(+), 29 deletions(-) diff --git a/turbo.json b/turbo.json index 771281cd8df5..a3b932e805d1 100644 --- a/turbo.json +++ b/turbo.json @@ -1,14 +1,9 @@ { "$schema": "https://turbo.build/schema.json", - "globalEnv": [ - "CI", - "PORT" - ], + "globalEnv": ["CI", "PORT"], "tasks": { "build": { - "dependsOn": [ - "^build" - ], + "dependsOn": ["^build"], "env": [ "ANTHROPIC_API_KEY", "ASSISTANT_ID", @@ -59,32 +54,19 @@ ] }, "lint": { - "dependsOn": [ - "^lint" - ] + "dependsOn": ["^lint"] }, "type-check": { - "dependsOn": [ - "^build", - "build" - ] + "dependsOn": ["^build", "build"] }, "test": { - "dependsOn": [ - "^build", - "build" - ] + "dependsOn": ["^build", "build"] }, "publint": { - "dependsOn": [ - "^build", - "build" - ] + "dependsOn": ["^build", "build"] }, "clean": { - "dependsOn": [ - "^clean" - ] + "dependsOn": ["^clean"] }, "dev": { "cache": false, @@ -92,10 +74,7 @@ }, "prettier-check": {}, "integration-test": { - "dependsOn": [ - "^build", - "build" - ] + "dependsOn": ["^build", "build"] 
} } } From b475105de6281bed5b0ce24d550fd964677ca543 Mon Sep 17 00:00:00 2001 From: jeremyphilemon Date: Sat, 26 Apr 2025 01:23:17 -0700 Subject: [PATCH 08/20] fix eslint error --- examples/next-openai/app/use-chat-resume/chat.tsx | 2 ++ 1 file changed, 2 insertions(+) diff --git a/examples/next-openai/app/use-chat-resume/chat.tsx b/examples/next-openai/app/use-chat-resume/chat.tsx index e920eae6c807..72be3a8f8ba0 100644 --- a/examples/next-openai/app/use-chat-resume/chat.tsx +++ b/examples/next-openai/app/use-chat-resume/chat.tsx @@ -38,6 +38,8 @@ export function Chat({ if (autoResume) { experimental_resume(); } + // We want to disable the exhaustive deps rule here because we only want to run this effect once + // eslint-disable-next-line react-hooks/exhaustive-deps }, []); return ( From 2dd7edad998c1957b28a630290c75d47702b7deb Mon Sep 17 00:00:00 2001 From: jeremyphilemon Date: Mon, 28 Apr 2025 14:17:44 -0700 Subject: [PATCH 09/20] update docs --- content/docs/04-ai-sdk-ui/02-chatbot.mdx | 71 --------- .../03-chatbot-message-persistence.mdx | 142 ++++++++++++++++++ 2 files changed, 142 insertions(+), 71 deletions(-) diff --git a/content/docs/04-ai-sdk-ui/02-chatbot.mdx b/content/docs/04-ai-sdk-ui/02-chatbot.mdx index 6494a2d06c13..8611213b06ba 100644 --- a/content/docs/04-ai-sdk-ui/02-chatbot.mdx +++ b/content/docs/04-ai-sdk-ui/02-chatbot.mdx @@ -634,77 +634,6 @@ messages.map(message => ( )); ``` -## Resumable Streams (Experimental) - -The `useChat` hook has experimental support for resuming an ongoing chat generation stream by any client, either after a network disconnect or reloading the chat. This can be useful for building applications that involve long-running conversations or for ensuring that messages are not lost in case of network failures. 
- -The following are the pre-requisities for your chat application to support resumable streams: - -- Installing the [`resumable-stream`](https://www.npmjs.com/package/resumable-stream) package that helps create and manage the publisher/subscriber mechanism of the streams. -- A [Redis](https://vercel.com/marketplace/redis) instance to store the stream state. -- A table that tracks the stream IDs associated with a chat. - -To resume a chat stream, you can use the `experimental_resume` function returned by the `useChat` hook. You will typically call this function during the initial mount of the hook. - -``` -'use client' - -export function Chat() { - const { experimental_resume } = useChat({id}); - - useEffect(() => { - experimental_resume(); - - // we use an empty dependency array to - // ensure this effect runs only once - }, []) - - return ( -
- - -
- ) -} -``` - -The `experimental_resume` function makes a `GET` request to the api endpoint you've initialized the hook with (or `/api/chat` by default) and streams the contents of the stream if it is active or fails silently if it has ended. - -The `GET` request automatically appends the `chatId` query parameter to the URL to help identify the chat the request belongs to. Using the `chatId`, you can look up the most recent stream ID from the database and resume the stream. As a result, it is important to specify the `id` parameter in the `useChat` hook. - -Earlier, you must've implemented the `POST` method for `/api/chat` route to create new chat generations. When using `experimental_resume`, you must also implement the `GET` method for `/api/chat` route to resume a stream if it is active or fails silently if it has ended. - -``` -export async function GET() { - const { searchParams } = new URL(request.url); - const chatId = searchParams.get('chatId'); - - if (!chatId) { - return new Response('id is required', { status: 400 }); - } - - const streamIds = await loadStreams(chatId); - - if (!streamIds.length) { - return new Response('No streams found', { status: 404 }); - } - - const recentStreamId = streamIds.at(-1); - - if (!recentStreamId) { - return new Response('No recent stream found', { status: 404 }); - } - - const emptyDataStream = createDataStream({ - execute: () => {}, - }); - - return new Response( - await streamContext.resumableStream(recentStreamId, () => emptyDataStream), - ); -} -``` - ## Attachments (Experimental) The `useChat` hook supports sending attachments along with a message as well as rendering them on the client. This can be useful for building applications that involve sending images, files, or other media content to the AI provider. 
diff --git a/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx b/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx index e722005a3536..7a84f70b5ced 100644 --- a/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx +++ b/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx @@ -325,3 +325,145 @@ When the client reloads the page after a disconnect, the chat will be restored f the case where the client reloads the page after a disconnection, but the streaming is not yet complete. + +## Resuming ongoing streams + +This feature is experimental and may change in future versions. + +The `useChat` hook has experimental support for resuming an ongoing chat generation stream by any client, either after a network disconnect or by reloading the chat page. This can be useful for building applications that involve long-running conversations or for ensuring that messages are not lost in case of network failures. + +The following are the pre-requisities for your chat application to support resumable streams: + +- Installing the [`resumable-stream`](https://www.npmjs.com/package/resumable-stream) package that helps create and manage the publisher/subscriber mechanism of the streams. +- Creating a [Redis](https://vercel.com/marketplace/redis) instance to store the stream state. +- Creating a table that tracks the stream IDs associated with a chat. + +To resume a chat stream, you can use the `experimental_resume` function returned by the `useChat` hook. You will typically call this function during the initial mount of the hook. + +```filename="app/components/chat.tsx" +'use client' + +export function Chat() { + const { experimental_resume } = useChat({id}); + + useEffect(() => { + experimental_resume(); + + // we use an empty dependency array to + // ensure this effect runs only once + }, []) + + return ( +
+ + +
+ ) +} +``` + +The `experimental_resume` function makes a `GET` request to your configured chat endpoint (or `/api/chat` by default) whenever your client calls it. If there’s an active stream, it will pick up where it left off, otherwise it simply finishes without error. + +The `GET` request automatically appends the `chatId` query parameter to the URL to help identify the chat the request belongs to. Using the `chatId`, you can look up the most recent stream ID from the database and resume the stream. + +``` +GET /api/chat?chatId= +``` + +Earlier, you must've implemented the `POST` handler for the `/api/chat` route to create new chat generations. When using `experimental_resume`, you must also implement the `GET` handler for `/api/chat` route to resume streams. + +### 1. Implement the GET handler + +Add a `GET` method to `/api/chat` that: + +1. Reads `chatId` from the query string +2. Validates it’s present +3. Loads any stored stream IDs for that chat +4. Returns the latest one to `streamContext.resumableStream()` +5. Falls back to an empty stream if it’s already closed + +```filename="app/api/chat/route.ts" +export async function GET() { + const { searchParams } = new URL(request.url); + const chatId = searchParams.get('chatId'); + + if (!chatId) { + return new Response('id is required', { status: 400 }); + } + + const streamIds = await loadStreams(chatId); + + if (!streamIds.length) { + return new Response('No streams found', { status: 404 }); + } + + const recentStreamId = streamIds.at(-1); + + if (!recentStreamId) { + return new Response('No recent stream found', { status: 404 }); + } + + const emptyDataStream = createDataStream({ + execute: () => {}, + }); + + return new Response( + await streamContext.resumableStream(recentStreamId, () => emptyDataStream), + ); +} +``` + +After you've implemented the `GET` handler, you can update the `POST` handler to handle the creation of resumable streams. + +### 2. 
Update the POST handler + +When you create a brand-new chat completion, you must: + +1. Generate a fresh `streamId` +2. Persist it alongside your `chatId` +3. Kick off a `createDataStream` that pipes tokens as they arrive +4. Hand that new stream to `streamContext.resumableStream()` + +``` +import { createResumableStreamContext } from 'resumable-stream'; + +const streamContext = createResumableStreamContext({ + waitUntil: after, +}); + +async function POST(request: Request) { + const { id, messages } = await req.json(); + const streamId = generateId(); + + // Record this new stream so we can resume later + await appendStreamId({ chatId: id, streamId }); + + // Build the data stream that will emit tokens + const stream = createDataStream({ + execute: dataStream => { + const result = streamText({ + model: openai('gpt-4o'), + messages, + onFinish: async ({ response }) => { + await saveChat({ + id, + messages: appendResponseMessages({ + messages, + responseMessages: response.messages, + }), + }); + }, + }); + + // Return a resumable stream to the client + result.mergeIntoDataStream(dataStream); + }, + }); + + return new Response( + await streamContext.resumableStream(streamId, () => stream), + ); +} +``` + +With both handlers, your clients can now gracefully resume ongoing streams. 
From cad77ab02574b9404308136e0e179c40774901b4 Mon Sep 17 00:00:00 2001 From: jeremyphilemon Date: Mon, 28 Apr 2025 14:24:04 -0700 Subject: [PATCH 10/20] add imports --- .../03-chatbot-message-persistence.mdx | 26 +++++++++++++++++++ .../app/api/use-chat-resume/route.ts | 1 - 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx b/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx index 7a84f70b5ced..f0c8ef40b6dc 100644 --- a/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx +++ b/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx @@ -342,6 +342,9 @@ To resume a chat stream, you can use the `experimental_resume` function returned ```filename="app/components/chat.tsx" 'use client' +import { useChat } from "@ai-sdk/react"; +import { Input } from "@/components/input"; +import { Messages } from "@/components/messages"; export function Chat() { const { experimental_resume } = useChat({id}); @@ -383,6 +386,19 @@ Add a `GET` method to `/api/chat` that: 5. Falls back to an empty stream if it’s already closed ```filename="app/api/chat/route.ts" +import { + loadStreams, +} from '@/util/chat-store'; +import { + createDataStream, +} from 'ai'; +import { after } from 'next/server'; +import { createResumableStreamContext } from 'resumable-stream'; + +const streamContext = createResumableStreamContext({ + waitUntil: after, +}); + export async function GET() { const { searchParams } = new URL(request.url); const chatId = searchParams.get('chatId'); @@ -425,6 +441,16 @@ When you create a brand-new chat completion, you must: 4. 
Hand that new stream to `streamContext.resumableStream()` ``` +import { + appendResponseMessages, + createDataStream, + generateId, + streamText, +} from 'ai'; +import { + appendStreamId, + saveChat, +} from '@/util/chat-store'; import { createResumableStreamContext } from 'resumable-stream'; const streamContext = createResumableStreamContext({ diff --git a/examples/next-openai/app/api/use-chat-resume/route.ts b/examples/next-openai/app/api/use-chat-resume/route.ts index 81ec76632265..a125503fff4a 100644 --- a/examples/next-openai/app/api/use-chat-resume/route.ts +++ b/examples/next-openai/app/api/use-chat-resume/route.ts @@ -1,7 +1,6 @@ import { appendMessageToChat, appendStreamId, - createChat, loadStreams, saveChat, } from '@/util/chat-store'; From cebcca3e8536a1003677139d4b08ae0efdd99a67 Mon Sep 17 00:00:00 2001 From: jeremyphilemon Date: Mon, 28 Apr 2025 15:35:04 -0700 Subject: [PATCH 11/20] check for request url and method --- packages/react/src/use-chat.ui.test.tsx | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/packages/react/src/use-chat.ui.test.tsx b/packages/react/src/use-chat.ui.test.tsx index af84aff708fc..3703c245a0f3 100644 --- a/packages/react/src/use-chat.ui.test.tsx +++ b/packages/react/src/use-chat.ui.test.tsx @@ -1922,6 +1922,14 @@ describe('resume ongoing stream and return assistant message', () => { await waitFor(() => { expect(screen.getByTestId('status')).toHaveTextContent('ready'); + + expect(server.calls.length).toBeGreaterThan(0); + const { calls } = server; + const [mostRecentCall] = calls; + + const { requestMethod, requestUrl } = mostRecentCall; + expect(requestMethod).toBe('GET'); + expect(requestUrl).toBe('http://localhost:3000/api/chat?chatId=123'); }); }); }); From 64dc1985af28a9c043fff02af52ef5e1ee612bae Mon Sep 17 00:00:00 2001 From: jeremyphilemon Date: Mon, 28 Apr 2025 15:44:01 -0700 Subject: [PATCH 12/20] update language --- content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx b/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx index f0c8ef40b6dc..5329c94da5cc 100644 --- a/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx +++ b/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx @@ -338,7 +338,7 @@ The following are the pre-requisities for your chat application to support resum - Creating a [Redis](https://vercel.com/marketplace/redis) instance to store the stream state. - Creating a table that tracks the stream IDs associated with a chat. -To resume a chat stream, you can use the `experimental_resume` function returned by the `useChat` hook. You will typically call this function during the initial mount of the hook. +To resume a chat stream, you will use the `experimental_resume` function returned by the `useChat` hook. You will call this function during the initial mount of the hook inside the main chat component. ```filename="app/components/chat.tsx" 'use client' From 9ab107d1338c928926384835ff5dcbe6617ade7c Mon Sep 17 00:00:00 2001 From: jeremyphilemon Date: Tue, 29 Apr 2025 13:24:43 -0700 Subject: [PATCH 13/20] update docs and test syntax --- .../docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx | 4 ++-- packages/react/src/use-chat.ui.test.tsx | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx b/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx index 5329c94da5cc..73b123c939f2 100644 --- a/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx +++ b/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx @@ -385,7 +385,7 @@ Add a `GET` method to `/api/chat` that: 4. Returns the latest one to `streamContext.resumableStream()` 5. 
Falls back to an empty stream if it’s already closed -```filename="app/api/chat/route.ts" +```ts filename="app/api/chat/route.ts" import { loadStreams, } from '@/util/chat-store'; @@ -440,7 +440,7 @@ When you create a brand-new chat completion, you must: 3. Kick off a `createDataStream` that pipes tokens as they arrive 4. Hand that new stream to `streamContext.resumableStream()` -``` +```ts filename="@/app/api/chat/route.ts" import { appendResponseMessages, createDataStream, diff --git a/packages/react/src/use-chat.ui.test.tsx b/packages/react/src/use-chat.ui.test.tsx index 3703c245a0f3..94ad66959fc1 100644 --- a/packages/react/src/use-chat.ui.test.tsx +++ b/packages/react/src/use-chat.ui.test.tsx @@ -1868,6 +1868,9 @@ describe('resume ongoing stream and return assistant message', () => { useEffect(() => { experimental_resume(); + + // We want to disable the exhaustive deps rule here because we only want to run this effect once + // eslint-disable-next-line react-hooks/exhaustive-deps }, []); return ( @@ -1924,8 +1927,7 @@ describe('resume ongoing stream and return assistant message', () => { expect(screen.getByTestId('status')).toHaveTextContent('ready'); expect(server.calls.length).toBeGreaterThan(0); - const { calls } = server; - const [mostRecentCall] = calls; + const mostRecentCall = server.calls[0]; const { requestMethod, requestUrl } = mostRecentCall; expect(requestMethod).toBe('GET'); From 02d91eccb9791f805c471d9aa444f6a1470b1ca2 Mon Sep 17 00:00:00 2001 From: jeremyphilemon Date: Tue, 29 Apr 2025 13:29:20 -0700 Subject: [PATCH 14/20] specify code snippet language --- content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx b/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx index 73b123c939f2..a931e6df5ede 100644 --- a/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx +++ 
b/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx @@ -340,8 +340,9 @@ The following are the pre-requisities for your chat application to support resum To resume a chat stream, you will use the `experimental_resume` function returned by the `useChat` hook. You will call this function during the initial mount of the hook inside the main chat component. -```filename="app/components/chat.tsx" +```tsx filename="app/components/chat.tsx" 'use client' + import { useChat } from "@ai-sdk/react"; import { Input } from "@/components/input"; import { Messages } from "@/components/messages"; @@ -369,7 +370,7 @@ The `experimental_resume` function makes a `GET` request to your configured chat The `GET` request automatically appends the `chatId` query parameter to the URL to help identify the chat the request belongs to. Using the `chatId`, you can look up the most recent stream ID from the database and resume the stream. -``` +```bash GET /api/chat?chatId= ``` From 99f9f6f0020fc702bc62d03f94c47241662bd86c Mon Sep 17 00:00:00 2001 From: jeremyphilemon Date: Tue, 29 Apr 2025 14:04:59 -0700 Subject: [PATCH 15/20] reuse callChatApi and differentiate by new param --- .../03-chatbot-message-persistence.mdx | 13 +- packages/react/src/use-chat.ts | 107 ++------------- packages/react/src/use-chat.ui.test.tsx | 2 +- packages/ui-utils/src/call-chat-api.ts | 123 ++++-------------- packages/ui-utils/src/index.ts | 2 +- 5 files changed, 39 insertions(+), 208 deletions(-) diff --git a/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx b/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx index a931e6df5ede..91b2a7a6649a 100644 --- a/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx +++ b/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx @@ -387,12 +387,8 @@ Add a `GET` method to `/api/chat` that: 5. 
Falls back to an empty stream if it’s already closed ```ts filename="app/api/chat/route.ts" -import { - loadStreams, -} from '@/util/chat-store'; -import { - createDataStream, -} from 'ai'; +import { loadStreams } from '@/util/chat-store'; +import { createDataStream } from 'ai'; import { after } from 'next/server'; import { createResumableStreamContext } from 'resumable-stream'; @@ -448,10 +444,7 @@ import { generateId, streamText, } from 'ai'; -import { - appendStreamId, - saveChat, -} from '@/util/chat-store'; +import { appendStreamId, saveChat } from '@/util/chat-store'; import { createResumableStreamContext } from 'resumable-stream'; const streamContext = createResumableStreamContext({ diff --git a/packages/react/src/use-chat.ts b/packages/react/src/use-chat.ts index ef0bda759d84..af244d5fd09a 100644 --- a/packages/react/src/use-chat.ts +++ b/packages/react/src/use-chat.ts @@ -9,7 +9,6 @@ import type { } from '@ai-sdk/ui-utils'; import { callChatApi, - resumeChatApi, extractMaxToolInvocationStep, fillMessageParts, generateId as generateIdFunc, @@ -243,7 +242,10 @@ By default, it's set to 1, which means that only a single LLM call is made. }, [credentials, headers, body]); const triggerRequest = useCallback( - async (chatRequest: ChatRequest) => { + async ( + chatRequest: ChatRequest, + requestType: 'generate' | 'resume' = 'generate', + ) => { mutateStatus('submitted'); setError(undefined); @@ -346,6 +348,7 @@ By default, it's set to 1, which means that only a single LLM call is made. generateId, fetch, lastMessage: chatMessages[chatMessages.length - 1], + requestType, }); abortControllerRef.current = null; @@ -407,100 +410,6 @@ By default, it's set to 1, which means that only a single LLM call is made. 
], ); - const triggerResumeRequest = useCallback(async () => { - const body = { - id: chatId, - messages: messagesRef.current, - }; - - try { - const abortController = new AbortController(); - abortControllerRef.current = abortController; - - const throttledMutate = throttle(mutate, throttleWaitMs); - const throttledMutateStreamData = throttle( - mutateStreamData, - throttleWaitMs, - ); - - const previousMessages = messagesRef.current; - const chatMessages = fillMessageParts(previousMessages); - - const existingData = streamDataRef.current; - - await resumeChatApi({ - api, - body, - fetch, - onResponse, - restoreMessagesOnFailure() { - if (!keepLastMessageOnError) { - throttledMutate(previousMessages, false); - } - }, - streamProtocol, - onUpdate({ message, data, replaceLastMessage }) { - mutateStatus('streaming'); - - throttledMutate( - [ - ...(replaceLastMessage - ? chatMessages.slice(0, chatMessages.length - 1) - : chatMessages), - message, - ], - false, - ); - - if (data?.length) { - throttledMutateStreamData( - [...(existingData ?? []), ...data], - false, - ); - } - }, - onFinish, - onToolCall, - generateId, - lastMessage: chatMessages[chatMessages.length - 1], - }); - - abortControllerRef.current = null; - - mutateStatus('ready'); - } catch (error) { - // Ignore abort errors as they are expected. - if ((error as any).name === 'AbortError') { - abortControllerRef.current = null; - mutateStatus('ready'); - return null; - } - - if (onError && error instanceof Error) { - onError(error); - } - - setError(error as Error); - mutateStatus('error'); - } - }, [ - api, - chatId, - fetch, - generateId, - keepLastMessageOnError, - mutate, - mutateStatus, - mutateStreamData, - onFinish, - onResponse, - onToolCall, - streamProtocol, - throttleWaitMs, - onError, - setError, - ]); - const append = useCallback( async ( message: Message | CreateMessage, @@ -558,8 +467,10 @@ By default, it's set to 1, which means that only a single LLM call is made. 
}, []); const experimental_resume = useCallback(async () => { - return triggerResumeRequest(); - }, [triggerResumeRequest]); + const messages = messagesRef.current; + + return triggerRequest({ messages }, 'resume'); + }, [triggerRequest]); const setMessages = useCallback( (messages: Message[] | ((messages: Message[]) => Message[])) => { diff --git a/packages/react/src/use-chat.ui.test.tsx b/packages/react/src/use-chat.ui.test.tsx index 94ad66959fc1..ee76cdff0c18 100644 --- a/packages/react/src/use-chat.ui.test.tsx +++ b/packages/react/src/use-chat.ui.test.tsx @@ -1903,7 +1903,7 @@ describe('resume ongoing stream and return assistant message', () => { expect(screen.getByTestId('message-0')).toHaveTextContent('User: hi'); await waitFor(() => { - expect(screen.getByTestId('status')).toHaveTextContent('ready'); + expect(screen.getByTestId('status')).toHaveTextContent('submitted'); }); controller.write('0:"Hello"\n'); diff --git a/packages/ui-utils/src/call-chat-api.ts b/packages/ui-utils/src/call-chat-api.ts index 188f62540e77..54160b515e48 100644 --- a/packages/ui-utils/src/call-chat-api.ts +++ b/packages/ui-utils/src/call-chat-api.ts @@ -20,6 +20,7 @@ export async function callChatApi({ generateId, fetch = getOriginalFetch(), lastMessage, + requestType = 'generate', }: { api: string; body: Record; @@ -39,17 +40,31 @@ export async function callChatApi({ generateId: IdGenerator; fetch: ReturnType | undefined; lastMessage: UIMessage | undefined; + requestType?: 'generate' | 'resume'; }) { - const response = await fetch(api, { - method: 'POST', - body: JSON.stringify(body), - headers: { - 'Content-Type': 'application/json', - ...headers, - }, - signal: abortController?.()?.signal, - credentials, - }).catch(err => { + const request = + requestType === 'resume' + ? 
fetch(`${api}?chatId=${body.id}`, { + method: 'GET', + headers: { + 'Content-Type': 'application/json', + ...headers, + }, + signal: abortController?.()?.signal, + credentials, + }) + : fetch(api, { + method: 'POST', + body: JSON.stringify(body), + headers: { + 'Content-Type': 'application/json', + ...headers, + }, + signal: abortController?.()?.signal, + credentials, + }); + + const response = await request.catch(err => { restoreMessagesOnFailure(); throw err; }); @@ -106,91 +121,3 @@ export async function callChatApi({ } } } - -export async function resumeChatApi({ - api, - body, - fetch = getOriginalFetch(), - onResponse, - restoreMessagesOnFailure, - streamProtocol = 'data', - onUpdate, - onFinish, - onToolCall, - generateId, - lastMessage, -}: { - api: string; - body: Record; - fetch: ReturnType | undefined; - onResponse: ((response: Response) => void | Promise) | undefined; - restoreMessagesOnFailure: () => void; - streamProtocol: 'data' | 'text' | undefined; - onUpdate: (options: { - message: UIMessage; - data: JSONValue[] | undefined; - replaceLastMessage: boolean; - }) => void; - onFinish: UseChatOptions['onFinish']; - onToolCall: UseChatOptions['onToolCall']; - generateId: IdGenerator; - lastMessage: UIMessage | undefined; -}) { - const { id } = body; - - const response = await fetch(`${api}?chatId=${id}`, { - method: 'GET', - }); - - if (onResponse) { - try { - await onResponse(response); - } catch (err) { - throw err; - } - } - - if (!response.ok) { - restoreMessagesOnFailure(); - throw new Error( - (await response.text()) ?? 
'Failed to fetch the chat response.', - ); - } - - if (!response.body) { - throw new Error('The response body is empty.'); - } - - switch (streamProtocol) { - case 'text': { - await processChatTextResponse({ - stream: response.body, - update: onUpdate, - onFinish, - generateId, - }); - return; - } - - case 'data': { - await processChatResponse({ - stream: response.body, - update: onUpdate, - lastMessage, - onToolCall, - onFinish({ message, finishReason, usage }) { - if (onFinish && message != null) { - onFinish(message, { usage, finishReason }); - } - }, - generateId, - }); - return; - } - - default: { - const exhaustiveCheck: never = streamProtocol; - throw new Error(`Unknown stream protocol: ${exhaustiveCheck}`); - } - } -} diff --git a/packages/ui-utils/src/index.ts b/packages/ui-utils/src/index.ts index de388ff3cdf3..12ba2071ccb6 100644 --- a/packages/ui-utils/src/index.ts +++ b/packages/ui-utils/src/index.ts @@ -13,7 +13,7 @@ export type { AssistantStreamPart, AssistantStreamString, } from './assistant-stream-parts'; -export { callChatApi, resumeChatApi } from './call-chat-api'; +export { callChatApi } from './call-chat-api'; export { callCompletionApi } from './call-completion-api'; export { formatDataStreamPart, parseDataStreamPart } from './data-stream-parts'; export type { DataStreamPart, DataStreamString } from './data-stream-parts'; From 3fc8c24f59e29d042aba7d94d641a355d31dcaa7 Mon Sep 17 00:00:00 2001 From: jeremyphilemon Date: Wed, 30 Apr 2025 18:51:39 -0700 Subject: [PATCH 16/20] add function to reference and update return type --- content/docs/07-reference/02-ai-sdk-ui/01-use-chat.mdx | 5 +++++ packages/react/src/use-chat.ts | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/content/docs/07-reference/02-ai-sdk-ui/01-use-chat.mdx b/content/docs/07-reference/02-ai-sdk-ui/01-use-chat.mdx index 3a54c09dd098..e39468051647 100644 --- a/content/docs/07-reference/02-ai-sdk-ui/01-use-chat.mdx +++ 
b/content/docs/07-reference/02-ai-sdk-ui/01-use-chat.mdx @@ -600,6 +600,11 @@ Allows you to easily create a conversational user interface for your chatbot app type: '() => void', description: 'Function to abort the current API request.', }, + { + name: 'experimental_resume', + type: '() => void', + description: 'Function to resume an ongoing chat generation stream.', + }, { name: 'setMessages', type: '(messages: Message[] | ((messages: Message[]) => Message[]) => void', diff --git a/packages/react/src/use-chat.ts b/packages/react/src/use-chat.ts index c1a033421d12..3c064f7677fa 100644 --- a/packages/react/src/use-chat.ts +++ b/packages/react/src/use-chat.ts @@ -56,7 +56,7 @@ export type UseChatHelpers = { /** * Resume an ongoing chat generation stream. This does not resume an aborted generation. */ - experimental_resume: () => Promise; + experimental_resume: () => void; /** * Update the `messages` state locally. This is useful when you want to @@ -469,7 +469,7 @@ By default, it's set to 1, which means that only a single LLM call is made. const experimental_resume = useCallback(async () => { const messages = messagesRef.current; - return triggerRequest({ messages }, 'resume'); + triggerRequest({ messages }, 'resume'); }, [triggerRequest]); const setMessages = useCallback( From 9ea0a1b07f41269c7ccf5dd70b8978c378d68784 Mon Sep 17 00:00:00 2001 From: jeremyphilemon Date: Wed, 30 Apr 2025 19:03:42 -0700 Subject: [PATCH 17/20] update code snippet language --- content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx b/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx index 91b2a7a6649a..ab82f8fbe81b 100644 --- a/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx +++ b/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx @@ -386,7 +386,7 @@ Add a `GET` method to `/api/chat` that: 4. 
Returns the latest one to `streamContext.resumableStream()` 5. Falls back to an empty stream if it’s already closed -```ts filename="app/api/chat/route.ts" +```typescript filename="app/api/chat/route.ts" import { loadStreams } from '@/util/chat-store'; import { createDataStream } from 'ai'; import { after } from 'next/server'; @@ -437,7 +437,7 @@ When you create a brand-new chat completion, you must: 3. Kick off a `createDataStream` that pipes tokens as they arrive 4. Hand that new stream to `streamContext.resumableStream()` -```ts filename="@/app/api/chat/route.ts" +```typescript filename="@/app/api/chat/route.ts" import { appendResponseMessages, createDataStream, From 45335647c6dd5fcdf8f0949d37b8a6ed52923c56 Mon Sep 17 00:00:00 2001 From: jeremyphilemon Date: Wed, 30 Apr 2025 19:10:14 -0700 Subject: [PATCH 18/20] use tsx as language --- content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx b/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx index ab82f8fbe81b..1a80b1d69a90 100644 --- a/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx +++ b/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx @@ -386,7 +386,7 @@ Add a `GET` method to `/api/chat` that: 4. Returns the latest one to `streamContext.resumableStream()` 5. Falls back to an empty stream if it’s already closed -```typescript filename="app/api/chat/route.ts" +```tsx filename="app/api/chat/route.ts" import { loadStreams } from '@/util/chat-store'; import { createDataStream } from 'ai'; import { after } from 'next/server'; @@ -437,7 +437,7 @@ When you create a brand-new chat completion, you must: 3. Kick off a `createDataStream` that pipes tokens as they arrive 4. 
Hand that new stream to `streamContext.resumableStream()` -```typescript filename="@/app/api/chat/route.ts" +```tsx filename="@/app/api/chat/route.ts" import { appendResponseMessages, createDataStream, From fef181cb8b5ae9f1b0afd4cbdbfcccf8cc021f04 Mon Sep 17 00:00:00 2001 From: jeremyphilemon Date: Wed, 30 Apr 2025 19:14:52 -0700 Subject: [PATCH 19/20] revert to ts --- content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx b/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx index 1a80b1d69a90..91b2a7a6649a 100644 --- a/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx +++ b/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx @@ -386,7 +386,7 @@ Add a `GET` method to `/api/chat` that: 4. Returns the latest one to `streamContext.resumableStream()` 5. Falls back to an empty stream if it’s already closed -```tsx filename="app/api/chat/route.ts" +```ts filename="app/api/chat/route.ts" import { loadStreams } from '@/util/chat-store'; import { createDataStream } from 'ai'; import { after } from 'next/server'; @@ -437,7 +437,7 @@ When you create a brand-new chat completion, you must: 3. Kick off a `createDataStream` that pipes tokens as they arrive 4. 
Hand that new stream to `streamContext.resumableStream()` -```tsx filename="@/app/api/chat/route.ts" +```ts filename="@/app/api/chat/route.ts" import { appendResponseMessages, createDataStream, From 05e3ce429f869ea4d1cf6a605fd5d4a5eb9bbdad Mon Sep 17 00:00:00 2001 From: jeremyphilemon Date: Wed, 30 Apr 2025 19:31:58 -0700 Subject: [PATCH 20/20] use consistent path --- content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx b/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx index 91b2a7a6649a..e690aa6e6495 100644 --- a/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx +++ b/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx @@ -437,7 +437,7 @@ When you create a brand-new chat completion, you must: 3. Kick off a `createDataStream` that pipes tokens as they arrive 4. Hand that new stream to `streamContext.resumableStream()` -```ts filename="@/app/api/chat/route.ts" +```ts filename="app/api/chat/route.ts" import { appendResponseMessages, createDataStream,