From d8e813ff183b3a3906934ad3dbca411e5c9479ab Mon Sep 17 00:00:00 2001 From: Dhamivibez Date: Mon, 9 Feb 2026 08:12:34 +0100 Subject: [PATCH 01/16] feat(ai-groq): add Groq adapter package Introduce a new Groq adapter to enable fast LLM inference via Groq's API. Includes TypeScript configuration and Vite build setup for consistent tooling across the AI packages. --- packages/typescript/ai-groq/package.json | 50 ++++++++++++++++++++++ packages/typescript/ai-groq/src/index.ts | 0 packages/typescript/ai-groq/tsconfig.json | 9 ++++ packages/typescript/ai-groq/vite.config.ts | 36 ++++++++++++++++ pnpm-lock.yaml | 21 ++++++++- 5 files changed, 115 insertions(+), 1 deletion(-) create mode 100644 packages/typescript/ai-groq/package.json create mode 100644 packages/typescript/ai-groq/src/index.ts create mode 100644 packages/typescript/ai-groq/tsconfig.json create mode 100644 packages/typescript/ai-groq/vite.config.ts diff --git a/packages/typescript/ai-groq/package.json b/packages/typescript/ai-groq/package.json new file mode 100644 index 00000000..c3004834 --- /dev/null +++ b/packages/typescript/ai-groq/package.json @@ -0,0 +1,50 @@ +{ + "name": "@tanstack/ai-groq", + "version": "0.0.1", + "type": "module", + "description": "Groq adapter for TanStack AI", + "author": "", + "license": "MIT", + "repository": { + "type": "git", + "url": "git+https://github.com/TanStack/ai.git", + "directory": "packages/typescript/ai-groq" + }, + "module": "./dist/esm/index.js", + "types": "./dist/esm/index.d.ts", + "exports": { + ".": { + "types": "./dist/esm/index.d.ts", + "import": "./dist/esm/index.js" + } + }, + "files": [ + "dist", + "src" + ], + "scripts": { + "build": "vite build", + "clean": "premove ./build ./dist", + "lint:fix": "eslint ./src --fix", + "test:build": "publint --strict", + "test:eslint": "eslint ./src", + "test:lib": "vitest run", + "test:lib:dev": "pnpm test:lib --watch", + "test:types": "tsc" + }, + "keywords": [ + "ai", + "groq", + "tanstack", + "adapter" + ], + 
"devDependencies": { + "@vitest/coverage-v8": "4.0.14", + "vite": "^7.2.7" + }, + "peerDependencies": { + "@tanstack/ai": "workspace:^", + "zod": "^4.0.0" + }, + "packageManager": "pnpm@10.17.0" +} diff --git a/packages/typescript/ai-groq/src/index.ts b/packages/typescript/ai-groq/src/index.ts new file mode 100644 index 00000000..e69de29b diff --git a/packages/typescript/ai-groq/tsconfig.json b/packages/typescript/ai-groq/tsconfig.json new file mode 100644 index 00000000..ea11c109 --- /dev/null +++ b/packages/typescript/ai-groq/tsconfig.json @@ -0,0 +1,9 @@ +{ + "extends": "../../../tsconfig.json", + "compilerOptions": { + "outDir": "dist", + "rootDir": "src" + }, + "include": ["src/**/*.ts", "src/**/*.tsx"], + "exclude": ["node_modules", "dist", "**/*.config.ts"] +} diff --git a/packages/typescript/ai-groq/vite.config.ts b/packages/typescript/ai-groq/vite.config.ts new file mode 100644 index 00000000..77bcc2e6 --- /dev/null +++ b/packages/typescript/ai-groq/vite.config.ts @@ -0,0 +1,36 @@ +import { defineConfig, mergeConfig } from 'vitest/config' +import { tanstackViteConfig } from '@tanstack/vite-config' +import packageJson from './package.json' + +const config = defineConfig({ + test: { + name: packageJson.name, + dir: './', + watch: false, + globals: true, + environment: 'node', + include: ['tests/**/*.test.ts'], + coverage: { + provider: 'v8', + reporter: ['text', 'json', 'html', 'lcov'], + exclude: [ + 'node_modules/', + 'dist/', + 'tests/', + '**/*.test.ts', + '**/*.config.ts', + '**/types.ts', + ], + include: ['src/**/*.ts'], + }, + }, +}) + +export default mergeConfig( + config, + tanstackViteConfig({ + entry: ['./src/index.ts'], + srcDir: './src', + cjs: false, + }), +) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 7b8afb55..6fdabb12 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -720,6 +720,22 @@ importers: specifier: ^7.2.7 version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + 
packages/typescript/ai-groq: + dependencies: + '@tanstack/ai': + specifier: workspace:^ + version: link:../ai + zod: + specifier: ^4.0.0 + version: 4.2.1 + devDependencies: + '@vitest/coverage-v8': + specifier: 4.0.14 + version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + vite: + specifier: ^7.2.7 + version: 7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + packages/typescript/ai-ollama: dependencies: ollama: @@ -5629,6 +5645,7 @@ packages: glob@10.5.0: resolution: {integrity: sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==} + deprecated: Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me hasBin: true glob@13.0.0: @@ -7777,6 +7794,7 @@ packages: tar@7.5.2: resolution: {integrity: sha512-7NyxrTE4Anh8km8iEy7o0QYPs+0JKBTj5ZaqHg6B39erLg0qYXN3BijtShwbsNSvQ+LN75+KV+C4QR/f6Gwnpg==} engines: {node: '>=18'} + deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. 
Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me term-size@2.2.1: resolution: {integrity: sha512-wK0Ri4fOGjv/XPy8SBHZChl8CM7uMc5VML7SqiQ0zG7+J5Vr+RMQDoHa2CNT6KHUnTGIXH34UDMkPzAUyapBZg==} @@ -8555,6 +8573,7 @@ packages: whatwg-encoding@3.1.1: resolution: {integrity: sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ==} engines: {node: '>=18'} + deprecated: Use @exodus/bytes instead for a more spec-conformant and faster implementation whatwg-fetch@3.6.20: resolution: {integrity: sha512-EqhiFU6daOA8kpjOWTL0olhVOF3i7OrFzSYiGsEMB8GcXS+RrzauAERX65xMeNWVqxA6HXH2m69Z9LaKKdisfg==} @@ -17211,7 +17230,7 @@ snapshots: fdir: 6.5.0(picomatch@4.0.3) picomatch: 4.0.3 postcss: 8.5.6 - rollup: 4.53.3 + rollup: 4.55.1 tinyglobby: 0.2.15 optionalDependencies: '@types/node': 24.10.3 From ad9c161d874eea53af61ad81c3209491eddcf0f1 Mon Sep 17 00:00:00 2001 From: Dhamivibez Date: Mon, 9 Feb 2026 08:35:34 +0100 Subject: [PATCH 02/16] feat(vitest): add Vitest configuration for ai-groq package --- packages/typescript/ai-groq/vitest.config.ts | 22 ++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 packages/typescript/ai-groq/vitest.config.ts diff --git a/packages/typescript/ai-groq/vitest.config.ts b/packages/typescript/ai-groq/vitest.config.ts new file mode 100644 index 00000000..fa253174 --- /dev/null +++ b/packages/typescript/ai-groq/vitest.config.ts @@ -0,0 +1,22 @@ +import { defineConfig } from 'vitest/config' + +export default defineConfig({ + test: { + globals: true, + environment: 'node', + include: ['tests/**/*.test.ts'], + coverage: { + provider: 'v8', + reporter: ['text', 'json', 'html', 'lcov'], + exclude: [ + 'node_modules/', + 'dist/', + 'tests/', + '**/*.test.ts', + '**/*.config.ts', + '**/types.ts', + ], + include: ['src/**/*.ts'], + }, + }, +}) From 2ce45a7b93c3037403c206d824fa9a320dcb01ab Mon Sep 17 00:00:00 2001 From: Dhamivibez Date: Mon, 9 Feb 2026 08:36:32 +0100 Subject: 
[PATCH 03/16] feat: add Groq SDK as dependency and client utilities Introduce the Groq SDK package and add utility functions for creating a Groq client, retrieving the API key from environment variables, and generating prefixed IDs. This also updates the lockfile to include required Groq SDK dependencies. --- package.json | 3 + .../typescript/ai-groq/src/utils/client.ts | 42 +++++++++++ pnpm-lock.yaml | 74 +++++++++++++++++++ 3 files changed, 119 insertions(+) create mode 100644 packages/typescript/ai-groq/src/utils/client.ts diff --git a/package.json b/package.json index 31934812..1bbda7ce 100644 --- a/package.json +++ b/package.json @@ -70,5 +70,8 @@ "typescript": "5.9.3", "vite": "^7.2.7", "vitest": "^4.0.14" + }, + "dependencies": { + "groq-sdk": "^0.37.0" } } diff --git a/packages/typescript/ai-groq/src/utils/client.ts b/packages/typescript/ai-groq/src/utils/client.ts new file mode 100644 index 00000000..f143193d --- /dev/null +++ b/packages/typescript/ai-groq/src/utils/client.ts @@ -0,0 +1,42 @@ +import Groq_SDK from 'groq-sdk' +import type { ClientOptions } from 'groq-sdk' + +export interface GroqClientConfig extends ClientOptions { + apiKey: string +} + +/** + * Creates a Groq SDK client instance + */ +export function createGroqClient(config: GroqClientConfig): Groq_SDK { + return new Groq_SDK(config) +} + +/** + * Gets Groq API key from environment variables + * @throws Error if GROQ_API_KEY is not found + */ +export function getGroqApiKeyFromEnv(): string { + const env = + typeof globalThis !== 'undefined' && (globalThis as any).window?.env + ? (globalThis as any).window.env + : typeof process !== 'undefined' + ? process.env + : undefined + const key = env?.GROQ_API_KEY + + if (!key) { + throw new Error( + 'GROQ_API_KEY is required. 
Please set it in your environment variables or use the factory function with an explicit API key.', + ) + } + + return key +} + +/** + * Generates a unique ID with a prefix + */ +export function generateId(prefix: string): string { + return `${prefix}-${Date.now()}-${Math.random().toString(36).substring(7)}` +} diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 6fdabb12..eeb91152 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -10,6 +10,10 @@ overrides: importers: .: + dependencies: + groq-sdk: + specifier: ^0.37.0 + version: 0.37.0 devDependencies: '@changesets/cli': specifier: ^2.29.8 @@ -4010,9 +4014,15 @@ packages: '@types/ms@2.1.0': resolution: {integrity: sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==} + '@types/node-fetch@2.6.13': + resolution: {integrity: sha512-QGpRVpzSaUs30JBSGPjOg4Uveu384erbHBoT1zeONvyCfwQxIkUshLAOqN/k9EjGviPRmWTTe6aH2qySWKTVSw==} + '@types/node@12.20.55': resolution: {integrity: sha512-J8xLz7q2OFulZ2cyGTLE1TbbZcjpno7FaN6zdJNrgAdrJ+DZzh/uFR6YrTb4C+nXakvud8Q4+rbhoIWlYQbUFQ==} + '@types/node@18.19.130': + resolution: {integrity: sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg==} + '@types/node@20.19.26': resolution: {integrity: sha512-0l6cjgF0XnihUpndDhk+nyD3exio3iKaYROSgvh/qSevPXax3L8p5DBRFjbvalnwatGgHEQn2R88y2fA3g4irg==} @@ -4422,6 +4432,10 @@ packages: resolution: {integrity: sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==} engines: {node: '>= 14'} + agentkeepalive@4.6.0: + resolution: {integrity: sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==} + engines: {node: '>= 8.0.0'} + ajv-draft-04@1.0.0: resolution: {integrity: sha512-mv00Te6nmYbRp5DCwclxtt7yV/joXJPGS7nM+97GdxvuttCOfgI3K4U25zboyeX0O+myI8ERluxQe5wljMmVIw==} peerDependencies: @@ -5539,6 +5553,9 @@ packages: resolution: {integrity: 
sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==} engines: {node: '>=14'} + form-data-encoder@1.7.2: + resolution: {integrity: sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==} + form-data@4.0.5: resolution: {integrity: sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==} engines: {node: '>= 6'} @@ -5548,6 +5565,10 @@ packages: engines: {node: '>=18.3.0'} hasBin: true + formdata-node@4.4.1: + resolution: {integrity: sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==} + engines: {node: '>= 12.20'} + formdata-polyfill@4.0.10: resolution: {integrity: sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==} engines: {node: '>=12.20.0'} @@ -5699,6 +5720,9 @@ packages: graceful-fs@4.2.11: resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} + groq-sdk@0.37.0: + resolution: {integrity: sha512-lT72pcT8b/X5XrzdKf+rWVzUGW1OQSKESmL8fFN5cTbsf02gq6oFam4SVeNtzELt9cYE2Pt3pdGgSImuTbHFDg==} + gtoken@8.0.0: resolution: {integrity: sha512-+CqsMbHPiSTdtSO14O51eMNlrp9N79gmeqmXeouJOhfucAedHw9noVe/n5uJk3tbKE6a+6ZCQg3RPhVhHByAIw==} engines: {node: '>=18'} @@ -5854,6 +5878,9 @@ packages: resolution: {integrity: sha512-AXcZb6vzzrFAUE61HnN4mpLqd/cSIwNQjtNWR0euPm6y0iqx3G4gOXaIDdtdDwZmhwe82LA6+zinmW4UBWVePQ==} engines: {node: '>=16.17.0'} + humanize-ms@1.2.1: + resolution: {integrity: sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==} + iconv-lite@0.6.3: resolution: {integrity: sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==} engines: {node: '>=0.10.0'} @@ -8010,6 +8037,9 @@ packages: unctx@2.5.0: resolution: {integrity: sha512-p+Rz9x0R7X+CYDkT+Xg8/GhpcShTlU8n+cf9OtOEf7zEQsNcCZO1dPKNRDqvUTaq+P32PMMkxWHwfrxkqfqAYg==} 
+ undici-types@5.26.5: + resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==} + undici-types@5.28.4: resolution: {integrity: sha512-3OeMF5Lyowe8VW0skf5qaIE7Or3yS9LS7fvMUI0gg4YxpIBVg0L8BxCmROw2CcYhSkpR68Epz7CGc8MPj94Uww==} @@ -8557,6 +8587,10 @@ packages: resolution: {integrity: sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==} engines: {node: '>= 8'} + web-streams-polyfill@4.0.0-beta.3: + resolution: {integrity: sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==} + engines: {node: '>= 14'} + web-vitals@5.1.0: resolution: {integrity: sha512-ArI3kx5jI0atlTtmV0fWU3fjpLmq/nD3Zr1iFFlJLaqa5wLBkUSzINwBPySCX/8jRyjlmy1Volw1kz1g9XE4Jg==} @@ -11850,8 +11884,17 @@ snapshots: '@types/ms@2.1.0': {} + '@types/node-fetch@2.6.13': + dependencies: + '@types/node': 24.10.3 + form-data: 4.0.5 + '@types/node@12.20.55': {} + '@types/node@18.19.130': + dependencies: + undici-types: 5.26.5 + '@types/node@20.19.26': dependencies: undici-types: 6.21.0 @@ -12438,6 +12481,10 @@ snapshots: agent-base@7.1.4: {} + agentkeepalive@4.6.0: + dependencies: + humanize-ms: 1.2.1 + ajv-draft-04@1.0.0(ajv@8.13.0): optionalDependencies: ajv: 8.13.0 @@ -13714,6 +13761,8 @@ snapshots: cross-spawn: 7.0.6 signal-exit: 4.1.0 + form-data-encoder@1.7.2: {} + form-data@4.0.5: dependencies: asynckit: 0.4.0 @@ -13726,6 +13775,11 @@ snapshots: dependencies: fd-package-json: 2.0.0 + formdata-node@4.4.1: + dependencies: + node-domexception: 1.0.0 + web-streams-polyfill: 4.0.0-beta.3 + formdata-polyfill@4.0.10: dependencies: fetch-blob: 3.2.0 @@ -13902,6 +13956,18 @@ snapshots: graceful-fs@4.2.11: {} + groq-sdk@0.37.0: + dependencies: + '@types/node': 18.19.130 + '@types/node-fetch': 2.6.13 + abort-controller: 3.0.0 + agentkeepalive: 4.6.0 + form-data-encoder: 1.7.2 + formdata-node: 4.4.1 + node-fetch: 2.7.0 + transitivePeerDependencies: + - encoding + 
gtoken@8.0.0: dependencies: gaxios: 7.1.3 @@ -14148,6 +14214,10 @@ snapshots: human-signals@5.0.0: {} + humanize-ms@1.2.1: + dependencies: + ms: 2.1.3 + iconv-lite@0.6.3: dependencies: safer-buffer: 2.1.2 @@ -16819,6 +16889,8 @@ snapshots: magic-string: 0.30.21 unplugin: 2.3.11 + undici-types@5.26.5: {} + undici-types@5.28.4: {} undici-types@6.21.0: {} @@ -17485,6 +17557,8 @@ snapshots: web-streams-polyfill@3.3.3: {} + web-streams-polyfill@4.0.0-beta.3: {} + web-vitals@5.1.0: {} webidl-conversions@3.0.1: {} From 70dfc54cacd9db3fad1a86cceb73f78719512e03 Mon Sep 17 00:00:00 2001 From: Dhamivibez Date: Wed, 11 Feb 2026 11:41:49 +0100 Subject: [PATCH 04/16] feat: opus 4.6 model & additional config for provider clients (#278) * feat: opus 4.6 model & additional config for provider clients * fix: isue with gemini adapter --- .changeset/slimy-ways-wave.md | 7 + packages/typescript/ai-anthropic/package.json | 2 +- packages/typescript/ai-anthropic/src/index.ts | 3 +- .../typescript/ai-anthropic/src/model-meta.ts | 89 ++++++-- .../ai-anthropic/src/utils/client.ts | 4 +- .../ai-gemini/src/adapters/summarize.ts | 25 +-- .../typescript/ai-gemini/src/utils/client.ts | 4 +- .../typescript/ai-grok/src/adapters/image.ts | 5 +- packages/typescript/ai-grok/src/index.ts | 3 +- packages/typescript/ai-grok/src/model-meta.ts | 3 + .../typescript/ai-grok/src/utils/client.ts | 5 +- pnpm-lock.yaml | 211 +++++++++++++++--- 12 files changed, 277 insertions(+), 84 deletions(-) create mode 100644 .changeset/slimy-ways-wave.md diff --git a/.changeset/slimy-ways-wave.md b/.changeset/slimy-ways-wave.md new file mode 100644 index 00000000..06404a7d --- /dev/null +++ b/.changeset/slimy-ways-wave.md @@ -0,0 +1,7 @@ +--- +'@tanstack/ai-anthropic': patch +'@tanstack/ai-gemini': patch +'@tanstack/ai-grok': patch +--- + +Add in opus 4.6 and enhance acceptable config options by providers diff --git a/packages/typescript/ai-anthropic/package.json b/packages/typescript/ai-anthropic/package.json index 
25c40f36..82d75c66 100644 --- a/packages/typescript/ai-anthropic/package.json +++ b/packages/typescript/ai-anthropic/package.json @@ -40,7 +40,7 @@ "test:types": "tsc" }, "dependencies": { - "@anthropic-ai/sdk": "^0.71.0" + "@anthropic-ai/sdk": "^0.71.2" }, "peerDependencies": { "@tanstack/ai": "workspace:^", diff --git a/packages/typescript/ai-anthropic/src/index.ts b/packages/typescript/ai-anthropic/src/index.ts index b0ff0750..28988f3f 100644 --- a/packages/typescript/ai-anthropic/src/index.ts +++ b/packages/typescript/ai-anthropic/src/index.ts @@ -19,7 +19,7 @@ export { type AnthropicSummarizeConfig, type AnthropicSummarizeProviderOptions, } from './adapters/summarize' - +export { ANTHROPIC_MODELS } from './model-meta' // ============================================================================ // Type Exports // ============================================================================ @@ -27,6 +27,7 @@ export { export type { AnthropicChatModelProviderOptionsByName, AnthropicModelInputModalitiesByName, + AnthropicChatModel, } from './model-meta' export type { AnthropicTextMetadata, diff --git a/packages/typescript/ai-anthropic/src/model-meta.ts b/packages/typescript/ai-anthropic/src/model-meta.ts index fa48e503..1f642b86 100644 --- a/packages/typescript/ai-anthropic/src/model-meta.ts +++ b/packages/typescript/ai-anthropic/src/model-meta.ts @@ -46,19 +46,50 @@ interface ModelMeta< */ messageCapabilities?: TMessageCapabilities } -const CLAUDE_SONNET_4_5 = { - name: 'claude-sonnet-4-5', - id: 'claude-sonnet-4-5', + +const CLAUDE_OPUS_4_6 = { + name: 'claude-opus-4-6', + id: 'claude-opus-4-6', context_window: 200_000, - max_output_tokens: 64_000, - knowledge_cutoff: '2025-09-29', + max_output_tokens: 128_000, + knowledge_cutoff: '2025-05-01', pricing: { input: { - normal: 3, + normal: 5, }, output: { + normal: 25, + }, + }, + supports: { + input: ['text', 'image', 'document'], + extended_thinking: true, + priority_tier: true, + }, +} as const satisfies 
ModelMeta< + AnthropicContainerOptions & + AnthropicContextManagementOptions & + AnthropicMCPOptions & + AnthropicServiceTierOptions & + AnthropicStopSequencesOptions & + AnthropicThinkingOptions & + AnthropicToolChoiceOptions & + AnthropicSamplingOptions +> + +const CLAUDE_OPUS_4_5 = { + name: 'claude-opus-4-5', + id: 'claude-opus-4-5', + context_window: 200_000, + max_output_tokens: 32_000, + knowledge_cutoff: '2025-11-01', + pricing: { + input: { normal: 15, }, + output: { + normal: 75, + }, }, supports: { input: ['text', 'image', 'document'], @@ -76,18 +107,18 @@ const CLAUDE_SONNET_4_5 = { AnthropicSamplingOptions > -const CLAUDE_HAIKU_4_5 = { - name: 'claude-haiku-4-5', - id: 'claude-haiku-4-5', +const CLAUDE_SONNET_4_5 = { + name: 'claude-sonnet-4-5', + id: 'claude-sonnet-4-5', context_window: 200_000, max_output_tokens: 64_000, - knowledge_cutoff: '2025-10-01', + knowledge_cutoff: '2025-09-29', pricing: { input: { - normal: 1, + normal: 3, }, output: { - normal: 5, + normal: 15, }, }, supports: { @@ -106,18 +137,18 @@ const CLAUDE_HAIKU_4_5 = { AnthropicSamplingOptions > -const CLAUDE_OPUS_4_1 = { - name: 'claude-opus-4-1', - id: 'claude-opus-4-1', +const CLAUDE_HAIKU_4_5 = { + name: 'claude-haiku-4-5', + id: 'claude-haiku-4-5', context_window: 200_000, max_output_tokens: 64_000, - knowledge_cutoff: '2025-08-05', + knowledge_cutoff: '2025-10-01', pricing: { input: { - normal: 15, + normal: 1, }, output: { - normal: 75, + normal: 5, }, }, supports: { @@ -136,12 +167,12 @@ const CLAUDE_OPUS_4_1 = { AnthropicSamplingOptions > -const CLAUDE_OPUS_4_5 = { - name: 'claude-opus-4-5', - id: 'claude-opus-4-5', +const CLAUDE_OPUS_4_1 = { + name: 'claude-opus-4-1', + id: 'claude-opus-4-1', context_window: 200_000, - max_output_tokens: 32_000, - knowledge_cutoff: '2025-11-01', + max_output_tokens: 64_000, + knowledge_cutoff: '2025-08-05', pricing: { input: { normal: 15, @@ -361,6 +392,7 @@ const CLAUDE_HAIKU_3 = { : unknown */ export const ANTHROPIC_MODELS = [ + 
CLAUDE_OPUS_4_6.id, CLAUDE_OPUS_4_5.id, CLAUDE_SONNET_4_5.id, CLAUDE_HAIKU_4_5.id, @@ -378,11 +410,19 @@ export const ANTHROPIC_MODELS = [ // const ANTHROPIC_VIDEO_MODELS = [] as const /* type AnthropicModel = (typeof ANTHROPIC_MODELS)[number] */ - +export type AnthropicChatModel = (typeof ANTHROPIC_MODELS)[number] // Manual type map for per-model provider options // Models are differentiated by extended_thinking and priority_tier support export type AnthropicChatModelProviderOptionsByName = { // Models with both extended_thinking and priority_tier + [CLAUDE_OPUS_4_6.id]: AnthropicContainerOptions & + AnthropicContextManagementOptions & + AnthropicMCPOptions & + AnthropicServiceTierOptions & + AnthropicStopSequencesOptions & + AnthropicThinkingOptions & + AnthropicToolChoiceOptions & + AnthropicSamplingOptions [CLAUDE_OPUS_4_5.id]: AnthropicContainerOptions & AnthropicContextManagementOptions & AnthropicMCPOptions & @@ -470,6 +510,7 @@ export type AnthropicChatModelProviderOptionsByName = { * @see https://docs.anthropic.com/claude/docs/pdf-support */ export type AnthropicModelInputModalitiesByName = { + [CLAUDE_OPUS_4_6.id]: typeof CLAUDE_OPUS_4_6.supports.input [CLAUDE_OPUS_4_5.id]: typeof CLAUDE_OPUS_4_5.supports.input [CLAUDE_SONNET_4_5.id]: typeof CLAUDE_SONNET_4_5.supports.input [CLAUDE_HAIKU_4_5.id]: typeof CLAUDE_HAIKU_4_5.supports.input diff --git a/packages/typescript/ai-anthropic/src/utils/client.ts b/packages/typescript/ai-anthropic/src/utils/client.ts index dddc5caf..e42c1255 100644 --- a/packages/typescript/ai-anthropic/src/utils/client.ts +++ b/packages/typescript/ai-anthropic/src/utils/client.ts @@ -1,6 +1,7 @@ import Anthropic_SDK from '@anthropic-ai/sdk' +import type { ClientOptions } from '@anthropic-ai/sdk' -export interface AnthropicClientConfig { +export interface AnthropicClientConfig extends ClientOptions { apiKey: string } @@ -11,6 +12,7 @@ export function createAnthropicClient( config: AnthropicClientConfig, ): Anthropic_SDK { return new 
Anthropic_SDK({ + ...config, apiKey: config.apiKey, }) } diff --git a/packages/typescript/ai-gemini/src/adapters/summarize.ts b/packages/typescript/ai-gemini/src/adapters/summarize.ts index 40a18bb7..a4f2225e 100644 --- a/packages/typescript/ai-gemini/src/adapters/summarize.ts +++ b/packages/typescript/ai-gemini/src/adapters/summarize.ts @@ -4,8 +4,8 @@ import { generateId, getGeminiApiKeyFromEnv, } from '../utils' - import type { GoogleGenAI } from '@google/genai' +import type { GeminiClientConfig } from '../utils' import type { SummarizeAdapter } from '@tanstack/ai/adapters' import type { StreamChunk, @@ -13,6 +13,10 @@ import type { SummarizationResult, } from '@tanstack/ai' +/** + * Configuration for Gemini summarize adapter + */ +export interface GeminiSummarizeConfig extends GeminiClientConfig {} /** * Available Gemini models for summarization */ @@ -66,15 +70,8 @@ export class GeminiSummarizeAdapter< private client: GoogleGenAI - constructor( - apiKeyOrClient: string | GoogleGenAI, - model: TModel, - _options: GeminiSummarizeAdapterOptions = {}, - ) { - this.client = - typeof apiKeyOrClient === 'string' - ? 
createGeminiClient({ apiKey: apiKeyOrClient }) - : apiKeyOrClient + constructor(config: GeminiSummarizeConfig, model: TModel) { + this.client = createGeminiClient(config) this.model = model } @@ -224,9 +221,9 @@ export class GeminiSummarizeAdapter< export function createGeminiSummarize( apiKey: string, model: TModel, - options?: GeminiSummarizeAdapterOptions, + config?: Omit, ): GeminiSummarizeAdapter { - return new GeminiSummarizeAdapter(apiKey, model, options) + return new GeminiSummarizeAdapter({ ...config, apiKey }, model) } /** @@ -234,8 +231,8 @@ export function createGeminiSummarize( */ export function geminiSummarize( model: TModel, - options?: GeminiSummarizeAdapterOptions, + config?: Omit, ): GeminiSummarizeAdapter { const apiKey = getGeminiApiKeyFromEnv() - return new GeminiSummarizeAdapter(apiKey, model, options) + return new GeminiSummarizeAdapter({ ...config, apiKey }, model) } diff --git a/packages/typescript/ai-gemini/src/utils/client.ts b/packages/typescript/ai-gemini/src/utils/client.ts index 21e0f2cd..bb92293d 100644 --- a/packages/typescript/ai-gemini/src/utils/client.ts +++ b/packages/typescript/ai-gemini/src/utils/client.ts @@ -1,6 +1,7 @@ import { GoogleGenAI } from '@google/genai' +import type { GoogleGenAIOptions } from '@google/genai' -export interface GeminiClientConfig { +export interface GeminiClientConfig extends GoogleGenAIOptions { apiKey: string } @@ -9,6 +10,7 @@ export interface GeminiClientConfig { */ export function createGeminiClient(config: GeminiClientConfig): GoogleGenAI { return new GoogleGenAI({ + ...config, apiKey: config.apiKey, }) } diff --git a/packages/typescript/ai-grok/src/adapters/image.ts b/packages/typescript/ai-grok/src/adapters/image.ts index beb5dd18..4bdabd35 100644 --- a/packages/typescript/ai-grok/src/adapters/image.ts +++ b/packages/typescript/ai-grok/src/adapters/image.ts @@ -5,7 +5,7 @@ import { validateNumberOfImages, validatePrompt, } from '../image/image-provider-options' -import type { 
GROK_IMAGE_MODELS } from '../model-meta' +import type { GrokImageModel } from '../model-meta' import type { GrokImageModelProviderOptionsByName, GrokImageModelSizeByName, @@ -24,9 +24,6 @@ import type { GrokClientConfig } from '../utils' */ export interface GrokImageConfig extends GrokClientConfig {} -/** Model type for Grok Image */ -export type GrokImageModel = (typeof GROK_IMAGE_MODELS)[number] - /** * Grok Image Generation Adapter * diff --git a/packages/typescript/ai-grok/src/index.ts b/packages/typescript/ai-grok/src/index.ts index 1002a5dc..a5deb099 100644 --- a/packages/typescript/ai-grok/src/index.ts +++ b/packages/typescript/ai-grok/src/index.ts @@ -27,7 +27,6 @@ export { createGrokImage, grokImage, type GrokImageConfig, - type GrokImageModel, } from './adapters/image' export type { GrokImageProviderOptions, @@ -43,6 +42,8 @@ export type { GrokModelInputModalitiesByName, ResolveProviderOptions, ResolveInputModalities, + GrokChatModel, + GrokImageModel, } from './model-meta' export { GROK_CHAT_MODELS, GROK_IMAGE_MODELS } from './model-meta' export type { diff --git a/packages/typescript/ai-grok/src/model-meta.ts b/packages/typescript/ai-grok/src/model-meta.ts index c097d9ba..bae811b9 100644 --- a/packages/typescript/ai-grok/src/model-meta.ts +++ b/packages/typescript/ai-grok/src/model-meta.ts @@ -230,6 +230,9 @@ export const GROK_CHAT_MODELS = [ */ export const GROK_IMAGE_MODELS = [GROK_2_IMAGE.name] as const +export type GrokChatModel = (typeof GROK_CHAT_MODELS)[number] +export type GrokImageModel = (typeof GROK_IMAGE_MODELS)[number] + /** * Type-only map from Grok chat model name to its supported input modalities. * Used for type inference when constructing multimodal messages. 
diff --git a/packages/typescript/ai-grok/src/utils/client.ts b/packages/typescript/ai-grok/src/utils/client.ts index 2a559076..54f70eaf 100644 --- a/packages/typescript/ai-grok/src/utils/client.ts +++ b/packages/typescript/ai-grok/src/utils/client.ts @@ -1,8 +1,8 @@ import OpenAI_SDK from 'openai' +import type { ClientOptions } from 'openai' -export interface GrokClientConfig { +export interface GrokClientConfig extends ClientOptions { apiKey: string - baseURL?: string } /** @@ -10,6 +10,7 @@ export interface GrokClientConfig { */ export function createGrokClient(config: GrokClientConfig): OpenAI_SDK { return new OpenAI_SDK({ + ...config, apiKey: config.apiKey, baseURL: config.baseURL || 'https://api.x.ai/v1', }) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index eeb91152..dc60caa3 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -621,7 +621,7 @@ importers: version: 1.1.0 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) zod: specifier: ^4.2.0 version: 4.2.1 @@ -629,7 +629,7 @@ importers: packages/typescript/ai-anthropic: dependencies: '@anthropic-ai/sdk': - specifier: ^0.71.0 + specifier: ^0.71.2 version: 0.71.2(zod@4.2.1) devDependencies: '@tanstack/ai': @@ -637,7 +637,7 @@ importers: version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) zod: specifier: ^4.2.0 version: 4.2.1 @@ 
-650,7 +650,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -678,7 +678,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) jsdom: specifier: ^27.2.0 version: 27.3.0(postcss@8.5.6) @@ -700,7 +700,7 @@ importers: version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -719,7 +719,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 
4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -735,7 +735,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -751,7 +751,7 @@ importers: version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -767,7 +767,7 @@ importers: version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ 
-786,7 +786,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -805,7 +805,7 @@ importers: version: 3.2.4(preact@10.28.2) '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) jsdom: specifier: ^27.2.0 version: 27.3.0(postcss@8.5.6) @@ -833,7 +833,7 @@ importers: version: 19.2.7 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) jsdom: specifier: ^27.2.0 version: 27.3.0(postcss@8.5.6) @@ -873,7 +873,7 @@ importers: version: 19.2.7 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) 
react: specifier: ^19.2.3 version: 19.2.3 @@ -944,7 +944,7 @@ importers: version: link:../ai-solid '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) solid-js: specifier: ^1.9.10 version: 1.9.10 @@ -972,7 +972,7 @@ importers: version: 24.10.3 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) jsdom: specifier: ^27.2.0 version: 27.3.0(postcss@8.5.6) @@ -1049,7 +1049,7 @@ importers: version: 6.0.3(vite@7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))(vue@3.5.25(typescript@5.9.3)) '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1074,7 +1074,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 
4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1093,7 +1093,7 @@ importers: version: 19.2.7 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) react: specifier: ^19.2.3 version: 19.2.3 @@ -1233,7 +1233,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) solid-js: specifier: ^1.9.10 version: 1.9.10 @@ -4260,6 +4260,9 @@ packages: '@vitest/expect@4.0.17': resolution: {integrity: sha512-mEoqP3RqhKlbmUmntNDDCJeTDavDR+fVYkSOw8qRwJFaW/0/5zA9zFeTrHqNtcmwh6j26yMmwx2PqUDPzt5ZAQ==} + '@vitest/expect@4.0.18': + resolution: {integrity: sha512-8sCWUyckXXYvx4opfzVY03EOiYVxyNrHS5QxX3DAIi5dpJAAkyJezHCP77VMX4HKA2LDT/Jpfo8i2r5BE3GnQQ==} + '@vitest/mocker@4.0.15': resolution: {integrity: sha512-CZ28GLfOEIFkvCFngN8Sfx5h+Se0zN+h4B7yOsPVCcgtiO7t5jt9xQh2E1UkFep+eb9fjyMfuC5gBypwb07fvQ==} peerDependencies: @@ -4282,6 +4285,17 @@ packages: vite: optional: true + '@vitest/mocker@4.0.18': + resolution: {integrity: sha512-HhVd0MDnzzsgevnOWCBj5Otnzobjy5wLBe4EdeeFGv8luMsGcYqDuFRMcttKWZA5vVO8RFjexVovXvAM4JoJDQ==} + peerDependencies: + msw: ^2.4.9 + vite: ^6.0.0 
|| ^7.0.0-0 + peerDependenciesMeta: + msw: + optional: true + vite: + optional: true + '@vitest/pretty-format@4.0.14': resolution: {integrity: sha512-SOYPgujB6TITcJxgd3wmsLl+wZv+fy3av2PpiPpsWPZ6J1ySUYfScfpIt2Yv56ShJXR2MOA6q2KjKHN4EpdyRQ==} @@ -4291,24 +4305,36 @@ packages: '@vitest/pretty-format@4.0.17': resolution: {integrity: sha512-Ah3VAYmjcEdHg6+MwFE17qyLqBHZ+ni2ScKCiW2XrlSBV4H3Z7vYfPfz7CWQ33gyu76oc0Ai36+kgLU3rfF4nw==} + '@vitest/pretty-format@4.0.18': + resolution: {integrity: sha512-P24GK3GulZWC5tz87ux0m8OADrQIUVDPIjjj65vBXYG17ZeU3qD7r+MNZ1RNv4l8CGU2vtTRqixrOi9fYk/yKw==} + '@vitest/runner@4.0.15': resolution: {integrity: sha512-+A+yMY8dGixUhHmNdPUxOh0la6uVzun86vAbuMT3hIDxMrAOmn5ILBHm8ajrqHE0t8R9T1dGnde1A5DTnmi3qw==} '@vitest/runner@4.0.17': resolution: {integrity: sha512-JmuQyf8aMWoo/LmNFppdpkfRVHJcsgzkbCA+/Bk7VfNH7RE6Ut2qxegeyx2j3ojtJtKIbIGy3h+KxGfYfk28YQ==} + '@vitest/runner@4.0.18': + resolution: {integrity: sha512-rpk9y12PGa22Jg6g5M3UVVnTS7+zycIGk9ZNGN+m6tZHKQb7jrP7/77WfZy13Y/EUDd52NDsLRQhYKtv7XfPQw==} + '@vitest/snapshot@4.0.15': resolution: {integrity: sha512-A7Ob8EdFZJIBjLjeO0DZF4lqR6U7Ydi5/5LIZ0xcI+23lYlsYJAfGn8PrIWTYdZQRNnSRlzhg0zyGu37mVdy5g==} '@vitest/snapshot@4.0.17': resolution: {integrity: sha512-npPelD7oyL+YQM2gbIYvlavlMVWUfNNGZPcu0aEUQXt7FXTuqhmgiYupPnAanhKvyP6Srs2pIbWo30K0RbDtRQ==} + '@vitest/snapshot@4.0.18': + resolution: {integrity: sha512-PCiV0rcl7jKQjbgYqjtakly6T1uwv/5BQ9SwBLekVg/EaYeQFPiXcgrC2Y7vDMA8dM1SUEAEV82kgSQIlXNMvA==} + '@vitest/spy@4.0.15': resolution: {integrity: sha512-+EIjOJmnY6mIfdXtE/bnozKEvTC4Uczg19yeZ2vtCz5Yyb0QQ31QWVQ8hswJ3Ysx/K2EqaNsVanjr//2+P3FHw==} '@vitest/spy@4.0.17': resolution: {integrity: sha512-I1bQo8QaP6tZlTomQNWKJE6ym4SHf3oLS7ceNjozxxgzavRAgZDc06T7kD8gb9bXKEgcLNt00Z+kZO6KaJ62Ew==} + '@vitest/spy@4.0.18': + resolution: {integrity: sha512-cbQt3PTSD7P2OARdVW3qWER5EGq7PHlvE+QfzSC0lbwO+xnt7+XH06ZzFjFRgzUX//JmpxrCu92VdwvEPlWSNw==} + '@vitest/utils@4.0.14': resolution: {integrity: 
sha512-hLqXZKAWNg8pI+SQXyXxWCTOpA3MvsqcbVeNgSi8x/CSN2wi26dSzn1wrOhmCmFjEvN9p8/kLFRHa6PI8jHazw==} @@ -4318,6 +4344,9 @@ packages: '@vitest/utils@4.0.17': resolution: {integrity: sha512-RG6iy+IzQpa9SB8HAFHJ9Y+pTzI+h8553MrciN9eC6TFBErqrQaTas4vG+MVj8S4uKk8uTT2p0vgZPnTdxd96w==} + '@vitest/utils@4.0.18': + resolution: {integrity: sha512-msMRKLMVLWygpK3u2Hybgi4MNjcYJvwTb0Ru09+fOyCXIgT5raYP041DRRdiJiI3k/2U6SEbAETB3YtBrUkCFA==} + '@volar/language-core@2.4.15': resolution: {integrity: sha512-3VHw+QZU0ZG9IuQmzT68IyN4hZNd9GchGPhbD9+pa8CVv7rnoOZwo7T8weIbrRmihqy3ATpdfXFnqRrfPVK6CA==} @@ -7821,7 +7850,7 @@ packages: tar@7.5.2: resolution: {integrity: sha512-7NyxrTE4Anh8km8iEy7o0QYPs+0JKBTj5ZaqHg6B39erLg0qYXN3BijtShwbsNSvQ+LN75+KV+C4QR/f6Gwnpg==} engines: {node: '>=18'} - deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me + deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. 
Support for old versions may be purchased (at exhorbitant rates) by contacting i@izs.me term-size@2.2.1: resolution: {integrity: sha512-wK0Ri4fOGjv/XPy8SBHZChl8CM7uMc5VML7SqiQ0zG7+J5Vr+RMQDoHa2CNT6KHUnTGIXH34UDMkPzAUyapBZg==} @@ -8538,6 +8567,40 @@ packages: jsdom: optional: true + vitest@4.0.18: + resolution: {integrity: sha512-hOQuK7h0FGKgBAas7v0mSAsnvrIgAvWmRFjmzpJ7SwFHH3g1k2u37JtYwOwmEKhK6ZO3v9ggDBBm0La1LCK4uQ==} + engines: {node: ^20.0.0 || ^22.0.0 || >=24.0.0} + hasBin: true + peerDependencies: + '@edge-runtime/vm': '*' + '@opentelemetry/api': ^1.9.0 + '@types/node': ^20.0.0 || ^22.0.0 || >=24.0.0 + '@vitest/browser-playwright': 4.0.18 + '@vitest/browser-preview': 4.0.18 + '@vitest/browser-webdriverio': 4.0.18 + '@vitest/ui': 4.0.18 + happy-dom: '*' + jsdom: '*' + peerDependenciesMeta: + '@edge-runtime/vm': + optional: true + '@opentelemetry/api': + optional: true + '@types/node': + optional: true + '@vitest/browser-playwright': + optional: true + '@vitest/browser-preview': + optional: true + '@vitest/browser-webdriverio': + optional: true + '@vitest/ui': + optional: true + happy-dom: + optional: true + jsdom: + optional: true + vscode-uri@3.1.0: resolution: {integrity: sha512-/BpdSx+yCQGnCvecbyXdxHDkuk55/G3xwnC0GqY4gmQ3j+A+g8kzzgB4Nk/SINjqn6+waqw3EgbVF2QKExkRxQ==} @@ -12202,7 +12265,7 @@ snapshots: transitivePeerDependencies: - supports-color - '@vitest/coverage-v8@4.0.14(vitest@4.0.17(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + '@vitest/coverage-v8@4.0.14(vitest@4.0.18(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@bcoe/v8-coverage': 1.0.2 '@vitest/utils': 4.0.14 @@ -12215,11 +12278,11 @@ snapshots: obug: 2.1.1 std-env: 3.10.0 tinyrainbow: 3.0.3 - vitest: 
4.0.17(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + vitest: 4.0.18(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) transitivePeerDependencies: - supports-color - '@vitest/coverage-v8@4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + '@vitest/coverage-v8@4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@bcoe/v8-coverage': 1.0.2 '@vitest/utils': 4.0.14 @@ -12232,7 +12295,7 @@ snapshots: obug: 2.1.1 std-env: 3.10.0 tinyrainbow: 3.0.3 - vitest: 4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + vitest: 4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) transitivePeerDependencies: - supports-color @@ -12254,6 +12317,15 @@ snapshots: chai: 6.2.2 tinyrainbow: 3.0.3 + '@vitest/expect@4.0.18': + dependencies: + '@standard-schema/spec': 1.1.0 + '@types/chai': 5.2.3 + '@vitest/spy': 4.0.18 + '@vitest/utils': 4.0.18 + chai: 6.2.2 + tinyrainbow: 3.0.3 + '@vitest/mocker@4.0.15(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@vitest/spy': 4.0.15 @@ -12270,9 +12342,17 @@ snapshots: optionalDependencies: vite: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) - '@vitest/mocker@4.0.17(vite@7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + 
'@vitest/mocker@4.0.18(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: - '@vitest/spy': 4.0.17 + '@vitest/spy': 4.0.18 + estree-walker: 3.0.3 + magic-string: 0.30.21 + optionalDependencies: + vite: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + + '@vitest/mocker@4.0.18(vite@7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + dependencies: + '@vitest/spy': 4.0.18 estree-walker: 3.0.3 magic-string: 0.30.21 optionalDependencies: @@ -12290,6 +12370,10 @@ snapshots: dependencies: tinyrainbow: 3.0.3 + '@vitest/pretty-format@4.0.18': + dependencies: + tinyrainbow: 3.0.3 + '@vitest/runner@4.0.15': dependencies: '@vitest/utils': 4.0.15 @@ -12300,6 +12384,11 @@ snapshots: '@vitest/utils': 4.0.17 pathe: 2.0.3 + '@vitest/runner@4.0.18': + dependencies: + '@vitest/utils': 4.0.18 + pathe: 2.0.3 + '@vitest/snapshot@4.0.15': dependencies: '@vitest/pretty-format': 4.0.15 @@ -12312,10 +12401,18 @@ snapshots: magic-string: 0.30.21 pathe: 2.0.3 + '@vitest/snapshot@4.0.18': + dependencies: + '@vitest/pretty-format': 4.0.18 + magic-string: 0.30.21 + pathe: 2.0.3 + '@vitest/spy@4.0.15': {} '@vitest/spy@4.0.17': {} + '@vitest/spy@4.0.18': {} + '@vitest/utils@4.0.14': dependencies: '@vitest/pretty-format': 4.0.14 @@ -12331,6 +12428,11 @@ snapshots: '@vitest/pretty-format': 4.0.17 tinyrainbow: 3.0.3 + '@vitest/utils@4.0.18': + dependencies: + '@vitest/pretty-format': 4.0.18 + tinyrainbow: 3.0.3 + '@volar/language-core@2.4.15': dependencies: '@volar/source-map': 2.4.15 @@ -17467,15 +17569,54 @@ snapshots: - tsx - yaml - vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2): + 
vitest@4.0.18(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2): dependencies: - '@vitest/expect': 4.0.17 - '@vitest/mocker': 4.0.17(vite@7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) - '@vitest/pretty-format': 4.0.17 - '@vitest/runner': 4.0.17 - '@vitest/snapshot': 4.0.17 - '@vitest/spy': 4.0.17 - '@vitest/utils': 4.0.17 + '@vitest/expect': 4.0.18 + '@vitest/mocker': 4.0.18(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + '@vitest/pretty-format': 4.0.18 + '@vitest/runner': 4.0.18 + '@vitest/snapshot': 4.0.18 + '@vitest/spy': 4.0.18 + '@vitest/utils': 4.0.18 + es-module-lexer: 1.7.0 + expect-type: 1.3.0 + magic-string: 0.30.21 + obug: 2.1.1 + pathe: 2.0.3 + picomatch: 4.0.3 + std-env: 3.10.0 + tinybench: 2.9.0 + tinyexec: 1.0.2 + tinyglobby: 0.2.15 + tinyrainbow: 3.0.3 + vite: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + why-is-node-running: 2.3.0 + optionalDependencies: + '@types/node': 24.10.3 + happy-dom: 20.0.11 + jsdom: 27.3.0(postcss@8.5.6) + transitivePeerDependencies: + - jiti + - less + - lightningcss + - msw + - sass + - sass-embedded + - stylus + - sugarss + - terser + - tsx + - yaml + + vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2): + dependencies: + '@vitest/expect': 4.0.18 + '@vitest/mocker': 4.0.18(vite@7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + '@vitest/pretty-format': 4.0.18 + '@vitest/runner': 4.0.18 + '@vitest/snapshot': 4.0.18 + '@vitest/spy': 4.0.18 + '@vitest/utils': 4.0.18 es-module-lexer: 1.7.0 expect-type: 1.3.0 magic-string: 0.30.21 From ffba47d901101182a6eef48a3f223f1a1723c07a Mon Sep 17 00:00:00 2001 From: 
"github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 9 Feb 2026 12:46:34 +0100 Subject: [PATCH 05/16] ci: Version Packages (#279) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- .changeset/slimy-ways-wave.md | 7 ------- examples/ts-svelte-chat/CHANGELOG.md | 8 ++++++++ examples/ts-svelte-chat/package.json | 2 +- examples/ts-vue-chat/CHANGELOG.md | 8 ++++++++ examples/ts-vue-chat/package.json | 2 +- packages/typescript/ai-anthropic/CHANGELOG.md | 6 ++++++ packages/typescript/ai-anthropic/package.json | 2 +- packages/typescript/ai-gemini/CHANGELOG.md | 6 ++++++ packages/typescript/ai-gemini/package.json | 2 +- packages/typescript/ai-grok/CHANGELOG.md | 6 ++++++ packages/typescript/ai-grok/package.json | 2 +- packages/typescript/smoke-tests/adapters/CHANGELOG.md | 9 +++++++++ packages/typescript/smoke-tests/adapters/package.json | 2 +- packages/typescript/smoke-tests/e2e/CHANGELOG.md | 7 +++++++ packages/typescript/smoke-tests/e2e/package.json | 2 +- 15 files changed, 57 insertions(+), 14 deletions(-) delete mode 100644 .changeset/slimy-ways-wave.md diff --git a/.changeset/slimy-ways-wave.md b/.changeset/slimy-ways-wave.md deleted file mode 100644 index 06404a7d..00000000 --- a/.changeset/slimy-ways-wave.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -'@tanstack/ai-anthropic': patch -'@tanstack/ai-gemini': patch -'@tanstack/ai-grok': patch ---- - -Add in opus 4.6 and enhance acceptable config options by providers diff --git a/examples/ts-svelte-chat/CHANGELOG.md b/examples/ts-svelte-chat/CHANGELOG.md index 52b04b20..8a0ab92c 100644 --- a/examples/ts-svelte-chat/CHANGELOG.md +++ b/examples/ts-svelte-chat/CHANGELOG.md @@ -1,5 +1,13 @@ # ts-svelte-chat +## 0.1.10 + +### Patch Changes + +- Updated dependencies [[`6bd3a8b`](https://github.com/TanStack/ai/commit/6bd3a8ba76594f302333625605dbb428f2413d29)]: + - @tanstack/ai-anthropic@0.4.2 + - @tanstack/ai-gemini@0.4.1 + ## 0.1.9 ### Patch Changes diff 
--git a/examples/ts-svelte-chat/package.json b/examples/ts-svelte-chat/package.json index e7510d8c..c98dcaa0 100644 --- a/examples/ts-svelte-chat/package.json +++ b/examples/ts-svelte-chat/package.json @@ -1,7 +1,7 @@ { "name": "ts-svelte-chat", "private": true, - "version": "0.1.9", + "version": "0.1.10", "type": "module", "scripts": { "dev": "vite dev --port 3000", diff --git a/examples/ts-vue-chat/CHANGELOG.md b/examples/ts-vue-chat/CHANGELOG.md index 3779febc..9d5142ad 100644 --- a/examples/ts-vue-chat/CHANGELOG.md +++ b/examples/ts-vue-chat/CHANGELOG.md @@ -1,5 +1,13 @@ # ts-vue-chat +## 0.1.10 + +### Patch Changes + +- Updated dependencies [[`6bd3a8b`](https://github.com/TanStack/ai/commit/6bd3a8ba76594f302333625605dbb428f2413d29)]: + - @tanstack/ai-anthropic@0.4.2 + - @tanstack/ai-gemini@0.4.1 + ## 0.1.9 ### Patch Changes diff --git a/examples/ts-vue-chat/package.json b/examples/ts-vue-chat/package.json index 135f0ebc..39710b4d 100644 --- a/examples/ts-vue-chat/package.json +++ b/examples/ts-vue-chat/package.json @@ -1,6 +1,6 @@ { "name": "ts-vue-chat", - "version": "0.1.9", + "version": "0.1.10", "private": true, "type": "module", "scripts": { diff --git a/packages/typescript/ai-anthropic/CHANGELOG.md b/packages/typescript/ai-anthropic/CHANGELOG.md index d06545f1..fc6093a8 100644 --- a/packages/typescript/ai-anthropic/CHANGELOG.md +++ b/packages/typescript/ai-anthropic/CHANGELOG.md @@ -1,5 +1,11 @@ # @tanstack/ai-anthropic +## 0.4.2 + +### Patch Changes + +- Add in opus 4.6 and enhance acceptable config options by providers ([#278](https://github.com/TanStack/ai/pull/278)) + ## 0.4.1 ### Patch Changes diff --git a/packages/typescript/ai-anthropic/package.json b/packages/typescript/ai-anthropic/package.json index 82d75c66..eecbe2a6 100644 --- a/packages/typescript/ai-anthropic/package.json +++ b/packages/typescript/ai-anthropic/package.json @@ -1,6 +1,6 @@ { "name": "@tanstack/ai-anthropic", - "version": "0.4.1", + "version": "0.4.2", "description": 
"Anthropic Claude adapter for TanStack AI", "author": "", "license": "MIT", diff --git a/packages/typescript/ai-gemini/CHANGELOG.md b/packages/typescript/ai-gemini/CHANGELOG.md index 8fd058fb..69f753a6 100644 --- a/packages/typescript/ai-gemini/CHANGELOG.md +++ b/packages/typescript/ai-gemini/CHANGELOG.md @@ -1,5 +1,11 @@ # @tanstack/ai-gemini +## 0.4.1 + +### Patch Changes + +- Add in opus 4.6 and enhance acceptable config options by providers ([#278](https://github.com/TanStack/ai/pull/278)) + ## 0.4.0 ### Patch Changes diff --git a/packages/typescript/ai-gemini/package.json b/packages/typescript/ai-gemini/package.json index 4e85ad7a..6d5fb2aa 100644 --- a/packages/typescript/ai-gemini/package.json +++ b/packages/typescript/ai-gemini/package.json @@ -1,6 +1,6 @@ { "name": "@tanstack/ai-gemini", - "version": "0.4.0", + "version": "0.4.1", "description": "Google Gemini adapter for TanStack AI", "author": "", "license": "MIT", diff --git a/packages/typescript/ai-grok/CHANGELOG.md b/packages/typescript/ai-grok/CHANGELOG.md index 4d844ac1..9c4dd05d 100644 --- a/packages/typescript/ai-grok/CHANGELOG.md +++ b/packages/typescript/ai-grok/CHANGELOG.md @@ -1,5 +1,11 @@ # @tanstack/ai-grok +## 0.4.1 + +### Patch Changes + +- Add in opus 4.6 and enhance acceptable config options by providers ([#278](https://github.com/TanStack/ai/pull/278)) + ## 0.4.0 ### Patch Changes diff --git a/packages/typescript/ai-grok/package.json b/packages/typescript/ai-grok/package.json index 8edd53fe..2dfc1270 100644 --- a/packages/typescript/ai-grok/package.json +++ b/packages/typescript/ai-grok/package.json @@ -1,6 +1,6 @@ { "name": "@tanstack/ai-grok", - "version": "0.4.0", + "version": "0.4.1", "description": "Grok (xAI) adapter for TanStack AI", "author": "", "license": "MIT", diff --git a/packages/typescript/smoke-tests/adapters/CHANGELOG.md b/packages/typescript/smoke-tests/adapters/CHANGELOG.md index cca4c061..8b696792 100644 --- a/packages/typescript/smoke-tests/adapters/CHANGELOG.md +++ 
b/packages/typescript/smoke-tests/adapters/CHANGELOG.md @@ -1,5 +1,14 @@ # @tanstack/tests-adapters +## 0.1.11 + +### Patch Changes + +- Updated dependencies [[`6bd3a8b`](https://github.com/TanStack/ai/commit/6bd3a8ba76594f302333625605dbb428f2413d29)]: + - @tanstack/ai-anthropic@0.4.2 + - @tanstack/ai-gemini@0.4.1 + - @tanstack/ai-grok@0.4.1 + ## 0.1.10 ### Patch Changes diff --git a/packages/typescript/smoke-tests/adapters/package.json b/packages/typescript/smoke-tests/adapters/package.json index b1dceae1..6db3c656 100644 --- a/packages/typescript/smoke-tests/adapters/package.json +++ b/packages/typescript/smoke-tests/adapters/package.json @@ -1,6 +1,6 @@ { "name": "@tanstack/tests-adapters", - "version": "0.1.10", + "version": "0.1.11", "private": true, "description": "Tests for TanStack AI adapters", "author": "", diff --git a/packages/typescript/smoke-tests/e2e/CHANGELOG.md b/packages/typescript/smoke-tests/e2e/CHANGELOG.md index c77ff8a2..839861b9 100644 --- a/packages/typescript/smoke-tests/e2e/CHANGELOG.md +++ b/packages/typescript/smoke-tests/e2e/CHANGELOG.md @@ -1,5 +1,12 @@ # @tanstack/smoke-tests-e2e +## 0.0.15 + +### Patch Changes + +- Updated dependencies []: + - @tanstack/tests-adapters@0.1.11 + ## 0.0.14 ### Patch Changes diff --git a/packages/typescript/smoke-tests/e2e/package.json b/packages/typescript/smoke-tests/e2e/package.json index af604c09..d0dc7fca 100644 --- a/packages/typescript/smoke-tests/e2e/package.json +++ b/packages/typescript/smoke-tests/e2e/package.json @@ -1,6 +1,6 @@ { "name": "@tanstack/smoke-tests-e2e", - "version": "0.0.14", + "version": "0.0.15", "description": "E2E tests for TanStack AI chat", "private": true, "type": "module", From 9602b14e1720e204189fa532a09db6055fd0557b Mon Sep 17 00:00:00 2001 From: Jack Herrington Date: Tue, 10 Feb 2026 02:08:24 -0800 Subject: [PATCH 06/16] fix: anthropic tool call issues (#275) * fix: anthropic tool call issues * fixing pnpm lock * ci: apply automated fixes * reworking model to 
uimessage conversions * simplifying the message conversion handling * ci: apply automated fixes * more small fixups * simplifying the message conversion handling * small test fixups * ci: apply automated fixes --------- Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- .../fix-anthropic-multi-turn-tool-calls.md | 35 + .changeset/tighten-adapter-contract.md | 25 + .gitignore | 4 +- examples/ts-group-chat/package.json | 10 +- examples/ts-react-chat/package.json | 12 +- examples/ts-react-chat/src/routeTree.gen.ts | 6 +- examples/ts-solid-chat/package.json | 4 +- .../ai-anthropic/src/adapters/text.ts | 158 +- .../typescript/ai-anthropic/src/model-meta.ts | 1 + .../src/text/text-provider-options.ts | 40 +- .../tests/anthropic-adapter.test.ts | 504 +++++ .../typescript/ai-client/src/chat-client.ts | 60 +- .../typescript/ai-gemini/src/adapters/text.ts | 79 +- .../ai-gemini/tests/gemini-adapter.test.ts | 168 ++ .../typescript/ai/docs/chat-architecture.md | 587 ++++++ .../ai/src/activities/chat/index.ts | 21 +- .../ai/src/activities/chat/messages.ts | 349 +-- .../src/activities/chat/stream/processor.ts | 356 +++- .../src/activities/chat/tools/tool-calls.ts | 5 +- packages/typescript/ai/src/types.ts | 10 +- packages/typescript/ai/tests/chat.test.ts | 1080 ++++++++++ .../ai/tests/message-converters.test.ts | 1125 +++++++++- .../ai/tests/stream-processor.test.ts | 1865 ++++++++++++++++- .../ai/tests/tool-call-manager.test.ts | 271 ++- .../adapters/src/tests/apr-approval-flow.ts | 5 +- .../typescript/smoke-tests/e2e/package.json | 8 +- pnpm-lock.yaml | 562 +++-- scripts/fix-version-bump.ts | 156 ++ testing/panel/package.json | 6 +- testing/panel/src/lib/model-selection.ts | 2 +- testing/panel/tests/tool-flow.spec.ts | 79 + 31 files changed, 6868 insertions(+), 725 deletions(-) create mode 100644 .changeset/fix-anthropic-multi-turn-tool-calls.md create mode 100644 .changeset/tighten-adapter-contract.md create mode 100644 
packages/typescript/ai/docs/chat-architecture.md create mode 100644 packages/typescript/ai/tests/chat.test.ts create mode 100644 scripts/fix-version-bump.ts diff --git a/.changeset/fix-anthropic-multi-turn-tool-calls.md b/.changeset/fix-anthropic-multi-turn-tool-calls.md new file mode 100644 index 00000000..34553ece --- /dev/null +++ b/.changeset/fix-anthropic-multi-turn-tool-calls.md @@ -0,0 +1,35 @@ +--- +'@tanstack/ai': patch +'@tanstack/ai-client': patch +'@tanstack/ai-anthropic': patch +'@tanstack/ai-gemini': patch +--- + +fix(ai, ai-client, ai-anthropic, ai-gemini): fix multi-turn conversations failing after tool calls + +**Core (@tanstack/ai):** + +- Lazy assistant message creation: `StreamProcessor` now defers creating the assistant message until the first content-bearing chunk arrives (text, tool call, thinking, or error), eliminating empty `parts: []` messages from appearing during auto-continuation when the model returns no content +- Add `prepareAssistantMessage()` (lazy) alongside deprecated `startAssistantMessage()` (eager, backwards-compatible) +- Add `getCurrentAssistantMessageId()` to check if a message was created +- **Rewrite `uiMessageToModelMessages()` to preserve part ordering**: the function now walks parts sequentially instead of separating by type, producing correctly interleaved assistant/tool messages (text1 + toolCall1 → toolResult1 → text2 + toolCall2 → toolResult2) instead of concatenating all text and batching all tool calls. This fixes multi-round tool flows where the model would see garbled conversation history and re-call tools unnecessarily. 
+- Deduplicate tool result messages: when a client tool has both a `tool-result` part and a `tool-call` part with `output`, only one `role: 'tool'` message is emitted per tool call ID + +**Client (@tanstack/ai-client):** + +- Update `ChatClient.processStream()` to use lazy assistant message creation, preventing UI flicker from empty messages being created then removed + +**Anthropic:** + +- Fix consecutive user-role messages violating Anthropic's alternating role requirement by merging them in `formatMessages` +- Deduplicate `tool_result` blocks with the same `tool_use_id` +- Filter out empty assistant messages from conversation history +- Suppress duplicate `RUN_FINISHED` event from `message_stop` when `message_delta` already emitted one +- Fix `TEXT_MESSAGE_END` incorrectly emitting for `tool_use` content blocks +- Add Claude Opus 4.6 model support with adaptive thinking and effort parameter + +**Gemini:** + +- Fix consecutive user-role messages violating Gemini's alternating role requirement by merging them in `formatMessages` +- Deduplicate `functionResponse` parts with the same name (tool call ID) +- Filter out empty model messages from conversation history diff --git a/.changeset/tighten-adapter-contract.md b/.changeset/tighten-adapter-contract.md new file mode 100644 index 00000000..78a7af3f --- /dev/null +++ b/.changeset/tighten-adapter-contract.md @@ -0,0 +1,25 @@ +--- +'@tanstack/ai': minor +'@tanstack/ai-anthropic': minor +'@tanstack/ai-gemini': minor +--- + +Tighten the AG-UI adapter contract and simplify the core stream processor. + +**Breaking type changes:** + +- `TextMessageContentEvent.delta` is now required (was optional) +- `StepFinishedEvent.delta` is now required (was optional) + +All first-party adapters already sent `delta` on every event, so this is a type-level enforcement of existing behavior. Community adapters that follow the reference implementations will not need code changes. 
+ +**Core processor simplifications:** + +- `TEXT_MESSAGE_START` now resets text segment state, replacing heuristic overlap detection +- `TOOL_CALL_END` is now the authoritative signal for tool call input completion +- Removed delta/content fallback logic, whitespace-only message cleanup, and finish-reason conflict arbitration from the processor + +**Adapter fixes:** + +- Gemini: filter whitespace-only text parts, fix STEP_FINISHED content accumulation, emit fresh TEXT_MESSAGE_START after tool calls +- Anthropic: emit fresh TEXT_MESSAGE_START after tool_use blocks for proper text segmentation diff --git a/.gitignore b/.gitignore index 91d2a1c5..34054660 100644 --- a/.gitignore +++ b/.gitignore @@ -56,4 +56,6 @@ test-traces # Playwright playwright-report -test-results \ No newline at end of file +test-results + +STATUS_*.md \ No newline at end of file diff --git a/examples/ts-group-chat/package.json b/examples/ts-group-chat/package.json index 90137ede..3d91b522 100644 --- a/examples/ts-group-chat/package.json +++ b/examples/ts-group-chat/package.json @@ -14,11 +14,11 @@ "@tanstack/ai-client": "workspace:*", "@tanstack/ai-react": "workspace:*", "@tanstack/react-devtools": "^0.8.2", - "@tanstack/react-router": "^1.141.1", - "@tanstack/react-router-devtools": "^1.139.7", - "@tanstack/react-router-ssr-query": "^1.139.7", - "@tanstack/react-start": "^1.141.1", - "@tanstack/router-plugin": "^1.139.7", + "@tanstack/react-router": "^1.158.4", + "@tanstack/react-router-devtools": "^1.158.4", + "@tanstack/react-router-ssr-query": "^1.158.4", + "@tanstack/react-start": "^1.159.0", + "@tanstack/router-plugin": "^1.158.4", "capnweb": "^0.1.0", "react": "^19.2.3", "react-dom": "^19.2.3", diff --git a/examples/ts-react-chat/package.json b/examples/ts-react-chat/package.json index f58f54bd..d74d35af 100644 --- a/examples/ts-react-chat/package.json +++ b/examples/ts-react-chat/package.json @@ -20,14 +20,14 @@ "@tanstack/ai-openrouter": "workspace:*", "@tanstack/ai-react": 
"workspace:*", "@tanstack/ai-react-ui": "workspace:*", - "@tanstack/nitro-v2-vite-plugin": "^1.141.0", + "@tanstack/nitro-v2-vite-plugin": "^1.154.7", "@tanstack/react-devtools": "^0.8.2", - "@tanstack/react-router": "^1.141.1", - "@tanstack/react-router-devtools": "^1.139.7", - "@tanstack/react-router-ssr-query": "^1.139.7", - "@tanstack/react-start": "^1.141.1", + "@tanstack/react-router": "^1.158.4", + "@tanstack/react-router-devtools": "^1.158.4", + "@tanstack/react-router-ssr-query": "^1.158.4", + "@tanstack/react-start": "^1.159.0", "@tanstack/react-store": "^0.8.0", - "@tanstack/router-plugin": "^1.139.7", + "@tanstack/router-plugin": "^1.158.4", "@tanstack/store": "^0.8.0", "highlight.js": "^11.11.1", "lucide-react": "^0.561.0", diff --git a/examples/ts-react-chat/src/routeTree.gen.ts b/examples/ts-react-chat/src/routeTree.gen.ts index 627a240c..f7099165 100644 --- a/examples/ts-react-chat/src/routeTree.gen.ts +++ b/examples/ts-react-chat/src/routeTree.gen.ts @@ -39,7 +39,7 @@ export interface FileRoutesByFullPath { '/': typeof IndexRoute '/api/tanchat': typeof ApiTanchatRoute '/example/guitars/$guitarId': typeof ExampleGuitarsGuitarIdRoute - '/example/guitars': typeof ExampleGuitarsIndexRoute + '/example/guitars/': typeof ExampleGuitarsIndexRoute } export interface FileRoutesByTo { '/': typeof IndexRoute @@ -60,7 +60,7 @@ export interface FileRouteTypes { | '/' | '/api/tanchat' | '/example/guitars/$guitarId' - | '/example/guitars' + | '/example/guitars/' fileRoutesByTo: FileRoutesByTo to: '/' | '/api/tanchat' | '/example/guitars/$guitarId' | '/example/guitars' id: @@ -97,7 +97,7 @@ declare module '@tanstack/react-router' { '/example/guitars/': { id: '/example/guitars/' path: '/example/guitars' - fullPath: '/example/guitars' + fullPath: '/example/guitars/' preLoaderRoute: typeof ExampleGuitarsIndexRouteImport parentRoute: typeof rootRouteImport } diff --git a/examples/ts-solid-chat/package.json b/examples/ts-solid-chat/package.json index 3a9ea9e8..bfe5ec05 
100644 --- a/examples/ts-solid-chat/package.json +++ b/examples/ts-solid-chat/package.json @@ -19,8 +19,8 @@ "@tanstack/ai-openai": "workspace:*", "@tanstack/ai-solid": "workspace:*", "@tanstack/ai-solid-ui": "workspace:*", - "@tanstack/nitro-v2-vite-plugin": "^1.141.0", - "@tanstack/router-plugin": "^1.139.7", + "@tanstack/nitro-v2-vite-plugin": "^1.154.7", + "@tanstack/router-plugin": "^1.158.4", "@tanstack/solid-ai-devtools": "workspace:*", "@tanstack/solid-devtools": "^0.7.15", "@tanstack/solid-router": "^1.139.10", diff --git a/packages/typescript/ai-anthropic/src/adapters/text.ts b/packages/typescript/ai-anthropic/src/adapters/text.ts index a4f41bbb..235d9f5b 100644 --- a/packages/typescript/ai-anthropic/src/adapters/text.ts +++ b/packages/typescript/ai-anthropic/src/adapters/text.ts @@ -247,6 +247,7 @@ export class AnthropicTextAdapter< const validKeys: Array = [ 'container', 'context_management', + 'effort', 'mcp_servers', 'service_tier', 'stop_sequences', @@ -450,7 +451,74 @@ export class AnthropicTextAdapter< }) } - return formattedMessages + // Post-process: Anthropic requires strictly alternating user/assistant roles. + // Tool results are sent as role:'user' messages, which can create consecutive + // user messages when followed by a new user message. Merge them. + return this.mergeConsecutiveSameRoleMessages(formattedMessages) + } + + /** + * Merge consecutive messages of the same role into a single message. + * Anthropic's API requires strictly alternating user/assistant roles. + * Tool results are wrapped as role:'user' messages, which can collide + * with actual user messages in multi-turn conversations. + * + * Also filters out empty assistant messages (e.g., from a previous failed request). 
+ */ + private mergeConsecutiveSameRoleMessages( + messages: InternalTextProviderOptions['messages'], + ): InternalTextProviderOptions['messages'] { + const merged: InternalTextProviderOptions['messages'] = [] + + for (const msg of messages) { + // Skip empty assistant messages (no content or empty string) + if (msg.role === 'assistant') { + const hasContent = Array.isArray(msg.content) + ? msg.content.length > 0 + : typeof msg.content === 'string' && msg.content.length > 0 + if (!hasContent) { + continue + } + } + + const prev = merged[merged.length - 1] + if (prev && prev.role === msg.role) { + // Normalize both contents to arrays and concatenate + const prevBlocks = Array.isArray(prev.content) + ? prev.content + : typeof prev.content === 'string' && prev.content + ? [{ type: 'text' as const, text: prev.content }] + : [] + const msgBlocks = Array.isArray(msg.content) + ? msg.content + : typeof msg.content === 'string' && msg.content + ? [{ type: 'text' as const, text: msg.content }] + : [] + prev.content = [...prevBlocks, ...msgBlocks] + } else { + merged.push({ ...msg }) + } + } + + // De-duplicate tool_result blocks with the same tool_use_id. + // This can happen when the core layer generates tool results from both + // the tool-result part and the tool-call part's output field. 
+ for (const msg of merged) { + if (Array.isArray(msg.content)) { + const seenToolResultIds = new Set() + msg.content = msg.content.filter((block: any) => { + if (block.type === 'tool_result' && block.tool_use_id) { + if (seenToolResultIds.has(block.tool_use_id)) { + return false // Remove duplicate + } + seenToolResultIds.add(block.tool_use_id) + } + return true + }) + } + } + + return merged } private async *processAnthropicStream( @@ -473,6 +541,9 @@ export class AnthropicTextAdapter< let stepId: string | null = null let hasEmittedRunStarted = false let hasEmittedTextMessageStart = false + let hasEmittedRunFinished = false + // Track current content block type for proper content_block_stop handling + let currentBlockType: string | null = null try { for await (const event of stream) { @@ -488,6 +559,7 @@ export class AnthropicTextAdapter< } if (event.type === 'content_block_start') { + currentBlockType = event.content_block.type if (event.content_block.type === 'tool_use') { currentToolIndex++ toolCallsMap.set(currentToolIndex, { @@ -572,59 +644,71 @@ export class AnthropicTextAdapter< } } } else if (event.type === 'content_block_stop') { - const existing = toolCallsMap.get(currentToolIndex) - if (existing) { - // If tool call wasn't started yet (no args), start it now - if (!existing.started) { - existing.started = true + if (currentBlockType === 'tool_use') { + const existing = toolCallsMap.get(currentToolIndex) + if (existing) { + // If tool call wasn't started yet (no args), start it now + if (!existing.started) { + existing.started = true + yield { + type: 'TOOL_CALL_START', + toolCallId: existing.id, + toolName: existing.name, + model, + timestamp, + index: currentToolIndex, + } + } + + // Emit TOOL_CALL_END + let parsedInput: unknown = {} + try { + const parsed = existing.input ? JSON.parse(existing.input) : {} + parsedInput = parsed && typeof parsed === 'object' ? 
parsed : {} + } catch { + parsedInput = {} + } + yield { - type: 'TOOL_CALL_START', + type: 'TOOL_CALL_END', toolCallId: existing.id, toolName: existing.name, model, timestamp, - index: currentToolIndex, + input: parsedInput, } - } - // Emit TOOL_CALL_END - let parsedInput: unknown = {} - try { - const parsed = existing.input ? JSON.parse(existing.input) : {} - parsedInput = parsed && typeof parsed === 'object' ? parsed : {} - } catch { - parsedInput = {} + // Reset so a new TEXT_MESSAGE_START is emitted if text follows tool calls + hasEmittedTextMessageStart = false } - - yield { - type: 'TOOL_CALL_END', - toolCallId: existing.id, - toolName: existing.name, - model, - timestamp, - input: parsedInput, + } else { + // Emit TEXT_MESSAGE_END only for text blocks (not tool_use blocks) + if (hasEmittedTextMessageStart && accumulatedContent) { + yield { + type: 'TEXT_MESSAGE_END', + messageId, + model, + timestamp, + } } } - - // Emit TEXT_MESSAGE_END if we had text content - if (hasEmittedTextMessageStart && accumulatedContent) { + currentBlockType = null + } else if (event.type === 'message_stop') { + // Only emit RUN_FINISHED from message_stop if message_delta didn't already emit one. + // message_delta carries the real stop_reason (tool_use, end_turn, etc.), + // while message_stop is just a completion signal. 
+ if (!hasEmittedRunFinished) { yield { - type: 'TEXT_MESSAGE_END', - messageId, + type: 'RUN_FINISHED', + runId, model, timestamp, + finishReason: 'stop', } } - } else if (event.type === 'message_stop') { - yield { - type: 'RUN_FINISHED', - runId, - model, - timestamp, - finishReason: 'stop', - } } else if (event.type === 'message_delta') { if (event.delta.stop_reason) { + hasEmittedRunFinished = true switch (event.delta.stop_reason) { case 'tool_use': { yield { diff --git a/packages/typescript/ai-anthropic/src/model-meta.ts b/packages/typescript/ai-anthropic/src/model-meta.ts index 1f642b86..ec7ca780 100644 --- a/packages/typescript/ai-anthropic/src/model-meta.ts +++ b/packages/typescript/ai-anthropic/src/model-meta.ts @@ -19,6 +19,7 @@ interface ModelMeta< supports: { input: Array<'text' | 'image' | 'audio' | 'video' | 'document'> extended_thinking?: boolean + adaptive_thinking?: boolean priority_tier?: boolean } context_window?: number diff --git a/packages/typescript/ai-anthropic/src/text/text-provider-options.ts b/packages/typescript/ai-anthropic/src/text/text-provider-options.ts index 8c4dfecc..b26da487 100644 --- a/packages/typescript/ai-anthropic/src/text/text-provider-options.ts +++ b/packages/typescript/ai-anthropic/src/text/text-provider-options.ts @@ -92,6 +92,42 @@ Must be ≥1024 and less than max_tokens } } +export interface AnthropicAdaptiveThinkingOptions { + /** + * Configuration for Claude's adaptive thinking (Opus 4.6+). + * + * In adaptive mode, Claude dynamically decides when and how much to think. + * Use the effort parameter to control thinking depth. + * `thinking: {type: "enabled"}` with `budget_tokens` is deprecated on Opus 4.6. + */ + thinking?: + | { + type: 'adaptive' + } + | { + /** + * @deprecated Use `type: 'adaptive'` with the effort parameter on Opus 4.6+. 
+ */ + budget_tokens: number + type: 'enabled' + } + | { + type: 'disabled' + } +} + +export interface AnthropicEffortOptions { + /** + * Controls the thinking depth for adaptive thinking mode (Opus 4.6+). + * + * - `max`: Absolute highest capability + * - `high`: Default - Claude will almost always think + * - `medium`: Balanced cost-quality + * - `low`: May skip thinking for simpler problems + */ + effort?: 'max' | 'high' | 'medium' | 'low' +} + export interface AnthropicToolChoiceOptions { tool_choice?: BetaToolChoiceAny | BetaToolChoiceTool | BetaToolChoiceAuto } @@ -115,7 +151,9 @@ export type ExternalTextProviderOptions = AnthropicContainerOptions & AnthropicStopSequencesOptions & AnthropicThinkingOptions & AnthropicToolChoiceOptions & - AnthropicSamplingOptions + AnthropicSamplingOptions & + Partial & + Partial export interface InternalTextProviderOptions extends ExternalTextProviderOptions { model: string diff --git a/packages/typescript/ai-anthropic/tests/anthropic-adapter.test.ts b/packages/typescript/ai-anthropic/tests/anthropic-adapter.test.ts index 5e3db434..76c50ccf 100644 --- a/packages/typescript/ai-anthropic/tests/anthropic-adapter.test.ts +++ b/packages/typescript/ai-anthropic/tests/anthropic-adapter.test.ts @@ -183,4 +183,508 @@ describe('Anthropic adapter option mapping', () => { type: 'custom', }) }) + + it('merges consecutive user messages when tool results precede a follow-up user message', async () => { + // This is the core multi-turn bug: after a tool call + result, the next user message + // creates consecutive role:'user' messages (tool_result as user + new user message). + // Anthropic's API requires strictly alternating user/assistant roles. 
+ const mockStream = (async function* () { + yield { + type: 'content_block_start', + index: 0, + content_block: { type: 'text', text: '' }, + } + yield { + type: 'content_block_delta', + index: 0, + delta: { type: 'text_delta', text: 'Here is a recommendation' }, + } + yield { + type: 'content_block_stop', + index: 0, + } + yield { + type: 'message_delta', + delta: { stop_reason: 'end_turn' }, + usage: { output_tokens: 10 }, + } + yield { type: 'message_stop' } + })() + + mocks.betaMessagesCreate.mockResolvedValueOnce(mockStream) + + const adapter = createAdapter('claude-3-7-sonnet-20250219') + + // Multi-turn: user -> assistant(tool_calls) -> tool_result -> follow-up user + const chunks: StreamChunk[] = [] + for await (const chunk of chat({ + adapter, + messages: [ + { role: 'user', content: 'What is the weather in Berlin?' }, + { + role: 'assistant', + content: 'Let me check the weather.', + toolCalls: [ + { + id: 'call_1', + type: 'function', + function: { name: 'lookup_weather', arguments: toolArguments }, + }, + ], + }, + { role: 'tool', toolCallId: 'call_1', content: '{"temp":72}' }, + { role: 'user', content: 'What about Paris?' }, + ], + tools: [weatherTool], + })) { + chunks.push(chunk) + } + + expect(mocks.betaMessagesCreate).toHaveBeenCalledTimes(1) + const [payload] = mocks.betaMessagesCreate.mock.calls[0] + + // The tool_result (user) and follow-up user message should be merged into one user message + expect(payload.messages).toEqual([ + { + role: 'user', + content: 'What is the weather in Berlin?', + }, + { + role: 'assistant', + content: [ + { type: 'text', text: 'Let me check the weather.' }, + { + type: 'tool_use', + id: 'call_1', + name: 'lookup_weather', + input: { location: 'Berlin' }, + }, + ], + }, + { + role: 'user', + content: [ + { + type: 'tool_result', + tool_use_id: 'call_1', + content: '{"temp":72}', + }, + { type: 'text', text: 'What about Paris?' 
}, + ], + }, + ]) + + // Verify roles strictly alternate: user, assistant, user + const roles = payload.messages.map((m: any) => m.role) + for (let i = 1; i < roles.length; i++) { + expect(roles[i]).not.toBe(roles[i - 1]) + } + }) + + it('merges multiple consecutive tool result messages into one user message', async () => { + // When multiple tools are called, each tool result becomes a role:'user' message. + // These must be merged into a single user message. + const mockStream = (async function* () { + yield { + type: 'content_block_start', + index: 0, + content_block: { type: 'text', text: '' }, + } + yield { + type: 'content_block_delta', + index: 0, + delta: { type: 'text_delta', text: 'Here are the results' }, + } + yield { + type: 'content_block_stop', + index: 0, + } + yield { + type: 'message_delta', + delta: { stop_reason: 'end_turn' }, + usage: { output_tokens: 5 }, + } + yield { type: 'message_stop' } + })() + + mocks.betaMessagesCreate.mockResolvedValueOnce(mockStream) + + const adapter = createAdapter('claude-3-7-sonnet-20250219') + + const chunks: StreamChunk[] = [] + for await (const chunk of chat({ + adapter, + messages: [ + { role: 'user', content: 'Weather in Berlin and Paris?' 
}, + { + role: 'assistant', + content: null, + toolCalls: [ + { + id: 'call_berlin', + type: 'function', + function: { + name: 'lookup_weather', + arguments: JSON.stringify({ location: 'Berlin' }), + }, + }, + { + id: 'call_paris', + type: 'function', + function: { + name: 'lookup_weather', + arguments: JSON.stringify({ location: 'Paris' }), + }, + }, + ], + }, + { role: 'tool', toolCallId: 'call_berlin', content: '{"temp":72}' }, + { role: 'tool', toolCallId: 'call_paris', content: '{"temp":68}' }, + ], + tools: [weatherTool], + })) { + chunks.push(chunk) + } + + expect(mocks.betaMessagesCreate).toHaveBeenCalledTimes(1) + const [payload] = mocks.betaMessagesCreate.mock.calls[0] + + // Both tool results should be merged into a single user message + expect(payload.messages).toEqual([ + { + role: 'user', + content: 'Weather in Berlin and Paris?', + }, + { + role: 'assistant', + content: [ + { + type: 'tool_use', + id: 'call_berlin', + name: 'lookup_weather', + input: { location: 'Berlin' }, + }, + { + type: 'tool_use', + id: 'call_paris', + name: 'lookup_weather', + input: { location: 'Paris' }, + }, + ], + }, + { + role: 'user', + content: [ + { + type: 'tool_result', + tool_use_id: 'call_berlin', + content: '{"temp":72}', + }, + { + type: 'tool_result', + tool_use_id: 'call_paris', + content: '{"temp":68}', + }, + ], + }, + ]) + + // Verify roles strictly alternate + const roles = payload.messages.map((m: any) => m.role) + for (let i = 1; i < roles.length; i++) { + expect(roles[i]).not.toBe(roles[i - 1]) + } + }) + + it('handles full multi-turn flow with duplicate tool results, empty assistant, and follow-up', async () => { + // This reproduces the exact bug scenario from the testing panel: + // 1. Assistant calls getGuitars + recommendGuitar (with text) + // 2. Tool results include duplicates (from both tool-result and tool-call output) + // 3. An empty assistant message exists (from the client tool round-trip) + // 4. 
User sends a follow-up message + // All of: duplicates, empty assistant, consecutive user messages must be handled. + const mockStream = (async function* () { + yield { + type: 'content_block_start', + index: 0, + content_block: { type: 'text', text: '' }, + } + yield { + type: 'content_block_delta', + index: 0, + delta: { type: 'text_delta', text: 'Electric guitars available' }, + } + yield { type: 'content_block_stop', index: 0 } + yield { + type: 'message_delta', + delta: { stop_reason: 'end_turn' }, + usage: { output_tokens: 5 }, + } + yield { type: 'message_stop' } + })() + + mocks.betaMessagesCreate.mockResolvedValueOnce(mockStream) + + const adapter = createAdapter('claude-3-7-sonnet-20250219') + + const chunks: StreamChunk[] = [] + for await (const chunk of chat({ + adapter, + messages: [ + { role: 'user', content: "what's a good acoustic guitar?" }, + { + role: 'assistant', + content: "I'll help you find a good acoustic guitar!", + toolCalls: [ + { + id: 'toolu_getGuitars', + type: 'function', + function: { name: 'getGuitars', arguments: '' }, + }, + { + id: 'toolu_recommend', + type: 'function', + function: { + name: 'recommendGuitar', + arguments: '{"id": 7}', + }, + }, + ], + }, + // Tool result from tool-result part + { + role: 'tool', + toolCallId: 'toolu_getGuitars', + content: '[{"id":7,"name":"Guitar"}]', + }, + // Tool result from tool-result part + { + role: 'tool', + toolCallId: 'toolu_recommend', + content: '{"id":7}', + }, + // DUPLICATE tool result from tool-call output field + { + role: 'tool', + toolCallId: 'toolu_recommend', + content: '{"id":7}', + }, + // Empty assistant from client tool round-trip + { role: 'assistant', content: null }, + // User follow-up + { role: 'user', content: "what's a good electric guitar?" 
}, + ], + tools: [weatherTool], + })) { + chunks.push(chunk) + } + + expect(mocks.betaMessagesCreate).toHaveBeenCalledTimes(1) + const [payload] = mocks.betaMessagesCreate.mock.calls[0] + + // Verify: no consecutive same-role messages, no empty assistants, no duplicate tool_results + const roles = payload.messages.map((m: any) => m.role) + for (let i = 1; i < roles.length; i++) { + expect(roles[i]).not.toBe(roles[i - 1]) + } + + // Should have exactly 3 messages: user, assistant, user (merged tool results + follow-up) + expect(payload.messages).toHaveLength(3) + expect(payload.messages[0].role).toBe('user') + expect(payload.messages[1].role).toBe('assistant') + expect(payload.messages[2].role).toBe('user') + + // The merged user message should have tool results (de-duplicated) + follow-up text + const lastUserContent = payload.messages[2].content + expect(Array.isArray(lastUserContent)).toBe(true) + + // Count tool_result blocks - should have 2 (one per tool), not 3 (no duplicate) + const toolResultBlocks = lastUserContent.filter( + (b: any) => b.type === 'tool_result', + ) + expect(toolResultBlocks).toHaveLength(2) + + // Should have the follow-up text + const textBlocks = lastUserContent.filter((b: any) => b.type === 'text') + expect(textBlocks).toHaveLength(1) + expect(textBlocks[0].text).toBe("what's a good electric guitar?") + }) + + it('filters out empty assistant messages from conversation history', async () => { + // An empty assistant message (from a previous failed request) should be filtered out. 
+ const mockStream = (async function* () { + yield { + type: 'content_block_start', + index: 0, + content_block: { type: 'text', text: '' }, + } + yield { + type: 'content_block_delta', + index: 0, + delta: { type: 'text_delta', text: 'Response' }, + } + yield { + type: 'content_block_stop', + index: 0, + } + yield { + type: 'message_delta', + delta: { stop_reason: 'end_turn' }, + usage: { output_tokens: 3 }, + } + yield { type: 'message_stop' } + })() + + mocks.betaMessagesCreate.mockResolvedValueOnce(mockStream) + + const adapter = createAdapter('claude-3-7-sonnet-20250219') + + const chunks: StreamChunk[] = [] + for await (const chunk of chat({ + adapter, + messages: [ + { role: 'user', content: 'Hello' }, + { role: 'assistant', content: '' }, // Empty assistant from failed request + { role: 'user', content: 'Try again' }, + ], + })) { + chunks.push(chunk) + } + + expect(mocks.betaMessagesCreate).toHaveBeenCalledTimes(1) + const [payload] = mocks.betaMessagesCreate.mock.calls[0] + + // The empty assistant message should be filtered out, and consecutive + // user messages should be merged + expect(payload.messages).toEqual([ + { + role: 'user', + content: [ + { type: 'text', text: 'Hello' }, + { type: 'text', text: 'Try again' }, + ], + }, + ]) + }) +}) + +describe('Anthropic stream processing', () => { + beforeEach(() => { + vi.clearAllMocks() + }) + + it('does not emit duplicate RUN_FINISHED from message_stop after message_delta', async () => { + // message_delta with stop_reason already emits RUN_FINISHED. + // message_stop should NOT emit another one. 
+ const mockStream = (async function* () { + yield { + type: 'content_block_start', + index: 0, + content_block: { type: 'text', text: '' }, + } + yield { + type: 'content_block_delta', + index: 0, + delta: { type: 'text_delta', text: 'Hello' }, + } + yield { + type: 'content_block_stop', + index: 0, + } + yield { + type: 'message_delta', + delta: { stop_reason: 'end_turn' }, + usage: { output_tokens: 3 }, + } + yield { type: 'message_stop' } + })() + + mocks.betaMessagesCreate.mockResolvedValueOnce(mockStream) + + const adapter = createAdapter('claude-3-7-sonnet-20250219') + + const chunks: StreamChunk[] = [] + for await (const chunk of chat({ + adapter, + messages: [{ role: 'user', content: 'Hi' }], + })) { + chunks.push(chunk) + } + + // Should have exactly ONE RUN_FINISHED event (from message_delta), not two + const runFinished = chunks.filter((c) => c.type === 'RUN_FINISHED') + expect(runFinished).toHaveLength(1) + expect(runFinished[0]).toMatchObject({ + type: 'RUN_FINISHED', + finishReason: 'stop', + }) + }) + + it('does not emit TEXT_MESSAGE_END for tool_use content blocks', async () => { + // When text is followed by a tool_use block, TEXT_MESSAGE_END should only + // fire once (for the text block), not again when the tool block stops. 
+ const mockStream = (async function* () { + // Text block + yield { + type: 'content_block_start', + index: 0, + content_block: { type: 'text', text: '' }, + } + yield { + type: 'content_block_delta', + index: 0, + delta: { type: 'text_delta', text: 'Let me check' }, + } + yield { type: 'content_block_stop', index: 0 } + // Tool use block + yield { + type: 'content_block_start', + index: 1, + content_block: { + type: 'tool_use', + id: 'tool_1', + name: 'lookup_weather', + }, + } + yield { + type: 'content_block_delta', + index: 1, + delta: { + type: 'input_json_delta', + partial_json: '{"location":"Berlin"}', + }, + } + yield { type: 'content_block_stop', index: 1 } + yield { + type: 'message_delta', + delta: { stop_reason: 'tool_use' }, + usage: { output_tokens: 10 }, + } + yield { type: 'message_stop' } + })() + + mocks.betaMessagesCreate.mockResolvedValueOnce(mockStream) + + const adapter = createAdapter('claude-3-7-sonnet-20250219') + + const chunks: StreamChunk[] = [] + for await (const chunk of chat({ + adapter, + messages: [{ role: 'user', content: 'Weather in Berlin?' 
}], + tools: [weatherTool], + })) { + chunks.push(chunk) + } + + // TEXT_MESSAGE_END should appear exactly once (for the text block) + const textMessageEnds = chunks.filter((c) => c.type === 'TEXT_MESSAGE_END') + expect(textMessageEnds).toHaveLength(1) + + // RUN_FINISHED should appear exactly once (from message_delta with tool_use) + const runFinished = chunks.filter((c) => c.type === 'RUN_FINISHED') + expect(runFinished).toHaveLength(1) + expect(runFinished[0]).toMatchObject({ + finishReason: 'tool_calls', + }) + }) }) diff --git a/packages/typescript/ai-client/src/chat-client.ts b/packages/typescript/ai-client/src/chat-client.ts index e95389fb..65554a44 100644 --- a/packages/typescript/ai-client/src/chat-client.ts +++ b/packages/typescript/ai-client/src/chat-client.ts @@ -230,31 +230,37 @@ export class ChatClient { */ private async processStream( source: AsyncIterable, - ): Promise { + ): Promise { // Generate a stream ID for this streaming operation this.currentStreamId = this.generateUniqueId('stream') - // Start a new assistant message - const messageId = this.processor.startAssistantMessage() - this.currentMessageId = messageId - - // Emit message appended event for the new assistant message - const assistantMessage: UIMessage = { - id: messageId, - role: 'assistant', - parts: [], - createdAt: new Date(), - } - this.events.messageAppended( - assistantMessage, - this.currentStreamId || undefined, - ) + // Prepare for a new assistant message (created lazily on first content) + this.processor.prepareAssistantMessage() // Process each chunk for await (const chunk of source) { this.callbacksRef.current.onChunk(chunk) this.processor.processChunk(chunk) + // Track the message ID once the processor lazily creates it + if (!this.currentMessageId) { + const newMessageId = + this.processor.getCurrentAssistantMessageId() ?? 
null + if (newMessageId) { + this.currentMessageId = newMessageId + // Emit message appended event now that the assistant message exists + const assistantMessage = this.processor + .getMessages() + .find((m: UIMessage) => m.id === newMessageId) + if (assistantMessage) { + this.events.messageAppended( + assistantMessage, + this.currentStreamId || undefined, + ) + } + } + } + // Yield control back to event loop to allow UI updates await new Promise((resolve) => setTimeout(resolve, 0)) } @@ -268,24 +274,20 @@ export class ChatClient { // Finalize the stream this.processor.finalizeStream() + // Get the message ID (may be null if no content arrived) + const messageId = this.processor.getCurrentAssistantMessageId() + // Clear the current stream and message IDs this.currentStreamId = null this.currentMessageId = null - // Return the assistant message - const messages = this.processor.getMessages() - const finalAssistantMessage = messages.find( - (m: UIMessage) => m.id === messageId, - ) + // Return the assistant message if one was created + if (messageId) { + const messages = this.processor.getMessages() + return messages.find((m: UIMessage) => m.id === messageId) || null + } - return ( - finalAssistantMessage || { - id: messageId, - role: 'assistant', - parts: [], - createdAt: new Date(), - } - ) + return null } /** diff --git a/packages/typescript/ai-gemini/src/adapters/text.ts b/packages/typescript/ai-gemini/src/adapters/text.ts index 031298c1..dd27209a 100644 --- a/packages/typescript/ai-gemini/src/adapters/text.ts +++ b/packages/typescript/ai-gemini/src/adapters/text.ts @@ -16,6 +16,7 @@ import type { StructuredOutputResult, } from '@tanstack/ai/adapters' import type { + Content, GenerateContentParameters, GenerateContentResponse, GoogleGenAI, @@ -194,6 +195,7 @@ export class GeminiTextAdapter< ): AsyncIterable { const timestamp = Date.now() let accumulatedContent = '' + let accumulatedThinking = '' const toolCallMap = new Map< string, { name: string; args: string; 
index: number; started: boolean } @@ -239,15 +241,17 @@ export class GeminiTextAdapter< } } + accumulatedThinking += part.text yield { type: 'STEP_FINISHED', stepId: stepId || generateId(this.name), model, timestamp, delta: part.text, - content: part.text, + content: accumulatedThinking, } - } else { + } else if (part.text.trim()) { + // Skip whitespace-only text parts (e.g. "\n" during auto-continuation) // Emit TEXT_MESSAGE_START on first text content if (!hasEmittedTextMessageStart) { hasEmittedTextMessageStart = true @@ -332,7 +336,8 @@ export class GeminiTextAdapter< } } } - } else if (chunk.data) { + } else if (chunk.data && chunk.data.trim()) { + // Skip whitespace-only data (e.g. "\n" during auto-continuation) // Emit TEXT_MESSAGE_START on first text content if (!hasEmittedTextMessageStart) { hasEmittedTextMessageStart = true @@ -434,6 +439,11 @@ export class GeminiTextAdapter< } } + // Reset so a new TEXT_MESSAGE_START is emitted if text follows tool calls + if (toolCallMap.size > 0) { + hasEmittedTextMessageStart = false + } + if (finishReason === FinishReason.MAX_TOKENS) { yield { type: 'RUN_ERROR', @@ -520,7 +530,7 @@ export class GeminiTextAdapter< private formatMessages( messages: Array, ): GenerateContentParameters['contents'] { - return messages.map((msg) => { + const formatted = messages.map((msg) => { const role: 'user' | 'model' = msg.role === 'assistant' ? 'model' : 'user' const parts: Array = [] @@ -574,6 +584,67 @@ export class GeminiTextAdapter< parts: parts.length > 0 ? parts : [{ text: '' }], } }) + + // Post-process: Gemini requires strictly alternating user/model roles. + // Tool results are mapped to role:'user', which can create consecutive + // user messages when followed by a new user message. Merge them. + return this.mergeConsecutiveSameRoleMessages(formatted) + } + + /** + * Merge consecutive messages of the same role into a single message. + * Gemini's API requires strictly alternating user/model roles. 
+ * Tool results are mapped to role:'user', which can collide with actual + * user messages in multi-turn conversations. + * + * Also filters out empty model messages (e.g., from a previous failed request) + * and deduplicates functionResponse parts with the same name (tool call ID). + */ + private mergeConsecutiveSameRoleMessages( + messages: Array, + ): Array { + const merged: Array = [] + + for (const msg of messages) { + const parts = msg.parts || [] + + // Skip empty model messages (no parts or only empty text) + if (msg.role === 'model') { + const hasContent = + parts.length > 0 && + !parts.every( + (p) => 'text' in p && (p as { text: string }).text === '', + ) + if (!hasContent) { + continue + } + } + + const prev = merged[merged.length - 1] + if (prev && prev.role === msg.role) { + // Merge parts arrays + prev.parts = [...(prev.parts || []), ...parts] + } else { + merged.push({ ...msg, parts: [...parts] }) + } + } + + // Deduplicate functionResponse parts with the same name (tool call ID) + for (const msg of merged) { + if (!msg.parts) continue + const seenFunctionResponseNames = new Set() + msg.parts = msg.parts.filter((part) => { + if ('functionResponse' in part && part.functionResponse?.name) { + if (seenFunctionResponseNames.has(part.functionResponse.name)) { + return false + } + seenFunctionResponseNames.add(part.functionResponse.name) + } + return true + }) + } + + return merged } private mapCommonOptionsToGemini( diff --git a/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts b/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts index 9add4bdc..58c75cee 100644 --- a/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts +++ b/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts @@ -330,6 +330,174 @@ describe('GeminiAdapter through AI', () => { }) }) + it('merges consecutive user messages when tool results precede a follow-up user message', async () => { + const streamChunks = [ + { + candidates: [ + { + content: { + parts: [{ 
text: 'Here is a recommendation' }], + }, + finishReason: 'STOP', + }, + ], + usageMetadata: { + promptTokenCount: 10, + candidatesTokenCount: 5, + totalTokenCount: 15, + }, + }, + ] + + mocks.generateContentStreamSpy.mockResolvedValue(createStream(streamChunks)) + + const adapter = createTextAdapter() + + for await (const _ of chat({ + adapter, + messages: [ + { role: 'user', content: 'What is the weather in Berlin?' }, + { + role: 'assistant', + content: 'Let me check.', + toolCalls: [ + { + id: 'call_1', + type: 'function', + function: { + name: 'lookup_weather', + arguments: '{"location":"Berlin"}', + }, + }, + ], + }, + { role: 'tool', toolCallId: 'call_1', content: '{"temp":72}' }, + { role: 'user', content: 'What about Paris?' }, + ], + tools: [weatherTool], + })) { + /* consume */ + } + + expect(mocks.generateContentStreamSpy).toHaveBeenCalledTimes(1) + const [payload] = mocks.generateContentStreamSpy.mock.calls[0] + + // Tool result (user) and follow-up user message should be merged + const roles = payload.contents.map((m: any) => m.role) + for (let i = 1; i < roles.length; i++) { + expect(roles[i]).not.toBe(roles[i - 1]) + } + + // Should have 3 messages: user, model, user (merged tool result + follow-up) + expect(payload.contents).toHaveLength(3) + expect(payload.contents[0].role).toBe('user') + expect(payload.contents[1].role).toBe('model') + expect(payload.contents[2].role).toBe('user') + + // Last user message should contain both functionResponse and text + const lastParts = payload.contents[2].parts + const hasFunctionResponse = lastParts.some((p: any) => p.functionResponse) + const hasText = lastParts.some((p: any) => p.text === 'What about Paris?') + expect(hasFunctionResponse).toBe(true) + expect(hasText).toBe(true) + }) + + it('handles full multi-turn with duplicate tool results and empty model message', async () => { + const streamChunks = [ + { + candidates: [ + { + content: { + parts: [{ text: 'Electric guitars available' }], + }, + 
finishReason: 'STOP', + }, + ], + usageMetadata: { + promptTokenCount: 20, + candidatesTokenCount: 5, + totalTokenCount: 25, + }, + }, + ] + + mocks.generateContentStreamSpy.mockResolvedValue(createStream(streamChunks)) + + const adapter = createTextAdapter() + + for await (const _ of chat({ + adapter, + messages: [ + { role: 'user', content: "what's a good acoustic guitar?" }, + { + role: 'assistant', + content: null, + toolCalls: [ + { + id: 'call_guitars', + type: 'function', + function: { name: 'getGuitars', arguments: '' }, + }, + { + id: 'call_recommend', + type: 'function', + function: { + name: 'recommendGuitar', + arguments: '{"id":7}', + }, + }, + ], + }, + { + role: 'tool', + toolCallId: 'call_guitars', + content: '[{"id":7,"name":"Guitar"}]', + }, + { + role: 'tool', + toolCallId: 'call_recommend', + content: '{"id":7}', + }, + // Duplicate tool result (from client tool output) + { + role: 'tool', + toolCallId: 'call_recommend', + content: '{"id":7}', + }, + // Empty assistant from client tool round-trip + { role: 'assistant', content: null }, + // Follow-up + { role: 'user', content: "what's a good electric guitar?" 
}, + ], + tools: [weatherTool], + })) { + /* consume */ + } + + expect(mocks.generateContentStreamSpy).toHaveBeenCalledTimes(1) + const [payload] = mocks.generateContentStreamSpy.mock.calls[0] + + // No consecutive same-role messages + const roles = payload.contents.map((m: any) => m.role) + for (let i = 1; i < roles.length; i++) { + expect(roles[i]).not.toBe(roles[i - 1]) + } + + // Should be 3 messages: user, model, user + expect(payload.contents).toHaveLength(3) + + // Last user should have deduplicated functionResponses + follow-up text + const lastParts = payload.contents[2].parts + const functionResponses = lastParts.filter((p: any) => p.functionResponse) + // 2 unique tool call IDs, not 3 (duplicate removed) + expect(functionResponses).toHaveLength(2) + + const textParts = lastParts.filter( + (p: any) => p.text === "what's a good electric guitar?", + ) + expect(textParts).toHaveLength(1) + }) + it('uses summarize function with models API', async () => { const summaryText = 'Short and sweet.' mocks.generateContentSpy.mockResolvedValueOnce({ diff --git a/packages/typescript/ai/docs/chat-architecture.md b/packages/typescript/ai/docs/chat-architecture.md new file mode 100644 index 00000000..5625a6ab --- /dev/null +++ b/packages/typescript/ai/docs/chat-architecture.md @@ -0,0 +1,587 @@ +# Chat Stream Architecture + +> **Canonical reference for AG-UI chunk processing.** +> The `StreamProcessor` class cross-references sections of this document. +> When modifying stream handling behavior, update this document first, then code. + +--- + +## Table of Contents + +1. [System Overview](#system-overview) +2. [Single-Shot Text Response](#single-shot-text-response) +3. [Single-Shot Tool Call Response](#single-shot-tool-call-response) +4. [Parallel Tool Calls (Single Shot)](#parallel-tool-calls-single-shot) +5. [Text-Then-Tool Interleaving (Single Shot)](#text-then-tool-interleaving-single-shot) +6. [Thinking/Reasoning Content](#thinkingreasoning-content) +7. 
[Tool Results and the TOOL_CALL_END Dual Role](#tool-results-and-the-tool_call_end-dual-role) +8. [Client Tools and Approval Flows](#client-tools-and-approval-flows) +9. [Multi-Iteration Agent Loop](#multi-iteration-agent-loop) +10. [Adapter Contract](#adapter-contract) +11. [StreamProcessor Internal State](#streamprocessor-internal-state) +12. [UIMessage Part Ordering Invariants](#uimessage-part-ordering-invariants) +13. [Testing Strategy](#testing-strategy) + +--- + +## System Overview + +``` +┌──────────────────────────────────────────────────────────────┐ +│ chat() function │ +│ │ +│ ┌────────────┐ AG-UI events ┌──────────────────┐ │ +│ │ Adapter │ ──────────────────> │ TextEngine │ │ +│ │ (provider) │ │ (agent loop) │ │ +│ └────────────┘ └────────┬─────────┘ │ +│ │ │ +│ yield AG-UI events │ +│ │ │ +└───────────────────────────────────────────────┼──────────────┘ + │ + v + ┌──────────────────┐ + │ StreamProcessor │ + │ (UI state owner) │ + └──────────────────┘ + │ + v + UIMessage[] updates + (onMessagesChange) +``` + +Two components consume the AG-UI event stream: + +- **TextEngine** (server, `chat/index.ts`) -- Orchestrates the agent loop. Accumulates text and tool calls for its own bookkeeping, decides whether to execute tools and re-invoke the adapter. Yields all events downstream. +- **StreamProcessor** (client, `chat/stream/processor.ts`) -- The single source of truth for `UIMessage[]` state. Consumes AG-UI events and maintains the conversation as an array of `UIMessage` objects with typed parts. + +Both trust the adapter to emit events in the correct order. The processor does **not** attempt error recovery for out-of-order events. + +--- + +## Single-Shot Text Response + +The simplest possible flow. The model returns text with no tool calls. 
+ +### Required chunk sequence from the adapter + +``` +# Event Required Fields Notes +── ───────────────────── ───────────────────────────────────── ────────────────────────────────────────── +1 RUN_STARTED runId MUST be first event +2 TEXT_MESSAGE_START messageId, role: "assistant" MUST precede any TEXT_MESSAGE_CONTENT +3 TEXT_MESSAGE_CONTENT messageId, delta: "Hello" One per token; delta is the increment +4 TEXT_MESSAGE_CONTENT messageId, delta: " world" Appended to previous +5 TEXT_MESSAGE_CONTENT messageId, delta: "!" ... +6 TEXT_MESSAGE_END messageId Signals text segment complete +7 RUN_FINISHED runId, finishReason: "stop" MUST be last meaningful event +``` + +### Processor behavior, step by step + +| # | Chunk received | Processor action | State change | +|---|---|---|---| +| 1 | `RUN_STARTED` | **Ignored.** No handler. | (none) | +| 2 | `TEXT_MESSAGE_START` | `handleTextMessageStartEvent()`: Flushes any pending text from a prior segment, then resets `currentSegmentText = ""` and `lastEmittedText = ""`. | Segment accumulators reset. | +| 3 | `TEXT_MESSAGE_CONTENT` | `handleTextMessageContentEvent()`: Calls `ensureAssistantMessage()` (lazily creates the assistant UIMessage on first content chunk). Appends `delta` to both `currentSegmentText` and `totalTextContent`. Chunk strategy decides emission. `emitTextUpdate()` calls `updateTextPart()` which: if last part is TextPart, replaces its content; otherwise appends a new TextPart. | UIMessage created, parts: `[{ type: "text", content: "Hello" }]` | +| 4 | `TEXT_MESSAGE_CONTENT` | Same as above. `currentSegmentText` is now `"Hello world"`. `updateTextPart()` replaces the last TextPart. | parts: `[{ type: "text", content: "Hello world" }]` | +| 5 | `TEXT_MESSAGE_CONTENT` | Same. | parts: `[{ type: "text", content: "Hello world!" }]` | +| 6 | `TEXT_MESSAGE_END` | **Ignored.** No handler. | (none) | +| 7 | `RUN_FINISHED` | `handleRunFinishedEvent()`: Sets `finishReason = "stop"`, `isDone = true`. 
Calls `completeAllToolCalls()` (no-op, no tool calls). | Done. | +| — | Stream ends | `finalizeStream()`: Calls `completeAllToolCalls()` (no-op). Flushes any pending text. Fires `onStreamEnd`. | Final. | + +### Key invariant + +> **`updateTextPart()` appends vs. replaces based on the last part's type.** +> If the last part is `type: "text"`, its content is *replaced* (not appended to). +> If the last part is anything else (tool-call, tool-result, thinking), a *new* TextPart is pushed. +> This is how multi-segment text (text before and after tool calls) works. + +### Final UIMessage + +```typescript +{ + id: "msg-...", + role: "assistant", + parts: [ + { type: "text", content: "Hello world!" } + ] +} +``` + +--- + +## Single-Shot Tool Call Response + +The model returns text AND one tool call. This is the most common source of bugs because the tool call events are interleaved with text events in a single stream. + +### Required chunk sequence from the adapter + +``` +# Event Required Fields Notes +─── ───────────────────── ────────────────────────────────────────────────── ────────────────────────────────────────────────── +1 RUN_STARTED runId First event +2 TEXT_MESSAGE_START messageId, role: "assistant" Before any text content +3 TEXT_MESSAGE_CONTENT messageId, delta: "Let me check." Text before tool call +4 TEXT_MESSAGE_END messageId Text segment done +5 TOOL_CALL_START toolCallId: "call_1", toolName: "getWeather" MUST have toolCallId + toolName +6 TOOL_CALL_ARGS toolCallId: "call_1", delta: '{"city":' Incremental JSON string +7 TOOL_CALL_ARGS toolCallId: "call_1", delta: '"NYC"}' More JSON +8 TOOL_CALL_END toolCallId: "call_1", toolName: "getWeather" Signals arguments finalized +9 RUN_FINISHED runId, finishReason: "tool_calls" MUST be "tool_calls" when tools requested +``` + +### Critical ordering rules + +1. 
**`TOOL_CALL_START` MUST precede any `TOOL_CALL_ARGS` for the same `toolCallId`.** + The processor creates internal state (`InternalToolCallState`) in the `toolCalls` Map on `TOOL_CALL_START`. If `TOOL_CALL_ARGS` arrives for an unknown `toolCallId`, the args are **silently dropped** (the `existingToolCall` lookup fails). + +2. **`TOOL_CALL_END` MUST come after all `TOOL_CALL_ARGS` for that `toolCallId`.** + `TOOL_CALL_END` transitions the tool call to `input-complete` state and does a final JSON parse of accumulated arguments. Any `TOOL_CALL_ARGS` after `TOOL_CALL_END` for the same ID will still be processed (appending to arguments), but the state has already been set to `input-complete`. + +3. **`RUN_FINISHED` with `finishReason: "tool_calls"` MUST come last.** + The TextEngine uses this to decide whether to enter the tool execution phase. The StreamProcessor uses it as a signal to force-complete any tool calls still not in `input-complete` state (safety net). + +4. **Text events and tool call events can interleave**, but within each tool call the order MUST be: `START -> ARGS* -> END`. + +### Processor behavior, step by step + +| # | Chunk | Processor action | UIMessage parts after | +|---|---|---|---| +| 1 | `RUN_STARTED` | Ignored. | `[]` | +| 2 | `TEXT_MESSAGE_START` | Resets segment accumulators. | `[]` | +| 3 | `TEXT_MESSAGE_CONTENT` | `ensureAssistantMessage()` creates UIMessage. Appends delta. `updateTextPart()` creates TextPart. | `[text: "Let me check."]` | +| 4 | `TEXT_MESSAGE_END` | Ignored. | `[text: "Let me check."]` | +| 5 | `TOOL_CALL_START` | `ensureAssistantMessage()` (already exists). Creates `InternalToolCallState` in Map with `state: "awaiting-input"`. `updateToolCallPart()` appends a ToolCallPart. | `[text: "Let me check.", tool-call(call_1, awaiting-input, args: "")]` | +| 6 | `TOOL_CALL_ARGS` | Appends delta to `arguments`. State transitions `awaiting-input -> input-streaming`. `updateToolCallPart()` updates existing ToolCallPart by ID. 
| `[text: "Let me check.", tool-call(call_1, input-streaming, args: '{"city":')]` | +| 7 | `TOOL_CALL_ARGS` | Appends delta. Stays `input-streaming`. Arguments now complete. | `[text: "Let me check.", tool-call(call_1, input-streaming, args: '{"city":"NYC"}')]` | +| 8 | `TOOL_CALL_END` | `completeToolCall()`: Sets `state: "input-complete"`. Does final JSON parse. No `result` field, so no tool-result part created. | `[text: "Let me check.", tool-call(call_1, input-complete, args: '{"city":"NYC"}')]` | +| 9 | `RUN_FINISHED` | Sets `finishReason = "tool_calls"`. `completeAllToolCalls()`: iterates all tool calls; `call_1` already `input-complete`, so no-op. | Same as above. | +| — | Stream ends | `finalizeStream()`: Flushes pending text (already emitted). Fires `onStreamEnd`. | Final. | + +### Tool call state transitions + +``` +TOOL_CALL_START received → state: "awaiting-input" (created in Map + UIMessage) +First TOOL_CALL_ARGS → state: "input-streaming" (only if delta is non-empty) +Subsequent TOOL_CALL_ARGS → state: "input-streaming" (no change, args accumulate) +TOOL_CALL_END received → state: "input-complete" (final parse, authoritative) + OR +RUN_FINISHED received → state: "input-complete" (safety net via completeAllToolCalls) +``` + +### What the processor does NOT do at this stage + +- It does **not** execute the tool. That's the TextEngine's job. +- It does **not** create a `tool-result` part. That happens later when `TOOL_CALL_END` arrives WITH a `result` field (from TextEngine after execution). +- It does **not** validate that `toolName` matches a known tool. The processor is tool-definition-agnostic. + +--- + +## Parallel Tool Calls (Single Shot) + +The model requests multiple tools in one response. Adapters may interleave the events. 
+ +### Required chunk sequence (interleaved) + +``` +# Event Key Fields +─── ───────────────────── ────────────────────────────────────────── +1 RUN_STARTED runId +2 TOOL_CALL_START toolCallId: "call_1", toolName: "getWeather", index: 0 +3 TOOL_CALL_START toolCallId: "call_2", toolName: "getTime", index: 1 +4 TOOL_CALL_ARGS toolCallId: "call_1", delta: '{"city":"NYC"}' +5 TOOL_CALL_ARGS toolCallId: "call_2", delta: '{"tz":"EST"}' +6 TOOL_CALL_END toolCallId: "call_1", toolName: "getWeather" +7 TOOL_CALL_END toolCallId: "call_2", toolName: "getTime" +8 RUN_FINISHED finishReason: "tool_calls" +``` + +### Alternative valid ordering (sequential) + +``` +TOOL_CALL_START call_1 +TOOL_CALL_ARGS call_1 +TOOL_CALL_END call_1 +TOOL_CALL_START call_2 +TOOL_CALL_ARGS call_2 +TOOL_CALL_END call_2 +RUN_FINISHED +``` + +Both orderings are valid. The processor tracks tool calls by `toolCallId` in a `Map`, not by position. The `index` field on `TOOL_CALL_START` is stored for reference but the Map key is always `toolCallId`. + +### Processor guarantees + +- Each `TOOL_CALL_START` creates a new entry in the Map. Duplicate `toolCallId` on a second `TOOL_CALL_START` is a no-op (the `if (!existingToolCall)` guard). +- `TOOL_CALL_ARGS` finds its tool call by `toolCallId`. If the ID is unknown, the event is silently dropped. +- `TOOL_CALL_END` finds its tool call by `toolCallId`. If already `input-complete`, it's a no-op (the state guard). +- `RUN_FINISHED` with `completeAllToolCalls()` force-completes any tool call not yet `input-complete`. + +### Final UIMessage parts + +```typescript +[ + { type: "tool-call", id: "call_1", name: "getWeather", arguments: '{"city":"NYC"}', state: "input-complete" }, + { type: "tool-call", id: "call_2", name: "getTime", arguments: '{"tz":"EST"}', state: "input-complete" }, +] +``` + +Parts are ordered by when `TOOL_CALL_START` was first received, since `updateToolCallPart()` appends new parts at the end. 
+ +--- + +## Text-Then-Tool Interleaving (Single Shot) + +A response with text, then a tool call, then the tool result comes back, then more text. This tests the multi-segment text logic. + +### Chunk sequence (first adapter call + TextEngine tool result + second adapter call) + +``` +── First adapter stream ────────────────────────────────── +1 RUN_STARTED +2 TEXT_MESSAGE_START messageId: "m1" +3 TEXT_MESSAGE_CONTENT delta: "Checking weather..." +4 TEXT_MESSAGE_END +5 TOOL_CALL_START toolCallId: "call_1", toolName: "getWeather" +6 TOOL_CALL_ARGS toolCallId: "call_1", delta: '{"city":"NYC"}' +7 TOOL_CALL_END toolCallId: "call_1" +8 RUN_FINISHED finishReason: "tool_calls" + +── TextEngine executes tool, yields result ────────────── +9 TOOL_CALL_END toolCallId: "call_1", result: '{"temp":"72F"}' + +── Second adapter stream ───────────────────────────────── +10 TEXT_MESSAGE_START messageId: "m2" +11 TEXT_MESSAGE_CONTENT delta: "It's 72°F in NYC." +12 TEXT_MESSAGE_END +13 RUN_FINISHED finishReason: "stop" +``` + +### Critical: TEXT_MESSAGE_START resets the text segment + +At step 10, `handleTextMessageStartEvent()`: +- Flushes any pending text from the previous segment. +- Resets `currentSegmentText = ""` and `lastEmittedText = ""`. + +This means the next `TEXT_MESSAGE_CONTENT` (step 11) starts accumulating into a fresh segment. When `emitTextUpdate()` is called, `updateTextPart()` sees the last part is a `tool-result` (not text), so it **pushes a new TextPart** rather than replacing. 
+ +### UIMessage parts progression + +``` +After step 3: [text: "Checking weather..."] +After step 5: [text: "Checking weather...", tool-call(call_1, awaiting-input)] +After step 7: [text: "Checking weather...", tool-call(call_1, input-complete)] +After step 9: [text: "Checking weather...", tool-call(call_1, input-complete, output: {...}), tool-result(call_1, complete)] +After step 11: [text: "Checking weather...", tool-call(call_1, ...), tool-result(call_1, ...), text: "It's 72°F in NYC."] +``` + +Note: **two separate TextParts** in the final message. This preserves the interleaving for accurate round-trip conversion to ModelMessages. + +--- + +## Thinking/Reasoning Content + +### Chunk sequence + +``` +1 RUN_STARTED +2 STEP_STARTED stepId, stepType: "thinking" (informational, ignored by processor) +3 STEP_FINISHED stepId, delta: "Let me think" +4 STEP_FINISHED stepId, delta: " about this..." +5 TEXT_MESSAGE_START +6 TEXT_MESSAGE_CONTENT delta: "Here's my answer." +7 TEXT_MESSAGE_END +8 RUN_FINISHED finishReason: "stop" +``` + +### Processor behavior + +- `STEP_STARTED` -- Ignored (no handler). +- `STEP_FINISHED` -- `handleStepFinishedEvent()`: Calls `ensureAssistantMessage()`. Appends `delta` to `thinkingContent`. `updateThinkingPart()` either updates existing ThinkingPart or pushes a new one. Always **replaces** (not accumulates) -- the full `thinkingContent` string is set. + +### Final UIMessage parts + +```typescript +[ + { type: "thinking", content: "Let me think about this..." }, + { type: "text", content: "Here's my answer." } +] +``` + +--- + +## Tool Results and the TOOL_CALL_END Dual Role + +`TOOL_CALL_END` serves two purposes depending on whether `result` is present: + +### Without `result` (from adapter) + +Signals that the tool call's **input arguments** are finalized. +- Transitions state to `input-complete`. +- Does final JSON parse of accumulated arguments. +- If `input` field is provided, uses it as canonical parsed arguments. 
+- **No tool-result part is created.** + +### With `result` (from TextEngine after execution) + +Signals that the tool has been **executed** and the result is available. +- Still transitions state to `input-complete` (if not already). +- Creates/updates two things: + 1. `updateToolCallWithOutput()` -- Sets `output` on the tool-call part (for UI rendering consistency). + 2. `updateToolResultPart()` -- Creates a `tool-result` part (for LLM conversation history). +- The `result` field is a JSON string. + +### This distinction is critical + +Adapters emit `TOOL_CALL_END` **without** `result` -- they're signaling "arguments are done." +The TextEngine emits `TOOL_CALL_END` **with** `result` -- it's signaling "tool was executed, here's the output." + +If an adapter incorrectly includes `result`, the processor will store it as a tool-result part immediately, which may cause unexpected behavior in the agent loop. + +--- + +## Client Tools and Approval Flows + +These are handled via `CUSTOM` events emitted by the TextEngine (not the adapter). + +### Client tool flow + +``` +CUSTOM { name: "tool-input-available", data: { toolCallId, toolName, input } } +``` + +Processor action: Fires `onToolCall` callback. Client calls `addToolResult(toolCallId, output)`. + +### Approval flow + +``` +CUSTOM { name: "approval-requested", data: { toolCallId, toolName, input, approval: { id, needsApproval: true } } } +``` + +Processor action: +1. `updateToolCallApproval()` -- Sets `state: "approval-requested"` and adds `approval` metadata to tool-call part. +2. Fires `onApprovalRequest` callback. +3. Client calls `addToolApprovalResponse(approvalId, true/false)`. +4. `updateToolCallApprovalResponse()` -- Sets `state: "approval-responded"` and `approval.approved`. + +--- + +## Multi-Iteration Agent Loop + +The TextEngine may call the adapter multiple times within a single `chat()` call (when tools are involved). 
+ +### Iteration cycle + +``` +do { + processText: Call adapter.chatStream(), yield events, accumulate tool calls + executeToolCalls: If finishReason == "tool_calls", execute tools, yield TOOL_CALL_END with results +} while (agentLoopStrategy says continue) +``` + +### What the StreamProcessor sees + +The processor receives a **single flat stream** of AG-UI events from all iterations. It does not know about iterations. Each `TEXT_MESSAGE_START` resets its text segment accumulator, which is how multi-iteration text works correctly. + +### StreamProcessor.process() vs manual processChunk() + +- `process(stream)` -- Consumes the entire async iterable, calls `processChunk()` for each event, then `finalizeStream()`. Used for simple consumption. +- `processChunk(chunk)` -- Processes a single event. Used when the consumer needs to interleave its own logic (e.g., the client framework integration). + +--- + +## Adapter Contract + +### Mandatory events and ordering + +Every adapter `chatStream()` implementation MUST emit events in this order: + +``` +RUN_STARTED ← exactly once, first + (TEXT_MESSAGE_START ← before any text content + TEXT_MESSAGE_CONTENT* ← zero or more text deltas + TEXT_MESSAGE_END)? ← if text was started + (TOOL_CALL_START ← before any args for this tool + TOOL_CALL_ARGS* ← zero or more arg deltas + TOOL_CALL_END)? ← for each tool call +RUN_FINISHED | RUN_ERROR ← exactly once, last +``` + +In EBNF-like notation: + +``` +Stream ::= RUN_STARTED Content* Terminal +Content ::= TextBlock | ToolCall | ThinkingBlock +TextBlock ::= TEXT_MESSAGE_START TEXT_MESSAGE_CONTENT* TEXT_MESSAGE_END +ToolCall ::= TOOL_CALL_START TOOL_CALL_ARGS* TOOL_CALL_END +ThinkingBlock ::= STEP_STARTED? STEP_FINISHED+ +Terminal ::= RUN_FINISHED | RUN_ERROR +``` + +### Event field requirements + +#### RUN_STARTED +- `runId: string` -- Unique run identifier. + +#### TEXT_MESSAGE_START +- `messageId: string` -- Unique message identifier. 
+- `role: "assistant"` -- Always assistant for generated messages. + +#### TEXT_MESSAGE_CONTENT +- `messageId: string` -- Must match the preceding TEXT_MESSAGE_START. +- `delta: string` -- **Non-empty** incremental text token. +- `content?: string` -- (Optional) Full accumulated text. For debugging only; processor uses `delta`. + +#### TEXT_MESSAGE_END +- `messageId: string` -- Must match. + +#### TOOL_CALL_START +- `toolCallId: string` -- **Globally unique** within this stream. The processor uses this as a Map key. +- `toolName: string` -- Name of the tool being called. Must not be empty. +- `index?: number` -- (Optional) For parallel tool calls. Defaults to `toolCalls.size` (order of arrival). + +#### TOOL_CALL_ARGS +- `toolCallId: string` -- Must match a preceding TOOL_CALL_START. +- `delta: string` -- Incremental JSON argument fragment. Concatenated by the processor. + +#### TOOL_CALL_END +- `toolCallId: string` -- Must match. +- `toolName: string` -- Must match. +- `input?: unknown` -- (Optional) Parsed arguments. If provided, overrides the processor's accumulated string parse. +- `result?: string` -- (Optional) **Adapters MUST NOT set this.** Reserved for TextEngine tool execution results. + +#### RUN_FINISHED +- `runId: string` -- Must match RUN_STARTED. +- `finishReason: "stop" | "length" | "content_filter" | "tool_calls" | null` + - `"tool_calls"` -- The model wants tool execution. TextEngine will enter tool phase. + - `"stop"` -- Normal completion. TextEngine exits loop. + - Other values are informational. +- `usage?: { promptTokens, completionTokens, totalTokens }` -- (Optional) Token usage. + +#### RUN_ERROR +- `error: { message: string, code?: string }` -- Error details. + +#### STEP_FINISHED (thinking) +- `stepId: string` -- Step identifier. +- `delta: string` -- Incremental thinking content. +- `content?: string` -- (Optional) Full accumulated thinking. + +### What adapters MUST NOT do + +1. 
**Do not emit `TOOL_CALL_ARGS` before `TOOL_CALL_START` for the same `toolCallId`.** The processor will silently drop the args (no entry in the Map). + +2. **Do not emit `TOOL_CALL_END` with a `result` field.** This field is reserved for the TextEngine. If an adapter sets it, the processor will immediately create a tool-result part, corrupting the agent loop flow. + +3. **Do not omit `RUN_FINISHED`.** Without it, the TextEngine cannot determine whether to execute tools. The StreamProcessor's `finalizeStream()` will force-complete tool calls as a safety net, but this is a fallback for network errors -- not normal operation. + +4. **Do not emit `TEXT_MESSAGE_CONTENT` without a preceding `TEXT_MESSAGE_START`.** The text will still be processed (the processor doesn't validate this), but `TEXT_MESSAGE_START` is needed to reset the segment accumulator for correct multi-segment text after tool calls. + +5. **Do not use empty strings for `toolCallId` or `toolName`.** The `ToolCallManager.getToolCalls()` filters out entries where `name.trim().length === 0`. + +6. **Do not reuse `toolCallId` values within a single stream.** The processor's Map deduplicates by ID -- a second `TOOL_CALL_START` with the same ID is silently ignored. + +### What adapters SHOULD do (recommended) + +1. Provide `TOOL_CALL_END.input` with parsed arguments. This gives the processor an authoritative parse rather than relying on partial-JSON parsing of accumulated deltas. + +2. Provide `TEXT_MESSAGE_CONTENT.content` with accumulated text. Useful for debugging, but the processor always uses `delta` for accumulation. + +3. Use deterministic `toolCallId` values from the provider (e.g., OpenAI's `call_*` IDs) rather than generating random ones, to enable debugging and replay. 
+ +--- + +## StreamProcessor Internal State + +### Stream-scoped state (reset per `process()` or `prepareAssistantMessage()`) + +| Field | Type | Purpose | +|---|---|---| +| `totalTextContent` | `string` | All text across all segments (for `ProcessorResult.content`) | +| `currentSegmentText` | `string` | Text in the current segment (reset on `TEXT_MESSAGE_START`) | +| `lastEmittedText` | `string` | Last text sent to `updateTextPart()` (prevents duplicate emissions) | +| `thinkingContent` | `string` | Accumulated thinking text | +| `toolCalls` | `Map` | Active tool calls keyed by `toolCallId` | +| `toolCallOrder` | `string[]` | Order in which tool calls were first seen | +| `finishReason` | `string \| null` | From `RUN_FINISHED` | +| `isDone` | `boolean` | Set by `RUN_FINISHED` | + +### Conversation-scoped state (persists across streams) + +| Field | Type | Purpose | +|---|---|---| +| `messages` | `UIMessage[]` | The full conversation | +| `currentAssistantMessageId` | `string \| null` | ID of the message being streamed (null before first content) | + +### The lazy message creation pattern + +The assistant UIMessage is **not** created when `prepareAssistantMessage()` is called. It's created lazily by `ensureAssistantMessage()` on the first content-bearing chunk. This prevents empty assistant messages from appearing in the UI during auto-continuation iterations that produce no content. + +Content-bearing chunks that trigger `ensureAssistantMessage()`: +- `TEXT_MESSAGE_CONTENT` +- `TOOL_CALL_START` +- `STEP_FINISHED` +- `RUN_ERROR` + +--- + +## UIMessage Part Ordering Invariants + +The parts array within a single assistant UIMessage maintains a strict ordering that mirrors the stream: + +1. **ThinkingPart** (if present) -- Always managed as a single part, updated in place. +2. **TextPart** (first segment) -- Created on first text content. +3. **ToolCallPart(s)** -- Appended when tool calls start. +4. **ToolResultPart(s)** -- Appended when tool results arrive. +5. 
**TextPart** (second segment) -- Created when text resumes after tool calls. + +The critical logic is in `updateTextPart()`: +- If the **last** part is a TextPart → **replace** its content (same segment continues). +- If the **last** part is anything else → **push** a new TextPart (new segment after tools). + +This means tool-call and tool-result parts act as "segment separators" for text content. + +### Round-trip fidelity + +This ordering is preserved through `uiMessageToModelMessages()` which walks parts in order and flushes assistant segments at tool-result boundaries. See `messages.ts` for the conversion logic. + +--- + +## Testing Strategy + +The StreamProcessor should be tested with recorded chunk sequences covering: + +### Single-shot scenarios (no agent loop) + +1. **Text only** -- `RUN_STARTED → TEXT_MESSAGE_START → TEXT_MESSAGE_CONTENT* → TEXT_MESSAGE_END → RUN_FINISHED(stop)` +2. **Tool call only (no text)** -- `RUN_STARTED → TOOL_CALL_START → TOOL_CALL_ARGS* → TOOL_CALL_END → RUN_FINISHED(tool_calls)` +3. **Text then tool call** -- Text block followed by tool call block. +4. **Parallel tool calls** -- Multiple interleaved TOOL_CALL_START/ARGS/END sequences. +5. **Empty text deltas** -- Verify empty `delta` in `TOOL_CALL_ARGS` doesn't transition from `awaiting-input`. +6. **Missing TOOL_CALL_END** -- Verify `RUN_FINISHED` safety net completes the tool call. +7. **TOOL_CALL_END with input override** -- Verify `input` field overrides accumulated parse. +8. **Thinking then text** -- STEP_FINISHED events followed by text events. + +### Tool result scenarios + +9. **TOOL_CALL_END with result** -- Verify both `output` on tool-call part and `tool-result` part are created. +10. **Client tool via CUSTOM** -- Verify `onToolCall` fires, then `addToolResult()` creates correct parts. +11. **Approval flow** -- Verify state transitions: `input-complete → approval-requested → approval-responded`. + +### Multi-segment text + +12. 
**Text → tool → text** -- Verify two separate TextParts with TEXT_MESSAGE_START reset. +13. **Text → tool → result → text** -- Verify parts order: text, tool-call, tool-result, text. + +### Edge cases + +14. **No content (empty stream)** -- Only `RUN_STARTED` + `RUN_FINISHED`. No assistant message created. +15. **RUN_ERROR** -- Verify assistant message is created and `onError` fires. +16. **Duplicate TOOL_CALL_START** (same ID) -- Verify it's a no-op. +17. **TOOL_CALL_ARGS for unknown ID** -- Verify silently dropped. + +Each test should assert: +- The final `UIMessage.parts` array matches expected types, order, and content. +- The `ProcessorResult` has correct `content`, `toolCalls`, and `finishReason`. +- `onMessagesChange` was called the expected number of times. +- Granular events (`onTextUpdate`, `onToolCallStateChange`) fired with correct arguments. diff --git a/packages/typescript/ai/src/activities/chat/index.ts b/packages/typescript/ai/src/activities/chat/index.ts index 7402ba8b..4595671e 100644 --- a/packages/typescript/ai/src/activities/chat/index.ts +++ b/packages/typescript/ai/src/activities/chat/index.ts @@ -555,15 +555,6 @@ class TextEngine< }) } - // Don't overwrite a tool_calls finishReason with a stop finishReason - if ( - this.finishedEvent?.finishReason === 'tool_calls' && - chunk.finishReason === 'stop' - ) { - this.lastFinishReason = chunk.finishReason - return - } - this.finishedEvent = chunk this.lastFinishReason = chunk.finishReason } @@ -800,6 +791,18 @@ class TextEngine< } catch { output = message.content } + // Skip approval response messages (they have pendingExecution marker) + // These are NOT real client tool results — they are synthetic tool messages + // created by uiMessageToModelMessages for approved-but-not-yet-executed tools. + // Treating them as results would prevent the server from requesting actual + // client-side execution after approval (see GitHub issue #225). 
+ if ( + output && + typeof output === 'object' && + (output as any).pendingExecution === true + ) { + continue + } clientToolResults.set(message.toolCallId, output) } } diff --git a/packages/typescript/ai/src/activities/chat/messages.ts b/packages/typescript/ai/src/activities/chat/messages.ts index 43e4a614..c8dc9228 100644 --- a/packages/typescript/ai/src/activities/chat/messages.ts +++ b/packages/typescript/ai/src/activities/chat/messages.ts @@ -1,27 +1,22 @@ import type { - AudioPart, ContentPart, - DocumentPart, - ImagePart, MessagePart, ModelMessage, TextPart, ToolCallPart, - ToolResultPart, UIMessage, - VideoPart, } from '../../types' // =========================== // Message Converters // =========================== /** - * Helper to check if a part is a multimodal content part (image, audio, video, document) + * Check if a MessagePart is a content part (text, image, audio, video, document) + * that maps directly to a ModelMessage ContentPart. */ -function isMultimodalPart( - part: MessagePart, -): part is ImagePart | AudioPart | VideoPart | DocumentPart { +function isContentPart(part: MessagePart): part is ContentPart { return ( + part.type === 'text' || part.type === 'image' || part.type === 'audio' || part.type === 'video' || @@ -30,19 +25,34 @@ function isMultimodalPart( } /** - * Helper to extract text content from string or ContentPart array - * For multimodal content, this extracts only the text parts + * Collapse an array of ContentParts into the most compact ModelMessage content: + * - Empty array → null + * - All text parts → joined string (or null if empty) + * - Mixed content → ContentPart array as-is */ -function getTextContent(content: string | null | Array): string { - if (content === null) { - return '' - } - if (typeof content === 'string') { - return content +function collapseContentParts( + parts: Array, +): string | null | Array { + if (parts.length === 0) return null + + const allText = parts.every((p) => p.type === 'text') + if 
(allText) { + const joined = parts.map((p) => p.content).join('') + return joined || null } - // Extract text from ContentPart array + + return parts +} + +/** + * Extract text content from ModelMessage content (string, null, or ContentPart array). + * Used when only the text portion is needed (e.g., tool result content). + */ +function getTextContent(content: string | null | Array): string { + if (content === null) return '' + if (typeof content === 'string') return content return content - .filter((part) => part.type === 'text') + .filter((part): part is TextPart => part.type === 'text') .map((part) => part.content) .join('') } @@ -69,157 +79,214 @@ export function convertMessagesToModelMessages( /** * Convert a UIMessage to ModelMessage(s) * - * This conversion handles the parts-based structure: - * - Text parts → content field (string or as part of ContentPart array) - * - Multimodal parts (image, audio, video, document) → ContentPart array - * - ToolCall parts → toolCalls array - * - ToolResult parts → separate role="tool" messages + * Walks the parts array IN ORDER to preserve the interleaving of text, + * tool calls, and tool results. This is critical for multi-round tool + * flows where the model generates text, calls a tool, gets the result, + * then generates more text and calls another tool. 
+ * + * The output preserves the sequential structure: + * text1 → toolCall1 → toolResult1 → text2 → toolCall2 → toolResult2 + * becomes: + * assistant: {content: "text1", toolCalls: [toolCall1]} + * tool: toolResult1 + * assistant: {content: "text2", toolCalls: [toolCall2]} + * tool: toolResult2 * * @param uiMessage - The UIMessage to convert - * @returns An array of ModelMessages (may be multiple if tool results are present) + * @returns An array of ModelMessages preserving part ordering */ export function uiMessageToModelMessages( uiMessage: UIMessage, ): Array { - const messageList: Array = [] - // Skip system messages - they're handled via systemPrompts, not ModelMessages if (uiMessage.role === 'system') { - return messageList + return [] } - // Separate parts by type - // Note: thinking parts are UI-only and not included in ModelMessages - const textParts: Array = [] - const multimodalParts: Array< - ImagePart | AudioPart | VideoPart | DocumentPart - > = [] - const toolCallParts: Array = [] - const toolResultParts: Array = [] + // For non-assistant messages (user), use the simpler path since they + // don't have tool calls or tool results to interleave + if (uiMessage.role !== 'assistant') { + return [buildUserOrToolMessage(uiMessage)] + } + // For assistant messages, walk parts in order to preserve interleaving + return buildAssistantMessages(uiMessage) +} + +/** + * Build a single ModelMessage for user messages (simple path). + * Preserves ordering of text and multimodal content parts. 
+ */ +function buildUserOrToolMessage(uiMessage: UIMessage): ModelMessage { + const contentParts: Array = [] for (const part of uiMessage.parts) { - if (part.type === 'text') { - textParts.push(part) - } else if (isMultimodalPart(part)) { - multimodalParts.push(part) - } else if (part.type === 'tool-call') { - toolCallParts.push(part) - } else if (part.type === 'tool-result') { - toolResultParts.push(part) + if (isContentPart(part)) { + contentParts.push(part) } - // thinking parts are skipped - they're UI-only } - // Build the content field - // If we have multimodal parts, use ContentPart array format - // Otherwise, use simple string format for backward compatibility - let content: string | null | Array - if (multimodalParts.length > 0) { - // Build ContentPart array preserving the order of text and multimodal parts - const contentParts: Array = [] - for (const part of uiMessage.parts) { - if (part.type === 'text') { - contentParts.push(part) - } else if (isMultimodalPart(part)) { - contentParts.push(part) - } + return { + role: uiMessage.role as 'user' | 'assistant' | 'tool', + content: collapseContentParts(contentParts), + } +} + +// Accumulator for building an assistant segment (content + tool calls) +interface AssistantSegment { + contentParts: Array + toolCalls: Array<{ + id: string + type: 'function' + function: { name: string; arguments: string } + }> +} + +function createSegment(): AssistantSegment { + return { contentParts: [], toolCalls: [] } +} + +function isToolCallIncluded(part: ToolCallPart): boolean { + return ( + part.state === 'input-complete' || + part.state === 'approval-responded' || + part.output !== undefined + ) +} + +/** + * Build ModelMessages for an assistant UIMessage, preserving the + * sequential interleaving of text, tool calls, and tool results. + * + * Walks parts in order. Text and tool-call parts accumulate into the + * current "segment". 
When a tool-result part is encountered, the + * current segment is flushed as an assistant message, then the tool + * result is emitted as a tool message. + */ +function buildAssistantMessages(uiMessage: UIMessage): Array { + const messageList: Array = [] + let current = createSegment() + + // Track emitted tool result IDs to avoid duplicates. + // A tool call can have BOTH an explicit tool-result part AND an output + // field on the tool-call part. We only want one per tool call ID. + const emittedToolResultIds = new Set() + + function flushSegment(): void { + const content = collapseContentParts(current.contentParts) + const hasContent = content !== null + const hasToolCalls = current.toolCalls.length > 0 + + if (hasContent || hasToolCalls) { + messageList.push({ + role: 'assistant', + content, + ...(hasToolCalls && { toolCalls: current.toolCalls }), + }) } - content = contentParts - } else { - // Simple string content for text-only messages - content = textParts.map((p) => p.content).join('') || null + current = createSegment() } - const toolCalls = - toolCallParts.length > 0 - ? toolCallParts - .filter( - (p) => - p.state === 'input-complete' || - p.state === 'approval-responded' || - p.output !== undefined, // Include if has output (client tool result) - ) - .map((p) => ({ - id: p.id, + for (const part of uiMessage.parts) { + switch (part.type) { + case 'text': + case 'image': + case 'audio': + case 'video': + case 'document': + current.contentParts.push(part) + break + + case 'tool-call': + if (isToolCallIncluded(part)) { + current.toolCalls.push({ + id: part.id, type: 'function' as const, function: { - name: p.name, - arguments: p.arguments, + name: part.name, + arguments: part.arguments, }, - })) - : undefined + }) + } + break - // Create the main message - // For multimodal content, we always create a message even if content is an empty array - const hasContent = Array.isArray(content) ? 
true : content !== null - if (uiMessage.role !== 'assistant' || hasContent || !toolCalls) { - messageList.push({ - role: uiMessage.role, - content, - ...(toolCalls && toolCalls.length > 0 && { toolCalls }), - }) - } else if (toolCalls.length > 0) { - // Assistant message with only tool calls - messageList.push({ - role: 'assistant', - content, - toolCalls, - }) - } + case 'tool-result': + // Flush the current assistant segment before emitting the tool result + flushSegment() - // Add tool result messages for completed tool calls - // This includes: - // 1. Explicit tool-result parts (from server tools) - // 2. Client tool calls with output set - // 3. Approval-responded tool calls (approval result) - for (const toolResultPart of toolResultParts) { - if ( - toolResultPart.state === 'complete' || - toolResultPart.state === 'error' - ) { - messageList.push({ - role: 'tool', - content: toolResultPart.content, - toolCallId: toolResultPart.toolCallId, - }) + // Emit the tool result + if ( + (part.state === 'complete' || part.state === 'error') && + !emittedToolResultIds.has(part.toolCallId) + ) { + messageList.push({ + role: 'tool', + content: part.content, + toolCallId: part.toolCallId, + }) + emittedToolResultIds.add(part.toolCallId) + } + break + + // thinking parts are skipped - they're UI-only + default: + break } } - // Add tool result messages for client tool results (tools with output) - // and approval responses (so iteration tracking works correctly) - for (const toolCallPart of toolCallParts) { - // Client tool with output - add as tool result - if (toolCallPart.output !== undefined && !toolCallPart.approval) { + // Flush any remaining accumulated content + flushSegment() + + // Emit tool results from client tool-call parts with output or approval, + // but only if not already covered by an explicit tool-result part above. + // These are appended at the end since they don't have explicit tool-result + // parts in the parts array to trigger inline emission. 
+ for (const part of uiMessage.parts) { + if (part.type !== 'tool-call') continue + + // Client tool with output - add as tool result (if not already emitted) + if ( + part.output !== undefined && + !part.approval && + !emittedToolResultIds.has(part.id) + ) { messageList.push({ role: 'tool', - content: JSON.stringify(toolCallPart.output), - toolCallId: toolCallPart.id, + content: JSON.stringify(part.output), + toolCallId: part.id, }) + emittedToolResultIds.add(part.id) } // Approval response - add as tool result for iteration tracking - // For APPROVED: includes pendingExecution marker so the tool still executes - // For DENIED: just marks the tool as complete (no execution needed) if ( - toolCallPart.state === 'approval-responded' && - toolCallPart.approval?.approved !== undefined + part.state === 'approval-responded' && + part.approval?.approved !== undefined && + !emittedToolResultIds.has(part.id) ) { - const approved = toolCallPart.approval.approved + const approved = part.approval.approved messageList.push({ role: 'tool', content: JSON.stringify({ approved, - // Mark approved tools as pending execution - they still need to run ...(approved && { pendingExecution: true }), message: approved ? 
'User approved this action' : 'User denied this action', }), - toolCallId: toolCallPart.id, + toolCallId: part.id, }) + emittedToolResultIds.add(part.id) } } + // If no messages were produced (e.g., empty parts), emit a minimal assistant message + if (messageList.length === 0) { + messageList.push({ + role: 'assistant', + content: null, + }) + } + return messageList } @@ -241,13 +308,29 @@ export function modelMessageToUIMessage( ): UIMessage { const parts: Array = [] - // Handle content (convert multimodal content to text for UI) - const textContent = getTextContent(modelMessage.content) - if (textContent) { + // Handle tool results (when role is "tool") - only produce tool-result part, + // not a text part (the content IS the tool result, not display text) + if (modelMessage.role === 'tool' && modelMessage.toolCallId) { parts.push({ - type: 'text', - content: textContent, + type: 'tool-result', + toolCallId: modelMessage.toolCallId, + content: getTextContent(modelMessage.content), + state: 'complete', }) + } else if (Array.isArray(modelMessage.content)) { + // Multimodal content - preserve all content parts as MessageParts + for (const part of modelMessage.content) { + parts.push(part) + } + } else { + // String or null content + const textContent = getTextContent(modelMessage.content) + if (textContent) { + parts.push({ + type: 'text', + content: textContent, + }) + } } // Handle tool calls @@ -263,16 +346,6 @@ export function modelMessageToUIMessage( } } - // Handle tool results (when role is "tool") - if (modelMessage.role === 'tool' && modelMessage.toolCallId) { - parts.push({ - type: 'tool-result', - toolCallId: modelMessage.toolCallId, - content: getTextContent(modelMessage.content), - state: 'complete', - }) - } - return { id: id || generateMessageId(), role: modelMessage.role === 'tool' ? 
'assistant' : modelMessage.role, diff --git a/packages/typescript/ai/src/activities/chat/stream/processor.ts b/packages/typescript/ai/src/activities/chat/stream/processor.ts index 4f40ca2d..96d95865 100644 --- a/packages/typescript/ai/src/activities/chat/stream/processor.ts +++ b/packages/typescript/ai/src/activities/chat/stream/processor.ts @@ -12,6 +12,9 @@ * - Thinking/reasoning content * - Recording/replay for testing * - Event-driven architecture for UI updates + * + * @see docs/chat-architecture.md — Canonical reference for AG-UI chunk ordering, + * adapter contract, single-shot flows, and expected UIMessage output. */ import { generateMessageId, uiMessageToModelMessages } from '../messages.js' import { defaultJSONParser } from './json-parser' @@ -101,18 +104,18 @@ export interface StreamProcessorOptions { * StreamProcessor - State machine for processing AI response streams * * Manages the full UIMessage[] conversation and emits events on changes. + * Trusts the adapter contract: adapters emit clean AG-UI events in the + * correct order. * * State tracking: * - Full message array * - Current assistant message being streamed - * - Text content accumulation + * - Text content accumulation (reset on TEXT_MESSAGE_START) * - Multiple parallel tool calls - * - Tool call completion detection + * - Tool call completion via TOOL_CALL_END events * - * Tool call completion is detected when: - * 1. A new tool call starts at a different index - * 2. Text content arrives - * 3. 
Stream ends + * @see docs/chat-architecture.md#streamprocessor-internal-state — State field reference + * @see docs/chat-architecture.md#adapter-contract — What this class expects from adapters */ export class StreamProcessor { private chunkStrategy: ChunkStrategy @@ -134,9 +137,8 @@ export class StreamProcessor { private toolCalls: Map = new Map() private toolCallOrder: Array = [] private finishReason: string | null = null + private hasError = false private isDone = false - // Track if we've had tool calls since the last text segment started - private hasToolCallsSinceTextStart = false // Recording private recording: ChunkRecording | null = null @@ -213,12 +215,52 @@ export class StreamProcessor { } /** - * Start streaming a new assistant message - * Returns the message ID + * Prepare for a new assistant message stream. + * Does NOT create the message immediately -- the message is created lazily + * when the first content-bearing chunk arrives via ensureAssistantMessage(). + * This prevents empty assistant messages from flickering in the UI when + * auto-continuation produces no content. */ - startAssistantMessage(): string { + prepareAssistantMessage(): void { // Reset stream state for new message this.resetStreamState() + // Clear the current assistant message ID so ensureAssistantMessage() + // will create a fresh message on the first content chunk + this.currentAssistantMessageId = null + } + + /** + * @deprecated Use prepareAssistantMessage() instead. This eagerly creates + * an assistant message which can cause empty message flicker. + */ + startAssistantMessage(): string { + this.prepareAssistantMessage() + return this.ensureAssistantMessage() + } + + /** + * Get the current assistant message ID (if one has been created). + * Returns null if prepareAssistantMessage() was called but no content + * has arrived yet. 
+ */ + getCurrentAssistantMessageId(): string | null { + return this.currentAssistantMessageId + } + + /** + * Lazily create the assistant message if it hasn't been created yet. + * Called by content handlers on the first content-bearing chunk. + * Returns the message ID. + * + * Content-bearing chunks that trigger this: + * TEXT_MESSAGE_CONTENT, TOOL_CALL_START, STEP_FINISHED, RUN_ERROR. + * + * @see docs/chat-architecture.md#streamprocessor-internal-state — Lazy creation pattern + */ + private ensureAssistantMessage(): string { + if (this.currentAssistantMessageId) { + return this.currentAssistantMessageId + } const assistantMessage: UIMessage = { id: generateMessageId(), @@ -398,7 +440,13 @@ export class StreamProcessor { } /** - * Process a single chunk from the stream + * Process a single chunk from the stream. + * + * Central dispatch for all AG-UI events. Each event type maps to a specific + * handler. Events not listed in the switch are intentionally ignored + * (RUN_STARTED, TEXT_MESSAGE_END, STEP_STARTED, STATE_SNAPSHOT, STATE_DELTA). + * + * @see docs/chat-architecture.md#adapter-contract — Expected event types and ordering */ processChunk(chunk: StreamChunk): void { // Record chunk if enabled @@ -412,6 +460,10 @@ export class StreamProcessor { switch (chunk.type) { // AG-UI Events + case 'TEXT_MESSAGE_START': + this.handleTextMessageStartEvent() + break + case 'TEXT_MESSAGE_CONTENT': this.handleTextMessageContentEvent(chunk) break @@ -445,67 +497,53 @@ export class StreamProcessor { break default: - // RUN_STARTED, TEXT_MESSAGE_START, TEXT_MESSAGE_END, STEP_STARTED, + // RUN_STARTED, TEXT_MESSAGE_END, STEP_STARTED, // STATE_SNAPSHOT, STATE_DELTA - no special handling needed break } } /** - * Handle TEXT_MESSAGE_CONTENT event + * Handle TEXT_MESSAGE_START event — marks the beginning of a new text segment. + * Resets segment accumulation so text after tool calls starts fresh. 
+ * + * This is the key mechanism for multi-segment text (text before and after tool + * calls becoming separate TextParts). Without this reset, all text would merge + * into a single TextPart and tool-call interleaving would be lost. + * + * @see docs/chat-architecture.md#single-shot-text-response — Step-by-step text processing + * @see docs/chat-architecture.md#text-then-tool-interleaving-single-shot — Multi-segment text + */ + private handleTextMessageStartEvent(): void { + // Emit any pending text from a previous segment before resetting + if (this.currentSegmentText !== this.lastEmittedText) { + this.emitTextUpdate() + } + this.currentSegmentText = '' + this.lastEmittedText = '' + } + + /** + * Handle TEXT_MESSAGE_CONTENT event. + * + * Accumulates delta into both currentSegmentText (for UI emission) and + * totalTextContent (for ProcessorResult). Lazily creates the assistant + * UIMessage on first content. Uses updateTextPart() which replaces the + * last TextPart or creates a new one depending on part ordering. + * + * @see docs/chat-architecture.md#single-shot-text-response — Text accumulation step-by-step + * @see docs/chat-architecture.md#uimessage-part-ordering-invariants — Replace vs. 
push logic */ private handleTextMessageContentEvent( chunk: Extract, ): void { - // Content arriving means all current tool calls are complete - this.completeAllToolCalls() - - const previousSegment = this.currentSegmentText - - // Detect if this is a NEW text segment (after tool calls) vs continuation - const isNewSegment = - this.hasToolCallsSinceTextStart && - previousSegment.length > 0 && - this.isNewTextSegment(chunk, previousSegment) - - if (isNewSegment) { - // Emit any accumulated text before starting new segment - if (previousSegment !== this.lastEmittedText) { - this.emitTextUpdate() - } - // Reset SEGMENT text accumulation for the new text segment after tool calls - this.currentSegmentText = '' - this.lastEmittedText = '' - this.hasToolCallsSinceTextStart = false - } + this.ensureAssistantMessage() - const currentText = this.currentSegmentText - let nextText = currentText - - // Prefer delta over content - delta is the incremental change - // Check for both undefined and empty string to avoid "undefined" string concatenation - if (chunk.delta !== undefined && chunk.delta !== '') { - nextText = currentText + chunk.delta - } else if (chunk.content !== undefined && chunk.content !== '') { - // Fallback: use content if delta is not provided - if (chunk.content.startsWith(currentText)) { - nextText = chunk.content - } else if (currentText.startsWith(chunk.content)) { - nextText = currentText - } else { - nextText = currentText + chunk.content - } - } - - // Calculate the delta for totalTextContent - const textDelta = nextText.slice(currentText.length) - this.currentSegmentText = nextText - this.totalTextContent += textDelta + this.currentSegmentText += chunk.delta + this.totalTextContent += chunk.delta - // Use delta for chunk strategy if available - const chunkPortion = chunk.delta || chunk.content || '' const shouldEmit = this.chunkStrategy.shouldEmit( - chunkPortion, + chunk.delta, this.currentSegmentText, ) if (shouldEmit && this.currentSegmentText !== 
this.lastEmittedText) { @@ -514,13 +552,22 @@ export class StreamProcessor { } /** - * Handle TOOL_CALL_START event + * Handle TOOL_CALL_START event. + * + * Creates a new InternalToolCallState entry in the toolCalls Map and appends + * a ToolCallPart to the UIMessage. Duplicate toolCallId is a no-op. + * + * CRITICAL: This MUST be received before any TOOL_CALL_ARGS for the same + * toolCallId. Args for unknown IDs are silently dropped. + * + * @see docs/chat-architecture.md#single-shot-tool-call-response — Tool call state transitions + * @see docs/chat-architecture.md#parallel-tool-calls-single-shot — Parallel tracking by ID + * @see docs/chat-architecture.md#adapter-contract — Ordering requirements */ private handleToolCallStartEvent( chunk: Extract, ): void { - // Mark that we've seen tool calls since the last text segment - this.hasToolCallsSinceTextStart = true + this.ensureAssistantMessage() const toolCallId = chunk.toolCallId const existingToolCall = this.toolCalls.get(toolCallId) @@ -567,7 +614,16 @@ export class StreamProcessor { } /** - * Handle TOOL_CALL_ARGS event + * Handle TOOL_CALL_ARGS event. + * + * Appends the delta to the tool call's accumulated arguments string. + * Transitions state from awaiting-input → input-streaming on first non-empty delta. + * Attempts partial JSON parse on each update for UI preview. + * + * If toolCallId is not found in the Map (no preceding TOOL_CALL_START), + * this event is silently dropped. + * + * @see docs/chat-architecture.md#single-shot-tool-call-response — Step-by-step tool call processing */ private handleToolCallArgsEvent( chunk: Extract, @@ -617,15 +673,53 @@ export class StreamProcessor { } /** - * Handle TOOL_CALL_END event + * Handle TOOL_CALL_END event — authoritative signal that a tool call's input is finalized. + * + * This event has a DUAL ROLE: + * - Without `result`: Signals arguments are done (from adapter). Transitions to input-complete. 
+ * - With `result`: Signals tool was executed and result is available (from TextEngine). + * Creates both output on the tool-call part AND a tool-result part. + * + * If `input` is provided, it overrides the accumulated string parse as the + * canonical parsed arguments. + * + * @see docs/chat-architecture.md#tool-results-and-the-tool_call_end-dual-role — Full explanation + * @see docs/chat-architecture.md#single-shot-tool-call-response — End-to-end flow */ private handleToolCallEndEvent( chunk: Extract, ): void { - const state: ToolResultState = 'complete' + // Transition the tool call to input-complete (the authoritative completion signal) + const existingToolCall = this.toolCalls.get(chunk.toolCallId) + if (existingToolCall && existingToolCall.state !== 'input-complete') { + const index = this.toolCallOrder.indexOf(chunk.toolCallId) + this.completeToolCall(index, existingToolCall) + // If TOOL_CALL_END provides parsed input, use it as the canonical parsed + // arguments (overrides the accumulated string parse from completeToolCall) + if (chunk.input !== undefined) { + existingToolCall.parsedArguments = chunk.input + } + } - // Update UIMessage if we have a current assistant message + // Update UIMessage if we have a current assistant message and a result if (this.currentAssistantMessageId && chunk.result) { + const state: ToolResultState = 'complete' + + // Step 1: Update the tool-call part's output field (for UI consistency + // with client tools — see GitHub issue #176) + let output: unknown + try { + output = JSON.parse(chunk.result) + } catch { + output = chunk.result + } + this.messages = updateToolCallWithOutput( + this.messages, + chunk.toolCallId, + output, + ) + + // Step 2: Create/update the tool-result part (for LLM conversation history) this.messages = updateToolResultPart( this.messages, this.currentAssistantMessageId, @@ -638,7 +732,14 @@ export class StreamProcessor { } /** - * Handle RUN_FINISHED event + * Handle RUN_FINISHED event. 
+ * + * Records the finishReason and calls completeAllToolCalls() as a safety net + * to force-complete any tool calls that didn't receive an explicit TOOL_CALL_END. + * This handles cases like aborted streams or adapter bugs. + * + * @see docs/chat-architecture.md#single-shot-tool-call-response — finishReason semantics + * @see docs/chat-architecture.md#adapter-contract — Why RUN_FINISHED is mandatory */ private handleRunFinishedEvent( chunk: Extract, @@ -654,33 +755,26 @@ export class StreamProcessor { private handleRunErrorEvent( chunk: Extract, ): void { + this.hasError = true + this.ensureAssistantMessage() // Emit error event this.events.onError?.(new Error(chunk.error.message || 'An error occurred')) } /** - * Handle STEP_FINISHED event (for thinking/reasoning content) + * Handle STEP_FINISHED event (for thinking/reasoning content). + * + * Accumulates delta into thinkingContent and updates a single ThinkingPart + * in the UIMessage (replaced in-place, not appended). + * + * @see docs/chat-architecture.md#thinkingreasoning-content — Thinking flow */ private handleStepFinishedEvent( chunk: Extract, ): void { - const previous = this.thinkingContent - let nextThinking = previous - - // Prefer delta over content - if (chunk.delta && chunk.delta !== '') { - nextThinking = previous + chunk.delta - } else if (chunk.content && chunk.content !== '') { - if (chunk.content.startsWith(previous)) { - nextThinking = chunk.content - } else if (previous.startsWith(chunk.content)) { - nextThinking = previous - } else { - nextThinking = previous + chunk.content - } - } + this.ensureAssistantMessage() - this.thinkingContent = nextThinking + this.thinkingContent += chunk.delta // Update UIMessage if (this.currentAssistantMessageId) { @@ -700,9 +794,14 @@ export class StreamProcessor { } /** - * Handle CUSTOM event - * Handles special custom events like 'tool-input-available' for client-side tool execution - * and 'approval-requested' for tool approval flows + * Handle CUSTOM 
event. + * + * Handles special custom events emitted by the TextEngine (not adapters): + * - 'tool-input-available': Client tool needs execution. Fires onToolCall. + * - 'approval-requested': Tool needs user approval. Updates tool-call part + * state and fires onApprovalRequest. + * + * @see docs/chat-architecture.md#client-tools-and-approval-flows — Full flow details */ private handleCustomEvent( chunk: Extract, @@ -754,29 +853,13 @@ export class StreamProcessor { } /** - * Detect if an incoming content chunk represents a NEW text segment - */ - private isNewTextSegment( - chunk: Extract, - previous: string, - ): boolean { - // Check if content is present (delta is always defined but may be empty string) - if (chunk.content !== undefined) { - if (chunk.content.length < previous.length) { - return true - } - if ( - !chunk.content.startsWith(previous) && - !previous.startsWith(chunk.content) - ) { - return true - } - } - return false - } - - /** - * Complete all tool calls + * Complete all tool calls — safety net for stream termination. + * + * Called by RUN_FINISHED and finalizeStream(). Force-transitions any tool call + * not yet in input-complete state. Handles cases where TOOL_CALL_END was + * missed (adapter bug, network error, aborted stream). + * + * @see docs/chat-architecture.md#single-shot-tool-call-response — Safety net behavior */ private completeAllToolCalls(): void { this.toolCalls.forEach((toolCall, id) => { @@ -824,7 +907,13 @@ export class StreamProcessor { } /** - * Emit pending text update + * Emit pending text update. + * + * Calls updateTextPart() which has critical append-vs-replace logic: + * - If last UIMessage part is TextPart → replaces its content (same segment). + * - If last part is anything else → pushes new TextPart (new segment after tools). + * + * @see docs/chat-architecture.md#uimessage-part-ordering-invariants — Replace vs. 
push logic */ private emitTextUpdate(): void { this.lastEmittedText = this.currentSegmentText @@ -854,10 +943,16 @@ export class StreamProcessor { } /** - * Finalize the stream - complete all pending operations + * Finalize the stream — complete all pending operations. + * + * Called when the async iterable ends (stream closed). Acts as the final + * safety net: completes any remaining tool calls, flushes un-emitted text, + * and fires onStreamEnd. + * + * @see docs/chat-architecture.md#single-shot-text-response — Finalization step */ finalizeStream(): void { - // Complete any remaining tool calls + // Safety net: complete any remaining tool calls (e.g. on network errors / aborted streams) this.completeAllToolCalls() // Emit any pending text if not already emitted @@ -865,7 +960,25 @@ export class StreamProcessor { this.emitTextUpdate() } - // Emit stream end event + // Remove the assistant message if it only contains whitespace text + // (no tool calls, no meaningful content). This handles models like Gemini + // that sometimes return just "\n" during auto-continuation. + // Preserve the message on errors so the UI can show error state. 
+ if (this.currentAssistantMessageId && !this.hasError) { + const assistantMessage = this.messages.find( + (m) => m.id === this.currentAssistantMessageId, + ) + if (assistantMessage && this.isWhitespaceOnlyMessage(assistantMessage)) { + this.messages = this.messages.filter( + (m) => m.id !== this.currentAssistantMessageId, + ) + this.emitMessagesChange() + this.currentAssistantMessageId = null + return + } + } + + // Emit stream end event (only if a message was actually created) if (this.currentAssistantMessageId) { const assistantMessage = this.messages.find( (m) => m.id === this.currentAssistantMessageId, @@ -950,8 +1063,8 @@ export class StreamProcessor { this.toolCalls.clear() this.toolCallOrder = [] this.finishReason = null + this.hasError = false this.isDone = false - this.hasToolCallsSinceTextStart = false this.chunkStrategy.reset?.() } @@ -964,6 +1077,17 @@ export class StreamProcessor { this.currentAssistantMessageId = null } + /** + * Check if a message contains only whitespace text and no other meaningful parts + * (no tool calls, tool results, thinking, etc.) 
+ */ + private isWhitespaceOnlyMessage(message: UIMessage): boolean { + if (message.parts.length === 0) return false + return message.parts.every( + (part) => part.type === 'text' && part.content.trim() === '', + ) + } + /** * Replay a recording through the processor */ diff --git a/packages/typescript/ai/src/activities/chat/tools/tool-calls.ts b/packages/typescript/ai/src/activities/chat/tools/tool-calls.ts index 7bb767a6..6ea51284 100644 --- a/packages/typescript/ai/src/activities/chat/tools/tool-calls.ts +++ b/packages/typescript/ai/src/activities/chat/tools/tool-calls.ts @@ -295,10 +295,7 @@ export async function executeToolCalls( // Parse arguments, throwing error if invalid JSON let input: unknown = {} - let argsStr = toolCall.function.arguments.trim() || '{}' - // Normalize "null" to "{}" — can occur when the model streams a tool_use - // block with no input_json_delta events (Anthropic adapter edge case) - if (argsStr === 'null') argsStr = '{}' + const argsStr = toolCall.function.arguments.trim() || '{}' if (argsStr) { try { input = JSON.parse(argsStr) diff --git a/packages/typescript/ai/src/types.ts b/packages/typescript/ai/src/types.ts index ce2f30fa..4d7ca6e5 100644 --- a/packages/typescript/ai/src/types.ts +++ b/packages/typescript/ai/src/types.ts @@ -789,9 +789,9 @@ export interface TextMessageContentEvent extends BaseAGUIEvent { type: 'TEXT_MESSAGE_CONTENT' /** Message identifier */ messageId: string - /** The incremental content token (may be undefined if only content is provided) */ - delta?: string - /** Full accumulated content so far */ + /** The incremental content token */ + delta: string + /** Full accumulated content so far (optional, for debugging) */ content?: string } @@ -864,8 +864,8 @@ export interface StepFinishedEvent extends BaseAGUIEvent { /** Step identifier */ stepId: string /** Incremental thinking content */ - delta?: string - /** Full accumulated thinking content */ + delta: string + /** Full accumulated thinking content 
(optional, for debugging) */ content?: string } diff --git a/packages/typescript/ai/tests/chat.test.ts b/packages/typescript/ai/tests/chat.test.ts new file mode 100644 index 00000000..d47939c9 --- /dev/null +++ b/packages/typescript/ai/tests/chat.test.ts @@ -0,0 +1,1080 @@ +import { describe, expect, it, vi } from 'vitest' +import { chat, createChatOptions } from '../src/activities/chat/index' +import type { AnyTextAdapter } from '../src/activities/chat/adapter' +import type { StreamChunk, Tool } from '../src/types' + +// ============================================================================ +// Helpers +// ============================================================================ + +/** Create a typed StreamChunk with minimal boilerplate. */ +function chunk( + type: T, + fields: Omit, 'type' | 'timestamp'>, +): StreamChunk { + return { type, timestamp: Date.now(), ...fields } as unknown as StreamChunk +} + +/** Shorthand chunk factories for common AG-UI events. */ +const ev = { + runStarted: (runId = 'run-1') => chunk('RUN_STARTED', { runId }), + textStart: (messageId = 'msg-1') => + chunk('TEXT_MESSAGE_START', { messageId, role: 'assistant' as const }), + textContent: (delta: string, messageId = 'msg-1') => + chunk('TEXT_MESSAGE_CONTENT', { messageId, delta }), + textEnd: (messageId = 'msg-1') => chunk('TEXT_MESSAGE_END', { messageId }), + toolStart: (toolCallId: string, toolName: string, index?: number) => + chunk('TOOL_CALL_START', { + toolCallId, + toolName, + ...(index !== undefined ? 
{ index } : {}), + }), + toolArgs: (toolCallId: string, delta: string) => + chunk('TOOL_CALL_ARGS', { toolCallId, delta }), + toolEnd: ( + toolCallId: string, + toolName: string, + opts?: { input?: unknown; result?: string }, + ) => chunk('TOOL_CALL_END', { toolCallId, toolName, ...opts }), + runFinished: ( + finishReason: + | 'stop' + | 'length' + | 'content_filter' + | 'tool_calls' + | null = 'stop', + runId = 'run-1', + ) => chunk('RUN_FINISHED', { runId, finishReason }), + runError: (message: string, runId = 'run-1') => + chunk('RUN_ERROR', { runId, error: { message } }), + stepFinished: (delta: string, stepId = 'step-1') => + chunk('STEP_FINISHED', { stepId, delta }), +} + +/** + * Create a mock adapter that satisfies AnyTextAdapter. + * `chatStreamFn` receives the options and returns an AsyncIterable of chunks. + * Multiple invocations can be tracked via the returned `calls` array. + */ +function createMockAdapter(options: { + chatStreamFn?: (opts: any) => AsyncIterable + /** Array of chunk sequences: chatStream returns iterations[0] on first call, iterations[1] on second, etc. 
*/ + iterations?: Array> + structuredOutput?: (opts: any) => Promise<{ data: unknown; rawText: string }> +}) { + const calls: Array = [] + let callIndex = 0 + + const adapter: AnyTextAdapter = { + kind: 'text' as const, + name: 'mock', + model: 'test-model' as const, + '~types': { + providerOptions: {} as Record, + inputModalities: ['text'] as readonly ['text'], + messageMetadataByModality: { + text: undefined as unknown, + image: undefined as unknown, + audio: undefined as unknown, + video: undefined as unknown, + document: undefined as unknown, + }, + }, + chatStream: (opts: any) => { + calls.push(opts) + + if (options.chatStreamFn) { + return options.chatStreamFn(opts) + } + + if (options.iterations) { + const chunks = options.iterations[callIndex] || [] + callIndex++ + return (async function* () { + for (const c of chunks) yield c + })() + } + + return (async function* () {})() + }, + structuredOutput: + options.structuredOutput ?? (async () => ({ data: {}, rawText: '{}' })), + } + + return { adapter, calls } +} + +/** Collect all chunks from an async iterable. */ +async function collectChunks( + stream: AsyncIterable, +): Promise> { + const chunks: Array = [] + for await (const c of stream) { + chunks.push(c) + } + return chunks +} + +/** Simple server tool for testing. */ +function serverTool(name: string, executeFn: (args: any) => any): Tool { + return { + name, + description: `Test tool: ${name}`, + execute: executeFn, + } +} + +/** Client tool (no execute function). 
*/ +function clientTool(name: string, opts?: { needsApproval?: boolean }): Tool { + return { + name, + description: `Client tool: ${name}`, + needsApproval: opts?.needsApproval, + } +} + +// ============================================================================ +// Tests +// ============================================================================ + +describe('chat()', () => { + // ========================================================================== + // Streaming text (no tools) + // ========================================================================== + describe('streaming text (no tools)', () => { + it('should return an async iterable that yields all adapter chunks', async () => { + const { adapter } = createMockAdapter({ + iterations: [ + [ + ev.runStarted(), + ev.textStart(), + ev.textContent('Hello'), + ev.textContent(' world!'), + ev.textEnd(), + ev.runFinished('stop'), + ], + ], + }) + + const stream = chat({ + adapter, + messages: [{ role: 'user', content: 'Hi' }], + }) + + const chunks = await collectChunks(stream as AsyncIterable) + + expect(chunks.length).toBe(6) + expect(chunks[0]!.type).toBe('RUN_STARTED') + expect(chunks[1]!.type).toBe('TEXT_MESSAGE_START') + expect(chunks[2]!.type).toBe('TEXT_MESSAGE_CONTENT') + expect(chunks[3]!.type).toBe('TEXT_MESSAGE_CONTENT') + expect(chunks[4]!.type).toBe('TEXT_MESSAGE_END') + expect(chunks[5]!.type).toBe('RUN_FINISHED') + }) + + it('should pass messages to the adapter', async () => { + const { adapter, calls } = createMockAdapter({ + iterations: [ + [ev.runStarted(), ev.textContent('Hi'), ev.runFinished('stop')], + ], + }) + + const stream = chat({ + adapter, + messages: [{ role: 'user', content: 'Hello' }], + }) + + await collectChunks(stream as AsyncIterable) + + expect(calls).toHaveLength(1) + expect(calls[0].messages).toBeDefined() + expect(calls[0].messages[0].role).toBe('user') + }) + + it('should pass systemPrompts to the adapter', async () => { + const { adapter, calls } = 
createMockAdapter({ + iterations: [[ev.runStarted(), ev.runFinished('stop')]], + }) + + const stream = chat({ + adapter, + messages: [{ role: 'user', content: 'Hello' }], + systemPrompts: ['You are a helpful assistant'], + }) + + await collectChunks(stream as AsyncIterable) + + expect(calls[0].systemPrompts).toEqual(['You are a helpful assistant']) + }) + + it('should pass temperature, topP, maxTokens to the adapter', async () => { + const { adapter, calls } = createMockAdapter({ + iterations: [[ev.runStarted(), ev.runFinished('stop')]], + }) + + const stream = chat({ + adapter, + messages: [{ role: 'user', content: 'Hello' }], + temperature: 0.5, + topP: 0.9, + maxTokens: 100, + }) + + await collectChunks(stream as AsyncIterable) + + expect(calls[0].temperature).toBe(0.5) + expect(calls[0].topP).toBe(0.9) + expect(calls[0].maxTokens).toBe(100) + }) + }) + + // ========================================================================== + // Non-streaming text (stream: false) + // ========================================================================== + describe('non-streaming text (stream: false)', () => { + it('should return a Promise with collected text content', async () => { + const { adapter } = createMockAdapter({ + iterations: [ + [ + ev.runStarted(), + ev.textStart(), + ev.textContent('Hello'), + ev.textContent(' world!'), + ev.textEnd(), + ev.runFinished('stop'), + ], + ], + }) + + const result = await chat({ + adapter, + messages: [{ role: 'user', content: 'Hi' }], + stream: false, + }) + + expect(result).toBe('Hello world!') + }) + + it('should still execute tools under the hood when stream: false', async () => { + const executeSpy = vi.fn().mockReturnValue({ temp: 72 }) + + const { adapter } = createMockAdapter({ + iterations: [ + // First call: tool call + [ + ev.runStarted(), + ev.toolStart('call_1', 'getWeather'), + ev.toolArgs('call_1', '{"city":"NYC"}'), + ev.runFinished('tool_calls'), + ], + // Second call: final text + [ + ev.runStarted(), + 
ev.textStart(), + ev.textContent('72F in NYC'), + ev.textEnd(), + ev.runFinished('stop'), + ], + ], + }) + + const result = await chat({ + adapter, + messages: [{ role: 'user', content: 'Weather in NYC?' }], + tools: [serverTool('getWeather', executeSpy)], + stream: false, + }) + + expect(executeSpy).toHaveBeenCalledTimes(1) + expect(result).toBe('72F in NYC') + }) + }) + + // ========================================================================== + // Server tool execution + // ========================================================================== + describe('server tool execution', () => { + it('should execute server tools and yield TOOL_CALL_END with result', async () => { + const executeSpy = vi.fn().mockReturnValue({ temp: 72 }) + + const { adapter, calls } = createMockAdapter({ + iterations: [ + // First adapter call: model requests tool + [ + ev.runStarted(), + ev.textStart(), + ev.textContent('Let me check.'), + ev.textEnd(), + ev.toolStart('call_1', 'getWeather'), + ev.toolArgs('call_1', '{"city":"NYC"}'), + ev.runFinished('tool_calls'), + ], + // Second adapter call: model produces final text + [ + ev.runStarted(), + ev.textStart(), + ev.textContent('72F in NYC.'), + ev.textEnd(), + ev.runFinished('stop'), + ], + ], + }) + + const stream = chat({ + adapter, + messages: [{ role: 'user', content: 'Weather?' 
}], + tools: [serverTool('getWeather', executeSpy)], + }) + + const chunks = await collectChunks(stream as AsyncIterable) + + // Tool was executed + expect(executeSpy).toHaveBeenCalledTimes(1) + expect(executeSpy).toHaveBeenCalledWith({ city: 'NYC' }) + + // A TOOL_CALL_END chunk with result should have been yielded + const toolEndChunks = chunks.filter( + (c) => c.type === 'TOOL_CALL_END' && 'result' in c && c.result, + ) + expect(toolEndChunks.length).toBeGreaterThanOrEqual(1) + + // Adapter was called twice (tool call iteration + final text) + expect(calls).toHaveLength(2) + + // Second call should have tool result in messages + const secondCallMessages = calls[1].messages + const toolResultMsg = secondCallMessages.find( + (m: any) => m.role === 'tool', + ) + expect(toolResultMsg).toBeDefined() + }) + + it('should handle tool execution errors gracefully', async () => { + const { adapter } = createMockAdapter({ + iterations: [ + [ + ev.runStarted(), + ev.toolStart('call_1', 'failTool'), + ev.toolArgs('call_1', '{}'), + ev.runFinished('tool_calls'), + ], + [ + ev.runStarted(), + ev.textStart(), + ev.textContent('Error happened.'), + ev.textEnd(), + ev.runFinished('stop'), + ], + ], + }) + + const stream = chat({ + adapter, + messages: [{ role: 'user', content: 'Do something' }], + tools: [ + serverTool('failTool', () => { + throw new Error('Tool broke') + }), + ], + }) + + const chunks = await collectChunks(stream as AsyncIterable) + + // Should still complete and yield the error result + const toolEndChunks = chunks.filter( + (c) => c.type === 'TOOL_CALL_END' && 'result' in c, + ) + expect(toolEndChunks.length).toBeGreaterThanOrEqual(1) + // Error should be in the result + const resultStr = (toolEndChunks[0] as any).result + expect(resultStr).toContain('error') + }) + }) + + // ========================================================================== + // Parallel tool calls + // ========================================================================== + 
describe('parallel tool calls', () => { + it('should execute multiple tool calls and yield all results', async () => { + const weatherSpy = vi.fn().mockReturnValue({ temp: 72 }) + const timeSpy = vi.fn().mockReturnValue({ time: '3pm' }) + + const { adapter, calls } = createMockAdapter({ + iterations: [ + [ + ev.runStarted(), + ev.toolStart('call_1', 'getWeather', 0), + ev.toolStart('call_2', 'getTime', 1), + ev.toolArgs('call_1', '{"city":"NYC"}'), + ev.toolArgs('call_2', '{"tz":"EST"}'), + ev.runFinished('tool_calls'), + ], + [ + ev.runStarted(), + ev.textStart(), + ev.textContent('72F, 3pm EST'), + ev.textEnd(), + ev.runFinished('stop'), + ], + ], + }) + + const stream = chat({ + adapter, + messages: [{ role: 'user', content: 'Weather and time?' }], + tools: [ + serverTool('getWeather', weatherSpy), + serverTool('getTime', timeSpy), + ], + }) + + await collectChunks(stream as AsyncIterable) + + expect(weatherSpy).toHaveBeenCalledTimes(1) + expect(timeSpy).toHaveBeenCalledTimes(1) + + // Second adapter call should have both tool results + const secondCallMessages = calls[1].messages + const toolResultMsgs = secondCallMessages.filter( + (m: any) => m.role === 'tool', + ) + expect(toolResultMsgs).toHaveLength(2) + }) + }) + + // ========================================================================== + // Client tools (no execute) + // ========================================================================== + describe('client tools (no execute)', () => { + it('should yield CUSTOM tool-input-available event for client tools', async () => { + const { adapter } = createMockAdapter({ + iterations: [ + [ + ev.runStarted(), + ev.toolStart('call_1', 'clientSearch'), + ev.toolArgs('call_1', '{"query":"test"}'), + ev.runFinished('tool_calls'), + ], + ], + }) + + const stream = chat({ + adapter, + messages: [{ role: 'user', content: 'Search for test' }], + tools: [clientTool('clientSearch')], + }) + + const chunks = await collectChunks(stream as AsyncIterable) + + // 
Should yield a CUSTOM event for tool-input-available + const customChunks = chunks.filter( + (c) => + c.type === 'CUSTOM' && (c as any).name === 'tool-input-available', + ) + expect(customChunks).toHaveLength(1) + + const data = (customChunks[0] as any).data + expect(data.toolCallId).toBe('call_1') + expect(data.toolName).toBe('clientSearch') + expect(data.input).toEqual({ query: 'test' }) + }) + }) + + // ========================================================================== + // Approval flow + // ========================================================================== + describe('approval flow', () => { + it('should yield CUSTOM approval-requested event for tools with needsApproval', async () => { + const { adapter } = createMockAdapter({ + iterations: [ + [ + ev.runStarted(), + ev.toolStart('call_1', 'dangerousTool'), + ev.toolArgs('call_1', '{"action":"delete"}'), + ev.runFinished('tool_calls'), + ], + ], + }) + + const stream = chat({ + adapter, + messages: [{ role: 'user', content: 'Delete something' }], + tools: [serverTool('dangerousTool', () => ({ ok: true }))].map((t) => ({ + ...t, + needsApproval: true, + })), + }) + + const chunks = await collectChunks(stream as AsyncIterable) + + const approvalChunks = chunks.filter( + (c) => c.type === 'CUSTOM' && (c as any).name === 'approval-requested', + ) + expect(approvalChunks).toHaveLength(1) + + const data = (approvalChunks[0] as any).data + expect(data.toolCallId).toBe('call_1') + expect(data.toolName).toBe('dangerousTool') + expect(data.approval.needsApproval).toBe(true) + }) + + it('should yield CUSTOM approval-requested for client tools with needsApproval', async () => { + const { adapter } = createMockAdapter({ + iterations: [ + [ + ev.runStarted(), + ev.toolStart('call_1', 'clientDanger'), + ev.toolArgs('call_1', '{}'), + ev.runFinished('tool_calls'), + ], + ], + }) + + const stream = chat({ + adapter, + messages: [{ role: 'user', content: 'Do something' }], + tools: [clientTool('clientDanger', { 
needsApproval: true })], + }) + + const chunks = await collectChunks(stream as AsyncIterable) + + const approvalChunks = chunks.filter( + (c) => c.type === 'CUSTOM' && (c as any).name === 'approval-requested', + ) + expect(approvalChunks).toHaveLength(1) + }) + }) + + // ========================================================================== + // Pending tool calls from messages + // ========================================================================== + describe('pending tool calls from messages', () => { + it('should detect and execute pending tool calls from initial messages', async () => { + const executeSpy = vi.fn().mockReturnValue({ temp: 72 }) + + const { adapter, calls } = createMockAdapter({ + iterations: [ + // After pending tool is executed, the engine calls the adapter for the next response + [ + ev.runStarted(), + ev.textStart(), + ev.textContent('72F in NYC'), + ev.textEnd(), + ev.runFinished('stop'), + ], + ], + }) + + const stream = chat({ + adapter, + messages: [ + { role: 'user', content: 'Weather?' }, + { + role: 'assistant', + content: 'Let me check.', + toolCalls: [ + { + id: 'call_1', + type: 'function' as const, + function: { name: 'getWeather', arguments: '{"city":"NYC"}' }, + }, + ], + }, + // No tool result message -> pending! 
+ ], + tools: [serverTool('getWeather', executeSpy)], + }) + + const chunks = await collectChunks(stream as AsyncIterable) + + // Tool should have been executed as pending + expect(executeSpy).toHaveBeenCalledTimes(1) + + // TOOL_CALL_END with result should be in the stream + const toolEndChunks = chunks.filter( + (c) => c.type === 'TOOL_CALL_END' && 'result' in c && c.result, + ) + expect(toolEndChunks.length).toBeGreaterThanOrEqual(1) + + // Adapter should have been called with the tool result in messages + expect(calls).toHaveLength(1) + const adapterMessages = calls[0].messages + const toolMsg = adapterMessages.find((m: any) => m.role === 'tool') + expect(toolMsg).toBeDefined() + }) + + it('should skip pending tool calls that already have results', async () => { + const executeSpy = vi.fn().mockReturnValue({ temp: 72 }) + + const { adapter, calls } = createMockAdapter({ + iterations: [ + [ + ev.runStarted(), + ev.textStart(), + ev.textContent('Already answered.'), + ev.textEnd(), + ev.runFinished('stop'), + ], + ], + }) + + const stream = chat({ + adapter, + messages: [ + { role: 'user', content: 'Weather?' 
}, + { + role: 'assistant', + content: 'Let me check.', + toolCalls: [ + { + id: 'call_1', + type: 'function' as const, + function: { name: 'getWeather', arguments: '{"city":"NYC"}' }, + }, + ], + }, + // Tool result IS present -> not pending + { role: 'tool', content: '{"temp":72}', toolCallId: 'call_1' }, + ], + tools: [serverTool('getWeather', executeSpy)], + }) + + await collectChunks(stream as AsyncIterable) + + // Tool should NOT have been executed again + expect(executeSpy).not.toHaveBeenCalled() + expect(calls).toHaveLength(1) + }) + }) + + // ========================================================================== + // Agent loop strategy + // ========================================================================== + describe('agent loop strategy', () => { + it('should stop after custom strategy says stop', async () => { + const executeSpy = vi.fn().mockReturnValue({ temp: 72 }) + + const { adapter, calls } = createMockAdapter({ + iterations: [ + [ + ev.runStarted(), + ev.toolStart('call_1', 'getWeather'), + ev.toolArgs('call_1', '{"city":"NYC"}'), + ev.runFinished('tool_calls'), + ], + // This second iteration should NOT be reached + [ + ev.runStarted(), + ev.textContent('Should not see this'), + ev.runFinished('stop'), + ], + ], + }) + + const stream = chat({ + adapter, + messages: [{ role: 'user', content: 'Weather?' 
}], + tools: [serverTool('getWeather', executeSpy)], + // Strategy that stops immediately (no iterations) + agentLoopStrategy: () => false, + }) + + await collectChunks(stream as AsyncIterable) + + // Only first adapter call (tool call) should happen + // The tool is executed but the loop doesn't continue to a second model call + expect(calls).toHaveLength(1) + }) + + it('should respect maxIterations strategy', async () => { + const executeSpy = vi.fn().mockReturnValue({ data: 'result' }) + + let callCount = 0 + const { adapter, calls } = createMockAdapter({ + chatStreamFn: () => { + callCount++ + // Always return tool calls to test max iteration limit + return (async function* () { + yield ev.runStarted() + yield ev.toolStart(`call_${callCount}`, 'repeater') + yield ev.toolArgs(`call_${callCount}`, '{}') + yield ev.runFinished('tool_calls') + })() + }, + }) + + const stream = chat({ + adapter, + messages: [{ role: 'user', content: 'Repeat' }], + tools: [serverTool('repeater', executeSpy)], + // maxIterations(2): allows iteration 0 and 1 + agentLoopStrategy: (state) => state.iterationCount < 2, + }) + + await collectChunks(stream as AsyncIterable) + + // Should have called the adapter 2 times (iterations 0 and 1) + // Each iteration has processText + executeToolCalls phases + expect(calls.length).toBe(2) + }) + }) + + // ========================================================================== + // Abort handling + // ========================================================================== + describe('abort handling', () => { + it('should stop streaming when abort is called', async () => { + const abortController = new AbortController() + let chunkCount = 0 + + const { adapter } = createMockAdapter({ + chatStreamFn: () => { + return (async function* () { + yield ev.runStarted() + yield ev.textStart() + yield ev.textContent('Hello') + // Abort after first content chunk is consumed + yield ev.textContent(' world') + yield ev.textContent(' more') + yield 
ev.textEnd() + yield ev.runFinished('stop') + })() + }, + }) + + const stream = chat({ + adapter, + messages: [{ role: 'user', content: 'Hi' }], + abortController, + }) + + const chunks: Array = [] + for await (const c of stream as AsyncIterable) { + chunks.push(c) + chunkCount++ + if (chunkCount === 3) { + // Abort after receiving RUN_STARTED, TEXT_MESSAGE_START, first TEXT_MESSAGE_CONTENT + abortController.abort() + } + } + + // Should have stopped early - not all 7 chunks received + expect(chunks.length).toBeLessThan(7) + }) + }) + + // ========================================================================== + // Error handling + // ========================================================================== + describe('error handling', () => { + it('should yield RUN_ERROR and stop the loop', async () => { + const { adapter } = createMockAdapter({ + iterations: [ + [ + ev.runStarted(), + ev.textStart(), + ev.textContent('Starting...'), + ev.runError('API rate limited'), + ], + ], + }) + + const stream = chat({ + adapter, + messages: [{ role: 'user', content: 'Hi' }], + }) + + const chunks = await collectChunks(stream as AsyncIterable) + + // RUN_ERROR should be in the chunks + const errorChunks = chunks.filter((c) => c.type === 'RUN_ERROR') + expect(errorChunks).toHaveLength(1) + expect((errorChunks[0] as any).error.message).toBe('API rate limited') + }) + + it('should not continue the agent loop after RUN_ERROR', async () => { + const { adapter, calls } = createMockAdapter({ + iterations: [ + [ev.runStarted(), ev.runError('Fatal error')], + // This should never be called + [ + ev.runStarted(), + ev.textContent('Should not happen'), + ev.runFinished('stop'), + ], + ], + }) + + const stream = chat({ + adapter, + messages: [{ role: 'user', content: 'Hi' }], + }) + + await collectChunks(stream as AsyncIterable) + + // Only first adapter call should happen + expect(calls).toHaveLength(1) + }) + }) + + // 
========================================================================== + // Structured output + // ========================================================================== + describe('structured output', () => { + it('should run agentic loop then call adapter.structuredOutput', async () => { + const structuredOutputSpy = vi.fn().mockResolvedValue({ + data: { name: 'Alice', age: 30 }, + rawText: '{"name":"Alice","age":30}', + }) + + const { adapter } = createMockAdapter({ + iterations: [ + // Agentic loop runs first + [ + ev.runStarted(), + ev.textStart(), + ev.textContent('Generating...'), + ev.textEnd(), + ev.runFinished('stop'), + ], + ], + structuredOutput: structuredOutputSpy, + }) + + // Use a plain JSON Schema (not Standard Schema) so no validation step + const result = await chat({ + adapter, + messages: [{ role: 'user', content: 'Generate a person' }], + outputSchema: { + type: 'object', + properties: { + name: { type: 'string' }, + age: { type: 'number' }, + }, + } as any, + }) + + expect(structuredOutputSpy).toHaveBeenCalledTimes(1) + expect(result).toEqual({ name: 'Alice', age: 30 }) + }) + + it('should pass final messages to structuredOutput after tool execution', async () => { + const structuredOutputSpy = vi.fn().mockResolvedValue({ + data: { summary: 'Weather is 72F' }, + rawText: '{"summary":"Weather is 72F"}', + }) + + const { adapter } = createMockAdapter({ + iterations: [ + // First: tool call + [ + ev.runStarted(), + ev.toolStart('call_1', 'getWeather'), + ev.toolArgs('call_1', '{"city":"NYC"}'), + ev.runFinished('tool_calls'), + ], + // Second: final text + [ev.runStarted(), ev.textContent('Done.'), ev.runFinished('stop')], + ], + structuredOutput: structuredOutputSpy, + }) + + await chat({ + adapter, + messages: [{ role: 'user', content: 'Summarize weather' }], + tools: [serverTool('getWeather', () => ({ temp: 72 }))], + outputSchema: { + type: 'object', + properties: { summary: { type: 'string' } }, + } as any, + }) + + // 
structuredOutput should have been called with messages that include tool results + const structuredCall = structuredOutputSpy.mock.calls[0]![0] + const messages = structuredCall.chatOptions.messages + const toolMsg = messages.find((m: any) => m.role === 'tool') + expect(toolMsg).toBeDefined() + }) + }) + + // ========================================================================== + // Thinking/step events + // ========================================================================== + describe('thinking/step events', () => { + it('should yield STEP_FINISHED chunks through', async () => { + const { adapter } = createMockAdapter({ + iterations: [ + [ + ev.runStarted(), + ev.stepFinished('Let me think'), + ev.stepFinished(' about this...'), + ev.textStart(), + ev.textContent('Answer!'), + ev.textEnd(), + ev.runFinished('stop'), + ], + ], + }) + + const stream = chat({ + adapter, + messages: [{ role: 'user', content: 'Think about it' }], + }) + + const chunks = await collectChunks(stream as AsyncIterable) + + const stepChunks = chunks.filter((c) => c.type === 'STEP_FINISHED') + expect(stepChunks).toHaveLength(2) + expect((stepChunks[0] as any).delta).toBe('Let me think') + expect((stepChunks[1] as any).delta).toBe(' about this...') + }) + }) + + // ========================================================================== + // createChatOptions helper + // ========================================================================== + describe('createChatOptions', () => { + it('should return the same options object (passthrough)', () => { + const { adapter } = createMockAdapter({}) + + const options = createChatOptions({ + adapter, + messages: [{ role: 'user', content: 'Hello' }], + temperature: 0.7, + }) + + expect(options.adapter).toBe(adapter) + expect(options.temperature).toBe(0.7) + expect(options.messages).toEqual([{ role: 'user', content: 'Hello' }]) + }) + }) + + // ========================================================================== + // Multi-iteration 
agent loop + // ========================================================================== + describe('multi-iteration agent loop', () => { + it('should handle two sequential tool call iterations', async () => { + const tool1Spy = vi.fn().mockReturnValue({ result: 'data1' }) + const tool2Spy = vi.fn().mockReturnValue({ result: 'data2' }) + + const { adapter, calls } = createMockAdapter({ + iterations: [ + // Iteration 1: first tool call + [ + ev.runStarted(), + ev.toolStart('call_1', 'tool1'), + ev.toolArgs('call_1', '{}'), + ev.runFinished('tool_calls'), + ], + // Iteration 2: second tool call + [ + ev.runStarted(), + ev.toolStart('call_2', 'tool2'), + ev.toolArgs('call_2', '{}'), + ev.runFinished('tool_calls'), + ], + // Iteration 3: final text + [ + ev.runStarted(), + ev.textStart(), + ev.textContent('All done.'), + ev.textEnd(), + ev.runFinished('stop'), + ], + ], + }) + + const stream = chat({ + adapter, + messages: [{ role: 'user', content: 'Do two things' }], + tools: [serverTool('tool1', tool1Spy), serverTool('tool2', tool2Spy)], + }) + + await collectChunks(stream as AsyncIterable) + + expect(tool1Spy).toHaveBeenCalledTimes(1) + expect(tool2Spy).toHaveBeenCalledTimes(1) + expect(calls).toHaveLength(3) + }) + }) + + // ========================================================================== + // Edge cases + // ========================================================================== + describe('edge cases', () => { + it('should handle empty messages array', async () => { + const { adapter } = createMockAdapter({ + iterations: [ + [ev.runStarted(), ev.textContent('Hello'), ev.runFinished('stop')], + ], + }) + + const stream = chat({ + adapter, + messages: [], + }) + + const chunks = await collectChunks(stream as AsyncIterable) + expect(chunks.length).toBeGreaterThan(0) + }) + + it('should handle adapter yielding no chunks', async () => { + const { adapter } = createMockAdapter({ + iterations: [[]], + }) + + const stream = chat({ + adapter, + messages: [{ 
role: 'user', content: 'Hi' }], + }) + + const chunks = await collectChunks(stream as AsyncIterable) + // Should complete without error even with empty stream + expect(chunks).toHaveLength(0) + }) + + it('should pass modelOptions through to adapter', async () => { + const { adapter, calls } = createMockAdapter({ + iterations: [[ev.runStarted(), ev.runFinished('stop')]], + }) + + const stream = chat({ + adapter, + messages: [{ role: 'user', content: 'Hi' }], + modelOptions: { customParam: 'value' } as any, + }) + + await collectChunks(stream as AsyncIterable) + expect(calls[0].modelOptions).toEqual({ customParam: 'value' }) + }) + + it('should handle TEXT_MESSAGE_CONTENT with content field', async () => { + const { adapter } = createMockAdapter({ + chatStreamFn: () => { + return (async function* () { + yield ev.runStarted() + yield ev.textStart() + // Include the optional content field + yield { + type: 'TEXT_MESSAGE_CONTENT', + messageId: 'msg-1', + delta: 'Hello', + content: 'Hello', + timestamp: Date.now(), + } as StreamChunk + yield ev.textEnd() + yield ev.runFinished('stop') + })() + }, + }) + + const result = await chat({ + adapter, + messages: [{ role: 'user', content: 'Hi' }], + stream: false, + }) + + expect(result).toBe('Hello') + }) + }) +}) diff --git a/packages/typescript/ai/tests/message-converters.test.ts b/packages/typescript/ai/tests/message-converters.test.ts index 60df58ec..8bf0cd7e 100644 --- a/packages/typescript/ai/tests/message-converters.test.ts +++ b/packages/typescript/ai/tests/message-converters.test.ts @@ -1,6 +1,9 @@ import { describe, expect, it } from 'vitest' import { + convertMessagesToModelMessages, modelMessageToUIMessage, + modelMessagesToUIMessages, + normalizeToUIMessage, uiMessageToModelMessages, } from '../src/activities/chat/messages' import type { ContentPart, ModelMessage, UIMessage } from '../src/types' @@ -314,6 +317,13 @@ describe('Message Converters', () => { id: 'msg-1', role: 'assistant', parts: [ + { + type: 
'tool-call', + id: 'tool-1', + name: 'getWeather', + arguments: '{"city": "NYC"}', + state: 'input-complete', + }, { type: 'tool-result', toolCallId: 'tool-1', @@ -325,12 +335,156 @@ describe('Message Converters', () => { const result = uiMessageToModelMessages(uiMessage) - // Should have assistant message + tool message + // Should have assistant message (with tool call) + tool result message expect(result.length).toBe(2) + expect(result[0]?.role).toBe('assistant') + expect(result[0]?.toolCalls?.[0]?.id).toBe('tool-1') expect(result[1]?.role).toBe('tool') expect(result[1]?.toolCallId).toBe('tool-1') expect(result[1]?.content).toBe('{"temp": 72}') }) + + it('should preserve interleaving of text, tool calls, and tool results', () => { + const uiMessage: UIMessage = { + id: 'msg-1', + role: 'assistant', + parts: [ + { type: 'text', content: 'Let me check the weather.' }, + { + type: 'tool-call', + id: 'tc-1', + name: 'getWeather', + arguments: '{"city": "NYC"}', + state: 'input-complete', + }, + { + type: 'tool-result', + toolCallId: 'tc-1', + content: '{"temp": 72}', + state: 'complete', + }, + { type: 'text', content: 'The temperature is 72F.' 
}, + ], + } + + const result = uiMessageToModelMessages(uiMessage) + + // Should produce: assistant(text1 + toolCall) → tool(result) → assistant(text2) + expect(result.length).toBe(3) + + expect(result[0]?.role).toBe('assistant') + expect(result[0]?.content).toBe('Let me check the weather.') + expect(result[0]?.toolCalls).toHaveLength(1) + expect(result[0]?.toolCalls?.[0]?.id).toBe('tc-1') + + expect(result[1]?.role).toBe('tool') + expect(result[1]?.toolCallId).toBe('tc-1') + expect(result[1]?.content).toBe('{"temp": 72}') + + expect(result[2]?.role).toBe('assistant') + expect(result[2]?.content).toBe('The temperature is 72F.') + expect(result[2]?.toolCalls).toBeUndefined() + }) + + it('should handle multi-round tool flow (text1 -> tool1 -> result1 -> text2 -> tool2 -> result2)', () => { + const uiMessage: UIMessage = { + id: 'msg-1', + role: 'assistant', + parts: [ + { type: 'text', content: 'Let me check our inventory.' }, + { + type: 'tool-call', + id: 'tc-get', + name: 'getGuitars', + arguments: '', + state: 'input-complete', + }, + { + type: 'tool-result', + toolCallId: 'tc-get', + content: '[{"id":7,"name":"Travelin Man"}]', + state: 'complete', + }, + { + type: 'text', + content: 'I found a great guitar! Let me recommend it.', + }, + { + type: 'tool-call', + id: 'tc-rec', + name: 'recommendGuitar', + arguments: '{"id": 7}', + state: 'input-complete', + output: { id: 7 }, + }, + { + type: 'tool-result', + toolCallId: 'tc-rec', + content: '{"id":7}', + state: 'complete', + }, + ], + } + + const result = uiMessageToModelMessages(uiMessage) + + // Should produce: + // 1. assistant(text1 + getGuitars) + // 2. tool(getGuitars result) + // 3. assistant(text2 + recommendGuitar) + // 4. 
tool(recommendGuitar result) -- only once, no duplicate + expect(result.length).toBe(4) + + expect(result[0]?.role).toBe('assistant') + expect(result[0]?.content).toBe('Let me check our inventory.') + expect(result[0]?.toolCalls?.[0]?.function.name).toBe('getGuitars') + + expect(result[1]?.role).toBe('tool') + expect(result[1]?.toolCallId).toBe('tc-get') + + expect(result[2]?.role).toBe('assistant') + expect(result[2]?.content).toBe( + 'I found a great guitar! Let me recommend it.', + ) + expect(result[2]?.toolCalls?.[0]?.function.name).toBe('recommendGuitar') + + expect(result[3]?.role).toBe('tool') + expect(result[3]?.toolCallId).toBe('tc-rec') + + // No duplicate tool result for recommendGuitar (has both output and tool-result) + const toolMessages = result.filter((m) => m.role === 'tool') + expect(toolMessages).toHaveLength(2) + }) + + it('should handle tool-call-only segment (no text before tool call)', () => { + const uiMessage: UIMessage = { + id: 'msg-1', + role: 'assistant', + parts: [ + { + type: 'tool-call', + id: 'tc-1', + name: 'getGuitars', + arguments: '{}', + state: 'input-complete', + }, + { + type: 'tool-result', + toolCallId: 'tc-1', + content: '[]', + state: 'complete', + }, + ], + } + + const result = uiMessageToModelMessages(uiMessage) + + expect(result.length).toBe(2) + expect(result[0]?.role).toBe('assistant') + expect(result[0]?.content).toBeNull() + expect(result[0]?.toolCalls).toHaveLength(1) + expect(result[1]?.role).toBe('tool') + }) }) describe('modelMessageToUIMessage', () => { @@ -358,7 +512,7 @@ describe('Message Converters', () => { expect(result.id).toBe('custom-id') }) - it('should convert multimodal content to text', () => { + it('should preserve multimodal content parts', () => { const modelMessage: ModelMessage = { role: 'user', content: [ @@ -372,8 +526,13 @@ describe('Message Converters', () => { const result = modelMessageToUIMessage(modelMessage) - // Currently, modelMessageToUIMessage only extracts text content - 
expect(result.parts).toEqual([{ type: 'text', content: 'What is this?' }]) + expect(result.parts).toEqual([ + { type: 'text', content: 'What is this?' }, + { + type: 'image', + source: { type: 'url', value: 'https://example.com/img.jpg' }, + }, + ]) }) it('should handle tool message', () => { @@ -393,5 +552,963 @@ describe('Message Converters', () => { state: 'complete', }) }) + + it('should convert assistant message with toolCalls and text', () => { + const modelMessage: ModelMessage = { + role: 'assistant', + content: 'Let me check the weather.', + toolCalls: [ + { + id: 'tc-1', + type: 'function', + function: { + name: 'getWeather', + arguments: '{"city": "NYC"}', + }, + }, + ], + } + + const result = modelMessageToUIMessage(modelMessage) + + expect(result.role).toBe('assistant') + expect(result.parts).toEqual([ + { type: 'text', content: 'Let me check the weather.' }, + { + type: 'tool-call', + id: 'tc-1', + name: 'getWeather', + arguments: '{"city": "NYC"}', + state: 'input-complete', + }, + ]) + }) + + it('should convert assistant message with toolCalls and null content', () => { + const modelMessage: ModelMessage = { + role: 'assistant', + content: null, + toolCalls: [ + { + id: 'tc-1', + type: 'function', + function: { + name: 'getWeather', + arguments: '{"city": "NYC"}', + }, + }, + ], + } + + const result = modelMessageToUIMessage(modelMessage) + + expect(result.role).toBe('assistant') + // Should have only tool-call part, no text part + expect(result.parts).toEqual([ + { + type: 'tool-call', + id: 'tc-1', + name: 'getWeather', + arguments: '{"city": "NYC"}', + state: 'input-complete', + }, + ]) + }) + + it('should preserve multimodal content parts (image, audio, video, document)', () => { + const modelMessage: ModelMessage = { + role: 'user', + content: [ + { type: 'text', content: 'What is this?' 
}, + { + type: 'image', + source: { type: 'url', value: 'https://example.com/img.jpg' }, + }, + { + type: 'audio', + source: { + type: 'data', + value: 'base64audio', + mimeType: 'audio/mp3', + }, + }, + { + type: 'video', + source: { type: 'url', value: 'https://example.com/video.mp4' }, + }, + { + type: 'document', + source: { + type: 'data', + value: 'base64pdf', + mimeType: 'application/pdf', + }, + }, + ], + } + + const result = modelMessageToUIMessage(modelMessage) + + expect(result.parts.length).toBe(5) + expect(result.parts[0]).toEqual({ + type: 'text', + content: 'What is this?', + }) + expect(result.parts[1]).toEqual({ + type: 'image', + source: { type: 'url', value: 'https://example.com/img.jpg' }, + }) + expect(result.parts[2]).toEqual({ + type: 'audio', + source: { + type: 'data', + value: 'base64audio', + mimeType: 'audio/mp3', + }, + }) + expect(result.parts[3]).toEqual({ + type: 'video', + source: { type: 'url', value: 'https://example.com/video.mp4' }, + }) + expect(result.parts[4]).toEqual({ + type: 'document', + source: { + type: 'data', + value: 'base64pdf', + mimeType: 'application/pdf', + }, + }) + }) + + it('should handle null content', () => { + const modelMessage: ModelMessage = { + role: 'assistant', + content: null, + } + + const result = modelMessageToUIMessage(modelMessage) + + expect(result.role).toBe('assistant') + expect(result.parts).toEqual([]) + }) + + it('should handle empty string content', () => { + const modelMessage: ModelMessage = { + role: 'assistant', + content: '', + } + + const result = modelMessageToUIMessage(modelMessage) + + expect(result.role).toBe('assistant') + // Empty string has no text content, so no text part + expect(result.parts).toEqual([]) + }) + + it('should not produce redundant text part for tool messages', () => { + const modelMessage: ModelMessage = { + role: 'tool', + content: '{"temp": 72}', + toolCallId: 'tool-1', + } + + const result = modelMessageToUIMessage(modelMessage) + + // Should have only 
the tool-result part, NOT a text part + tool-result + const textParts = result.parts.filter((p) => p.type === 'text') + const toolResultParts = result.parts.filter( + (p) => p.type === 'tool-result', + ) + expect(textParts).toHaveLength(0) + expect(toolResultParts).toHaveLength(1) + expect(toolResultParts[0]).toEqual({ + type: 'tool-result', + toolCallId: 'tool-1', + content: '{"temp": 72}', + state: 'complete', + }) + }) + + it('should preserve multimodal content with metadata', () => { + const modelMessage: ModelMessage = { + role: 'user', + content: [ + { type: 'text', content: 'Analyze' }, + { + type: 'image', + source: { type: 'url', value: 'https://example.com/cat.jpg' }, + metadata: { detail: 'high' }, + }, + ], + } + + const result = modelMessageToUIMessage(modelMessage) + + expect(result.parts.length).toBe(2) + expect(result.parts[1]).toEqual({ + type: 'image', + source: { type: 'url', value: 'https://example.com/cat.jpg' }, + metadata: { detail: 'high' }, + }) + }) + }) + + describe('uiMessageToModelMessages - duplicate tool result prevention', () => { + it('should not create duplicate tool results when tool-call has output AND tool-result exists', () => { + // This scenario happens when a client tool executes: the UIMessage has both + // a tool-call part with output AND a tool-result part for the same toolCallId + const uiMessage: UIMessage = { + id: 'msg-1', + role: 'assistant', + parts: [ + { + type: 'text', + content: 'Let me recommend a guitar.', + }, + { + type: 'tool-call', + id: 'tc-1', + name: 'recommendGuitar', + arguments: '{"id": 7}', + state: 'input-complete', + output: { id: 7 }, + }, + { + type: 'tool-result', + toolCallId: 'tc-1', + content: '{"id":7}', + state: 'complete', + }, + ], + } + + const result = uiMessageToModelMessages(uiMessage) + + // Should have: 1 assistant message + 1 tool result (NOT 2) + const toolMessages = result.filter((m) => m.role === 'tool') + expect(toolMessages).toHaveLength(1) + 
expect(toolMessages[0]?.toolCallId).toBe('tc-1') + }) + + it('should handle multi-round tool calls without duplicating results', () => { + // This scenario simulates the full multi-round message: + // text1 + getGuitars tool call + getGuitars result + text2 + recommendGuitar tool call + recommendGuitar result + const uiMessage: UIMessage = { + id: 'msg-1', + role: 'assistant', + parts: [ + { type: 'text', content: 'Let me check our inventory.' }, + { + type: 'tool-call', + id: 'tc-get', + name: 'getGuitars', + arguments: '', + state: 'input-complete', + }, + { + type: 'tool-result', + toolCallId: 'tc-get', + content: '[{"id":7,"name":"Travelin Man Guitar"}]', + state: 'complete', + }, + { type: 'text', content: 'I found a great guitar!' }, + { + type: 'tool-call', + id: 'tc-rec', + name: 'recommendGuitar', + arguments: '{"id": 7}', + state: 'input-complete', + output: { id: 7 }, + }, + { + type: 'tool-result', + toolCallId: 'tc-rec', + content: '{"id":7}', + state: 'complete', + }, + ], + } + + const result = uiMessageToModelMessages(uiMessage) + + // Should have exactly 2 tool result messages (one per tool call, no duplicates) + const toolMessages = result.filter((m) => m.role === 'tool') + expect(toolMessages).toHaveLength(2) + expect(toolMessages[0]?.toolCallId).toBe('tc-get') + expect(toolMessages[1]?.toolCallId).toBe('tc-rec') + }) + }) + + describe('modelMessagesToUIMessages', () => { + it('should convert simple user + assistant conversation', () => { + const modelMessages: Array = [ + { role: 'user', content: 'Hello' }, + { role: 'assistant', content: 'Hi there!' }, + ] + + const result = modelMessagesToUIMessages(modelMessages) + + expect(result.length).toBe(2) + expect(result[0]?.role).toBe('user') + expect(result[0]?.parts).toEqual([{ type: 'text', content: 'Hello' }]) + expect(result[1]?.role).toBe('assistant') + expect(result[1]?.parts).toEqual([{ type: 'text', content: 'Hi there!' 
}]) + }) + + it('should merge tool result into preceding assistant message', () => { + const modelMessages: Array = [ + { + role: 'assistant', + content: 'Let me check.', + toolCalls: [ + { + id: 'tc-1', + type: 'function', + function: { name: 'getWeather', arguments: '{"city":"NYC"}' }, + }, + ], + }, + { + role: 'tool', + content: '{"temp": 72}', + toolCallId: 'tc-1', + }, + ] + + const result = modelMessagesToUIMessages(modelMessages) + + // Tool result should be merged into the assistant message + expect(result.length).toBe(1) + expect(result[0]?.role).toBe('assistant') + expect(result[0]?.parts).toEqual([ + { type: 'text', content: 'Let me check.' }, + { + type: 'tool-call', + id: 'tc-1', + name: 'getWeather', + arguments: '{"city":"NYC"}', + state: 'input-complete', + }, + { + type: 'tool-result', + toolCallId: 'tc-1', + content: '{"temp": 72}', + state: 'complete', + }, + ]) + }) + + it('should handle multi-round tool flow with proper merging', () => { + const modelMessages: Array = [ + { + role: 'assistant', + content: 'Checking inventory.', + toolCalls: [ + { + id: 'tc-1', + type: 'function', + function: { name: 'getGuitars', arguments: '' }, + }, + ], + }, + { + role: 'tool', + content: '[{"id":7}]', + toolCallId: 'tc-1', + }, + { + role: 'assistant', + content: 'Found one! Recommending.', + toolCalls: [ + { + id: 'tc-2', + type: 'function', + function: { name: 'recommend', arguments: '{"id":7}' }, + }, + ], + }, + { + role: 'tool', + content: '{"recommended":true}', + toolCallId: 'tc-2', + }, + ] + + const result = modelMessagesToUIMessages(modelMessages) + + // Each assistant message should have its tool result merged in + expect(result.length).toBe(2) + + expect(result[0]?.parts).toEqual([ + { type: 'text', content: 'Checking inventory.' 
}, + { + type: 'tool-call', + id: 'tc-1', + name: 'getGuitars', + arguments: '', + state: 'input-complete', + }, + { + type: 'tool-result', + toolCallId: 'tc-1', + content: '[{"id":7}]', + state: 'complete', + }, + ]) + + expect(result[1]?.parts).toEqual([ + { type: 'text', content: 'Found one! Recommending.' }, + { + type: 'tool-call', + id: 'tc-2', + name: 'recommend', + arguments: '{"id":7}', + state: 'input-complete', + }, + { + type: 'tool-result', + toolCallId: 'tc-2', + content: '{"recommended":true}', + state: 'complete', + }, + ]) + }) + + it('should create standalone message for orphan tool result', () => { + const modelMessages: Array = [ + { + role: 'tool', + content: '{"result": "orphan"}', + toolCallId: 'tc-1', + }, + ] + + const result = modelMessagesToUIMessages(modelMessages) + + expect(result.length).toBe(1) + expect(result[0]?.role).toBe('assistant') + expect(result[0]?.parts).toContainEqual({ + type: 'tool-result', + toolCallId: 'tc-1', + content: '{"result": "orphan"}', + state: 'complete', + }) + }) + + it('should not merge tool result across user messages', () => { + const modelMessages: Array = [ + { role: 'user', content: 'Hi' }, + { role: 'assistant', content: 'Hello!' 
}, + { role: 'user', content: 'Another question' }, + { + role: 'tool', + content: '{"result": "orphan"}', + toolCallId: 'tc-1', + }, + ] + + const result = modelMessagesToUIMessages(modelMessages) + + // Tool result should NOT merge into the assistant message (user message in between) + expect(result.length).toBe(4) + expect(result[3]?.role).toBe('assistant') + expect(result[3]?.parts).toContainEqual({ + type: 'tool-result', + toolCallId: 'tc-1', + content: '{"result": "orphan"}', + state: 'complete', + }) + }) + + it('should handle complex interleaved conversation', () => { + const modelMessages: Array = [ + { role: 'user', content: 'Check the weather' }, + { + role: 'assistant', + content: null, + toolCalls: [ + { + id: 'tc-1', + type: 'function', + function: { name: 'getWeather', arguments: '{"city":"NYC"}' }, + }, + ], + }, + { role: 'tool', content: '{"temp":72}', toolCallId: 'tc-1' }, + { role: 'assistant', content: 'The temperature is 72F.' }, + ] + + const result = modelMessagesToUIMessages(modelMessages) + + expect(result.length).toBe(3) + expect(result[0]?.role).toBe('user') + + // Assistant with tool call + merged tool result + expect(result[1]?.role).toBe('assistant') + const assistantParts = result[1]?.parts || [] + expect(assistantParts).toContainEqual({ + type: 'tool-call', + id: 'tc-1', + name: 'getWeather', + arguments: '{"city":"NYC"}', + state: 'input-complete', + }) + expect(assistantParts).toContainEqual({ + type: 'tool-result', + toolCallId: 'tc-1', + content: '{"temp":72}', + state: 'complete', + }) + + // Final assistant text + expect(result[2]?.role).toBe('assistant') + expect(result[2]?.parts).toEqual([ + { type: 'text', content: 'The temperature is 72F.' 
}, + ]) + }) + }) + + describe('convertMessagesToModelMessages', () => { + it('should pass through ModelMessages unchanged', () => { + const messages: Array = [ + { role: 'user', content: 'Hello' }, + { role: 'assistant', content: 'Hi' }, + ] + + const result = convertMessagesToModelMessages(messages) + + expect(result).toEqual(messages) + }) + + it('should convert UIMessages to ModelMessages', () => { + const messages: Array = [ + { + id: 'msg-1', + role: 'user', + parts: [{ type: 'text', content: 'Hello' }], + }, + ] + + const result = convertMessagesToModelMessages(messages) + + expect(result).toEqual([{ role: 'user', content: 'Hello' }]) + }) + + it('should handle mixed UIMessage and ModelMessage array', () => { + const messages: Array = [ + { + id: 'msg-1', + role: 'user', + parts: [{ type: 'text', content: 'Hello' }], + }, + { role: 'assistant', content: 'Hi there!' }, + ] + + const result = convertMessagesToModelMessages(messages) + + expect(result).toEqual([ + { role: 'user', content: 'Hello' }, + { role: 'assistant', content: 'Hi there!' 
}, + ]) + }) + + it('should handle empty array', () => { + const result = convertMessagesToModelMessages([]) + expect(result).toEqual([]) + }) + }) + + describe('normalizeToUIMessage', () => { + it('should pass through UIMessage with existing id and createdAt', () => { + const date = new Date('2025-01-01') + const message: UIMessage = { + id: 'existing-id', + role: 'user', + parts: [{ type: 'text', content: 'Hello' }], + createdAt: date, + } + + const result = normalizeToUIMessage(message, () => 'generated-id') + + expect(result.id).toBe('existing-id') + expect(result.createdAt).toBe(date) + expect(result.parts).toEqual([{ type: 'text', content: 'Hello' }]) + }) + + it('should generate id for UIMessage without id', () => { + const message = { + id: '', + role: 'user' as const, + parts: [{ type: 'text' as const, content: 'Hello' }], + } + + const result = normalizeToUIMessage(message, () => 'generated-id') + + expect(result.id).toBe('generated-id') + expect(result.createdAt).toBeTruthy() + }) + + it('should convert ModelMessage to UIMessage', () => { + const message: ModelMessage = { + role: 'user', + content: 'Hello', + } + + const result = normalizeToUIMessage(message, () => 'generated-id') + + expect(result.id).toBe('generated-id') + expect(result.role).toBe('user') + expect(result.parts).toEqual([{ type: 'text', content: 'Hello' }]) + expect(result.createdAt).toBeTruthy() + }) + }) + + describe('Round-trip symmetry: UI -> Model -> UI', () => { + it('should round-trip simple text user message', () => { + const original: UIMessage = { + id: 'msg-1', + role: 'user', + parts: [{ type: 'text', content: 'Hello world' }], + } + + const modelMessages = uiMessageToModelMessages(original) + const uiMessages = modelMessagesToUIMessages(modelMessages) + + expect(uiMessages.length).toBe(1) + expect(uiMessages[0]?.role).toBe(original.role) + expect(uiMessages[0]?.parts).toEqual(original.parts) + }) + + it('should round-trip assistant with tool-call + tool-result', () => { + 
const original: UIMessage = { + id: 'msg-1', + role: 'assistant', + parts: [ + { type: 'text', content: 'Let me check.' }, + { + type: 'tool-call', + id: 'tc-1', + name: 'getWeather', + arguments: '{"city":"NYC"}', + state: 'input-complete', + }, + { + type: 'tool-result', + toolCallId: 'tc-1', + content: '{"temp": 72}', + state: 'complete', + }, + ], + } + + const modelMessages = uiMessageToModelMessages(original) + const uiMessages = modelMessagesToUIMessages(modelMessages) + + // Should produce a single assistant UIMessage with all parts merged back + expect(uiMessages.length).toBe(1) + expect(uiMessages[0]?.role).toBe('assistant') + expect(uiMessages[0]?.parts).toEqual(original.parts) + }) + + it('should round-trip multimodal user message with image', () => { + const original: UIMessage = { + id: 'msg-1', + role: 'user', + parts: [ + { type: 'text', content: 'What is this?' }, + { + type: 'image', + source: { type: 'url', value: 'https://example.com/img.jpg' }, + }, + ], + } + + const modelMessages = uiMessageToModelMessages(original) + const uiMessages = modelMessagesToUIMessages(modelMessages) + + expect(uiMessages.length).toBe(1) + expect(uiMessages[0]?.role).toBe('user') + expect(uiMessages[0]?.parts).toEqual(original.parts) + }) + + it('should round-trip multi-round tool flow', () => { + const original: UIMessage = { + id: 'msg-1', + role: 'assistant', + parts: [ + { type: 'text', content: 'Checking inventory.' }, + { + type: 'tool-call', + id: 'tc-1', + name: 'getGuitars', + arguments: '', + state: 'input-complete', + }, + { + type: 'tool-result', + toolCallId: 'tc-1', + content: '[{"id":7}]', + state: 'complete', + }, + { type: 'text', content: 'Found one!' 
}, + { + type: 'tool-call', + id: 'tc-2', + name: 'recommend', + arguments: '{"id":7}', + state: 'input-complete', + }, + { + type: 'tool-result', + toolCallId: 'tc-2', + content: '{"recommended":true}', + state: 'complete', + }, + ], + } + + const modelMessages = uiMessageToModelMessages(original) + const uiMessages = modelMessagesToUIMessages(modelMessages) + + // Multi-round should produce multiple UIMessages (one per segment) + // but when recombined, the structure should match segments + expect(uiMessages.length).toBe(2) + + // First segment: text + tool-call + tool-result + expect(uiMessages[0]?.parts).toEqual([ + { type: 'text', content: 'Checking inventory.' }, + { + type: 'tool-call', + id: 'tc-1', + name: 'getGuitars', + arguments: '', + state: 'input-complete', + }, + { + type: 'tool-result', + toolCallId: 'tc-1', + content: '[{"id":7}]', + state: 'complete', + }, + ]) + + // Second segment: text + tool-call + tool-result + expect(uiMessages[1]?.parts).toEqual([ + { type: 'text', content: 'Found one!' }, + { + type: 'tool-call', + id: 'tc-2', + name: 'recommend', + arguments: '{"id":7}', + state: 'input-complete', + }, + { + type: 'tool-result', + toolCallId: 'tc-2', + content: '{"recommended":true}', + state: 'complete', + }, + ]) + }) + }) + + describe('Round-trip symmetry: Model -> UI -> Model', () => { + it('should round-trip simple text messages', () => { + const original: Array = [ + { role: 'user', content: 'Hello' }, + { role: 'assistant', content: 'Hi there!' 
}, + ] + + const uiMessages = modelMessagesToUIMessages(original) + const modelMessages = convertMessagesToModelMessages(uiMessages) + + expect(modelMessages).toEqual(original) + }) + + it('should round-trip assistant with toolCalls + tool result', () => { + const original: Array = [ + { + role: 'assistant', + content: 'Let me check.', + toolCalls: [ + { + id: 'tc-1', + type: 'function', + function: { name: 'getWeather', arguments: '{"city":"NYC"}' }, + }, + ], + }, + { + role: 'tool', + content: '{"temp": 72}', + toolCallId: 'tc-1', + }, + ] + + const uiMessages = modelMessagesToUIMessages(original) + const modelMessages = convertMessagesToModelMessages(uiMessages) + + expect(modelMessages).toEqual(original) + }) + + it('should round-trip multimodal content array', () => { + const original: Array = [ + { + role: 'user', + content: [ + { type: 'text', content: 'What is this?' }, + { + type: 'image', + source: { type: 'url', value: 'https://example.com/img.jpg' }, + }, + ], + }, + ] + + const uiMessages = modelMessagesToUIMessages(original) + const modelMessages = convertMessagesToModelMessages(uiMessages) + + expect(modelMessages).toEqual(original) + }) + + it('should round-trip multi-round tool conversation', () => { + const original: Array = [ + { role: 'user', content: 'Check guitars' }, + { + role: 'assistant', + content: 'Checking.', + toolCalls: [ + { + id: 'tc-1', + type: 'function', + function: { name: 'getGuitars', arguments: '' }, + }, + ], + }, + { role: 'tool', content: '[{"id":7}]', toolCallId: 'tc-1' }, + { + role: 'assistant', + content: 'Found one!', + toolCalls: [ + { + id: 'tc-2', + type: 'function', + function: { name: 'recommend', arguments: '{"id":7}' }, + }, + ], + }, + { + role: 'tool', + content: '{"recommended":true}', + toolCallId: 'tc-2', + }, + { role: 'assistant', content: 'Here is my recommendation.' 
}, + ] + + const uiMessages = modelMessagesToUIMessages(original) + const modelMessages = convertMessagesToModelMessages(uiMessages) + + expect(modelMessages).toEqual(original) + }) + + it('should round-trip assistant with null content and toolCalls', () => { + const original: Array = [ + { + role: 'assistant', + content: null, + toolCalls: [ + { + id: 'tc-1', + type: 'function', + function: { name: 'getWeather', arguments: '{}' }, + }, + ], + }, + { role: 'tool', content: '{"temp":72}', toolCallId: 'tc-1' }, + ] + + const uiMessages = modelMessagesToUIMessages(original) + const modelMessages = convertMessagesToModelMessages(uiMessages) + + expect(modelMessages).toEqual(original) + }) + }) + + describe('uiMessageToModelMessages - approval response handling', () => { + it('should emit pendingExecution marker for approved client tool', () => { + const uiMessage: UIMessage = { + id: 'msg-1', + role: 'assistant', + parts: [ + { type: 'text', content: 'Let me delete that for you.' }, + { + type: 'tool-call', + id: 'call_123', + name: 'delete_local_data', + arguments: '{"key":"myKey"}', + state: 'approval-responded', + approval: { + id: 'approval_call_123', + needsApproval: true, + approved: true, + }, + }, + ], + } + + const result = uiMessageToModelMessages(uiMessage) + + // Should produce: assistant message with text + toolCall, then a tool result + expect(result.length).toBeGreaterThanOrEqual(2) + + // The assistant message should include the tool call + const assistantMsg = result.find( + (m) => m.role === 'assistant' && m.toolCalls, + ) + expect(assistantMsg).toBeDefined() + expect(assistantMsg!.toolCalls).toHaveLength(1) + expect(assistantMsg!.toolCalls![0]!.id).toBe('call_123') + + // The tool result message should have pendingExecution marker + const toolMsg = result.find( + (m) => m.role === 'tool' && m.toolCallId === 'call_123', + ) + expect(toolMsg).toBeDefined() + const content = JSON.parse(toolMsg!.content as string) + expect(content.approved).toBe(true) + 
expect(content.pendingExecution).toBe(true) + }) + + it('should emit declined message for denied client tool without pendingExecution', () => { + const uiMessage: UIMessage = { + id: 'msg-1', + role: 'assistant', + parts: [ + { + type: 'tool-call', + id: 'call_456', + name: 'delete_local_data', + arguments: '{"key":"myKey"}', + state: 'approval-responded', + approval: { + id: 'approval_call_456', + needsApproval: true, + approved: false, + }, + }, + ], + } + + const result = uiMessageToModelMessages(uiMessage) + + const toolMsg = result.find( + (m) => m.role === 'tool' && m.toolCallId === 'call_456', + ) + expect(toolMsg).toBeDefined() + const content = JSON.parse(toolMsg!.content as string) + expect(content.approved).toBe(false) + expect(content.pendingExecution).toBeUndefined() + expect(content.message).toBe('User denied this action') + }) }) }) diff --git a/packages/typescript/ai/tests/stream-processor.test.ts b/packages/typescript/ai/tests/stream-processor.test.ts index 3358e297..ddb7f812 100644 --- a/packages/typescript/ai/tests/stream-processor.test.ts +++ b/packages/typescript/ai/tests/stream-processor.test.ts @@ -1,168 +1,1801 @@ -import { describe, expect, it } from 'vitest' -import { StreamProcessor } from '../src/activities/chat/stream/processor' -import type { StreamChunk } from '../src/types' +import { type Mock, describe, expect, it, vi } from 'vitest' +import { + StreamProcessor, + createReplayStream, +} from '../src/activities/chat/stream/processor' +import type { StreamProcessorEvents } from '../src/activities/chat/stream/processor' +import type { ChunkStrategy } from '../src/activities/chat/stream/types' +import type { + StreamChunk, + ToolCallPart, + ToolResultPart, + UIMessage, +} from '../src/types' + +// ============================================================================ +// Helpers +// ============================================================================ + +/** Create a typed StreamChunk with minimal boilerplate. 
*/ +function chunk( + type: T, + fields: Omit, 'type' | 'timestamp'>, +): StreamChunk { + return { type, timestamp: Date.now(), ...fields } as unknown as StreamChunk +} + +/** Create an async iterable from a list of chunks. */ +async function* streamOf( + ...chunks: Array +): AsyncIterable { + for (const c of chunks) { + yield c + } +} + +/** Shorthand for common event sequences. */ +const ev = { + runStarted: (runId = 'run-1') => chunk('RUN_STARTED', { runId }), + textStart: (messageId = 'msg-1') => + chunk('TEXT_MESSAGE_START', { messageId, role: 'assistant' as const }), + textContent: (delta: string, messageId = 'msg-1') => + chunk('TEXT_MESSAGE_CONTENT', { messageId, delta }), + textEnd: (messageId = 'msg-1') => chunk('TEXT_MESSAGE_END', { messageId }), + toolStart: (toolCallId: string, toolName: string, index?: number) => + chunk('TOOL_CALL_START', { + toolCallId, + toolName, + ...(index !== undefined ? { index } : {}), + }), + toolArgs: (toolCallId: string, delta: string) => + chunk('TOOL_CALL_ARGS', { toolCallId, delta }), + toolEnd: ( + toolCallId: string, + toolName: string, + opts?: { input?: unknown; result?: string }, + ) => chunk('TOOL_CALL_END', { toolCallId, toolName, ...opts }), + runFinished: ( + finishReason: + | 'stop' + | 'length' + | 'content_filter' + | 'tool_calls' + | null = 'stop', + runId = 'run-1', + ) => chunk('RUN_FINISHED', { runId, finishReason }), + runError: (message: string, runId = 'run-1') => + chunk('RUN_ERROR', { runId, error: { message } }), + stepFinished: (delta: string, stepId = 'step-1') => + chunk('STEP_FINISHED', { stepId, delta }), + custom: (name: string, data?: unknown) => chunk('CUSTOM', { name, data }), +} + +/** Events object with vi.fn() mocks for assertions. */ +type MockedEvents = { + [K in keyof Required]: Required[K] & + Mock +} + +/** Create a spy-laden events object for assertions. 
*/ +function spyEvents(): MockedEvents { + return { + onMessagesChange: vi.fn(), + onStreamStart: vi.fn(), + onStreamEnd: vi.fn(), + onError: vi.fn(), + onToolCall: vi.fn(), + onApprovalRequest: vi.fn(), + onTextUpdate: vi.fn(), + onToolCallStateChange: vi.fn(), + onThinkingUpdate: vi.fn(), + } as MockedEvents +} + +// ============================================================================ +// Tests +// ============================================================================ describe('StreamProcessor', () => { - describe('handleTextMessageContentEvent', () => { - it('should handle TEXT_MESSAGE_CONTENT with delta', () => { + // ========================================================================== + // Constructor and options + // ========================================================================== + describe('constructor and options', () => { + it('should initialize with default options', () => { const processor = new StreamProcessor() - processor.startAssistantMessage() + expect(processor.getMessages()).toEqual([]) + expect(processor.getCurrentAssistantMessageId()).toBeNull() + expect(processor.getState().done).toBe(false) + }) - processor.processChunk({ - type: 'TEXT_MESSAGE_CONTENT', - messageId: 'msg-1', - delta: 'Hello', - model: 'test', - timestamp: Date.now(), - } as StreamChunk) + it('should accept initialMessages', () => { + const initial: Array = [ + { id: 'u1', role: 'user', parts: [{ type: 'text', content: 'hi' }] }, + ] + const processor = new StreamProcessor({ initialMessages: initial }) + expect(processor.getMessages()).toHaveLength(1) + expect(processor.getMessages()[0]!.id).toBe('u1') + }) - processor.processChunk({ - type: 'TEXT_MESSAGE_CONTENT', - messageId: 'msg-1', - delta: ' world', - model: 'test', - timestamp: Date.now(), - } as StreamChunk) + it('should not mutate the initialMessages array', () => { + const initial: Array = [ + { id: 'u1', role: 'user', parts: [{ type: 'text', content: 'hi' }] }, + ] + const processor = 
new StreamProcessor({ initialMessages: initial }) + processor.addUserMessage('second') + // Original array should be unchanged + expect(initial).toHaveLength(1) + expect(processor.getMessages()).toHaveLength(2) + }) - processor.processChunk({ - type: 'RUN_FINISHED', - model: 'test', - timestamp: Date.now(), - finishReason: 'stop', - } as StreamChunk) + it('should use custom chunkStrategy', () => { + const shouldEmit = vi.fn().mockReturnValue(false) + const strategy: ChunkStrategy = { shouldEmit } - const messages = processor.getMessages() - expect(messages).toHaveLength(1) - expect(messages[0]?.parts).toHaveLength(1) - expect(messages[0]?.parts[0]).toEqual({ + const processor = new StreamProcessor({ chunkStrategy: strategy }) + processor.prepareAssistantMessage() + + processor.processChunk(ev.textContent('Hello')) + + // Strategy was consulted + expect(shouldEmit).toHaveBeenCalledWith('Hello', 'Hello') + + // Since strategy returned false, the text part is NOT emitted to the message + const msg = processor.getMessages()[0]! 
+ expect(msg.parts).toHaveLength(0) + }) + + it('should use custom jsonParser', () => { + const parse = vi.fn().mockReturnValue({ custom: true }) + const processor = new StreamProcessor({ jsonParser: { parse } }) + processor.prepareAssistantMessage() + + processor.processChunk(ev.toolStart('tc-1', 'myTool')) + processor.processChunk(ev.toolArgs('tc-1', '{"key":"val"}')) + + expect(parse).toHaveBeenCalledWith('{"key":"val"}') + expect( + processor.getState().toolCalls.get('tc-1')?.parsedArguments, + ).toEqual({ custom: true }) + }) + }) + + // ========================================================================== + // Message management + // ========================================================================== + describe('message management', () => { + it('setMessages should replace messages and emit change', () => { + const events = spyEvents() + const processor = new StreamProcessor({ events }) + const msgs: Array = [ + { + id: 'a1', + role: 'assistant', + parts: [{ type: 'text', content: 'hello' }], + }, + ] + processor.setMessages(msgs) + + expect(processor.getMessages()).toHaveLength(1) + expect(processor.getMessages()[0]!.id).toBe('a1') + expect(events.onMessagesChange).toHaveBeenCalledTimes(1) + }) + + it('setMessages should shallow-copy the input array', () => { + const processor = new StreamProcessor() + const msgs: Array = [ + { id: 'a1', role: 'assistant', parts: [] }, + ] + processor.setMessages(msgs) + msgs.push({ id: 'a2', role: 'user', parts: [] }) + expect(processor.getMessages()).toHaveLength(1) + }) + + it('addUserMessage with string content', () => { + const events = spyEvents() + const processor = new StreamProcessor({ events }) + const msg = processor.addUserMessage('Hello!') + + expect(msg.role).toBe('user') + expect(msg.parts).toEqual([{ type: 'text', content: 'Hello!' 
}]) + expect(msg.id).toBeTruthy() + expect(msg.createdAt).toBeInstanceOf(Date) + expect(processor.getMessages()).toHaveLength(1) + expect(events.onMessagesChange).toHaveBeenCalledTimes(1) + }) + + it('addUserMessage with multimodal content array', () => { + const processor = new StreamProcessor() + const msg = processor.addUserMessage([ + { type: 'text', content: 'What is this?' }, + { + type: 'image', + source: { type: 'url', value: 'https://example.com/img.png' }, + } as any, + ]) + + expect(msg.parts).toHaveLength(2) + expect(msg.parts[0]!.type).toBe('text') + expect(msg.parts[1]!.type).toBe('image') + }) + + it('addUserMessage with custom id', () => { + const processor = new StreamProcessor() + const msg = processor.addUserMessage('Hello', 'custom-id-42') + expect(msg.id).toBe('custom-id-42') + }) + + it('clearMessages should remove all messages and reset assistantMessageId', () => { + const events = spyEvents() + const processor = new StreamProcessor({ events }) + processor.addUserMessage('one') + processor.addUserMessage('two') + events.onMessagesChange.mockClear() + + processor.clearMessages() + expect(processor.getMessages()).toHaveLength(0) + expect(processor.getCurrentAssistantMessageId()).toBeNull() + expect(events.onMessagesChange).toHaveBeenCalledTimes(1) + }) + + it('removeMessagesAfter should truncate after the given index', () => { + const events = spyEvents() + const processor = new StreamProcessor({ events }) + processor.addUserMessage('zero') + processor.addUserMessage('one') + processor.addUserMessage('two') + events.onMessagesChange.mockClear() + + processor.removeMessagesAfter(0) + expect(processor.getMessages()).toHaveLength(1) + expect(processor.getMessages()[0]!.parts[0]).toEqual({ type: 'text', - content: 'Hello world', + content: 'zero', }) + expect(events.onMessagesChange).toHaveBeenCalledTimes(1) }) - it('should handle TEXT_MESSAGE_CONTENT with undefined delta (issue #257)', () => { + it('toModelMessages should convert all messages', () 
=> { const processor = new StreamProcessor() - processor.startAssistantMessage() + processor.addUserMessage('Hello') - // Simulate a chunk where delta is undefined (which can happen in practice) - processor.processChunk({ - type: 'TEXT_MESSAGE_CONTENT', - messageId: 'msg-1', - delta: undefined, - content: 'Hello', - model: 'test', - timestamp: Date.now(), - } as unknown as StreamChunk) + // Simulate an assistant response + processor.prepareAssistantMessage() + processor.processChunk(ev.textContent('Hi there')) + processor.processChunk(ev.runFinished('stop')) + processor.finalizeStream() - processor.processChunk({ - type: 'TEXT_MESSAGE_CONTENT', - messageId: 'msg-1', - delta: undefined, - content: 'Hello world', - model: 'test', - timestamp: Date.now(), - } as unknown as StreamChunk) + const modelMsgs = processor.toModelMessages() + expect(modelMsgs.length).toBeGreaterThanOrEqual(2) + expect(modelMsgs[0]!.role).toBe('user') + expect(modelMsgs[1]!.role).toBe('assistant') + }) + }) - processor.processChunk({ - type: 'RUN_FINISHED', - model: 'test', - timestamp: Date.now(), - finishReason: 'stop', - } as StreamChunk) + // ========================================================================== + // process() full stream + // ========================================================================== + describe('process() full stream', () => { + it('should consume async iterable and return ProcessorResult', async () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + const result = await processor.process( + streamOf( + ev.runStarted(), + ev.textStart(), + ev.textContent('Hello'), + ev.textContent(' world!'), + ev.textEnd(), + ev.runFinished('stop'), + ), + ) + + expect(result.content).toBe('Hello world!') + expect(result.finishReason).toBe('stop') + expect(result.toolCalls).toBeUndefined() + expect(result.thinking).toBeUndefined() + }) + + it('should return toolCalls in ProcessorResult when tool calls present', async () => { + 
const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + const result = await processor.process( + streamOf( + ev.runStarted(), + ev.toolStart('tc-1', 'getWeather'), + ev.toolArgs('tc-1', '{"city":"NYC"}'), + ev.toolEnd('tc-1', 'getWeather', { input: { city: 'NYC' } }), + ev.runFinished('tool_calls'), + ), + ) + + expect(result.content).toBe('') + expect(result.finishReason).toBe('tool_calls') + expect(result.toolCalls).toHaveLength(1) + expect(result.toolCalls![0]!.function.name).toBe('getWeather') + expect(result.toolCalls![0]!.function.arguments).toBe('{"city":"NYC"}') + }) + + it('should return thinking in ProcessorResult when thinking present', async () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + const result = await processor.process( + streamOf( + ev.runStarted(), + ev.stepFinished('Let me think'), + ev.stepFinished('...'), + ev.textStart(), + ev.textContent('Answer'), + ev.textEnd(), + ev.runFinished('stop'), + ), + ) + + expect(result.content).toBe('Answer') + expect(result.thinking).toBe('Let me think...') + }) + + it('should call finalizeStream after consuming the stream', async () => { + const events = spyEvents() + const processor = new StreamProcessor({ events }) + processor.prepareAssistantMessage() + + await processor.process( + streamOf( + ev.runStarted(), + ev.textStart(), + ev.textContent('Done'), + ev.textEnd(), + ev.runFinished('stop'), + ), + ) + + // onStreamEnd should have been called (by finalizeStream) + expect(events.onStreamEnd).toHaveBeenCalledTimes(1) + }) + }) + + // ========================================================================== + // Single-shot text + // ========================================================================== + describe('single-shot text', () => { + it('should handle full text-only flow: RUN_STARTED -> TEXT_MESSAGE_START -> TEXT_MESSAGE_CONTENT* -> TEXT_MESSAGE_END -> RUN_FINISHED(stop)', () => { + const processor = new 
StreamProcessor() + processor.prepareAssistantMessage() + + processor.processChunk(ev.runStarted()) + processor.processChunk(ev.textStart()) + processor.processChunk(ev.textContent('Hello')) + processor.processChunk(ev.textContent(' world')) + processor.processChunk(ev.textContent('!')) + processor.processChunk(ev.textEnd()) + processor.processChunk(ev.runFinished('stop')) + + processor.finalizeStream() const messages = processor.getMessages() expect(messages).toHaveLength(1) - expect(messages[0]?.parts).toHaveLength(1) - // Should NOT contain "undefined" string - expect(messages[0]?.parts[0]).toEqual({ + expect(messages[0]!.role).toBe('assistant') + expect(messages[0]!.parts).toHaveLength(1) + expect(messages[0]!.parts[0]).toEqual({ type: 'text', - content: 'Hello world', + content: 'Hello world!', }) + + const state = processor.getState() + expect(state.content).toBe('Hello world!') + expect(state.finishReason).toBe('stop') + expect(state.done).toBe(true) }) - it('should handle TEXT_MESSAGE_CONTENT with empty delta', () => { + it('should handle TEXT_MESSAGE_CONTENT with delta (existing test preserved)', () => { const processor = new StreamProcessor() - processor.startAssistantMessage() - - // Empty delta should fall back to content - processor.processChunk({ - type: 'TEXT_MESSAGE_CONTENT', - messageId: 'msg-1', - delta: '', - content: 'Hello', - model: 'test', - timestamp: Date.now(), - } as StreamChunk) + processor.prepareAssistantMessage() - processor.processChunk({ - type: 'RUN_FINISHED', - model: 'test', - timestamp: Date.now(), - finishReason: 'stop', - } as StreamChunk) + processor.processChunk(ev.textContent('Hello')) + processor.processChunk(ev.textContent(' world')) + processor.processChunk(ev.runFinished('stop')) const messages = processor.getMessages() expect(messages).toHaveLength(1) expect(messages[0]?.parts).toHaveLength(1) expect(messages[0]?.parts[0]).toEqual({ type: 'text', - content: 'Hello', + content: 'Hello world', }) }) + }) - it('should 
handle TEXT_MESSAGE_CONTENT with only content (no delta)', () => { + // ========================================================================== + // Single-shot tool call + // ========================================================================== + describe('single-shot tool call', () => { + it('should handle tool call only (no text)', () => { const processor = new StreamProcessor() - processor.startAssistantMessage() + processor.prepareAssistantMessage() - // Some servers may only send content without delta - processor.processChunk({ - type: 'TEXT_MESSAGE_CONTENT', - messageId: 'msg-1', - content: 'Hello', - model: 'test', - timestamp: Date.now(), - } as unknown as StreamChunk) + processor.processChunk(ev.runStarted()) + processor.processChunk(ev.toolStart('call_1', 'getWeather')) + processor.processChunk(ev.toolArgs('call_1', '{"city":')) + processor.processChunk(ev.toolArgs('call_1', '"NYC"}')) + processor.processChunk(ev.toolEnd('call_1', 'getWeather')) + processor.processChunk(ev.runFinished('tool_calls')) - processor.processChunk({ - type: 'TEXT_MESSAGE_CONTENT', - messageId: 'msg-1', - content: 'Hello world', - model: 'test', - timestamp: Date.now(), - } as unknown as StreamChunk) + processor.finalizeStream() - processor.processChunk({ - type: 'RUN_FINISHED', - model: 'test', - timestamp: Date.now(), - finishReason: 'stop', - } as StreamChunk) + const messages = processor.getMessages() + expect(messages).toHaveLength(1) + expect(messages[0]!.parts).toHaveLength(1) + + const toolCallPart = messages[0]!.parts[0] as ToolCallPart + expect(toolCallPart.type).toBe('tool-call') + expect(toolCallPart.id).toBe('call_1') + expect(toolCallPart.name).toBe('getWeather') + expect(toolCallPart.arguments).toBe('{"city":"NYC"}') + expect(toolCallPart.state).toBe('input-complete') + + const state = processor.getState() + expect(state.content).toBe('') + expect(state.finishReason).toBe('tool_calls') + }) + + it('should handle text then tool call', () => { + const 
processor = new StreamProcessor() + processor.prepareAssistantMessage() + + processor.processChunk(ev.runStarted()) + processor.processChunk(ev.textStart()) + processor.processChunk(ev.textContent('Let me check.')) + processor.processChunk(ev.textEnd()) + processor.processChunk(ev.toolStart('call_1', 'getWeather')) + processor.processChunk(ev.toolArgs('call_1', '{"city":"NYC"}')) + processor.processChunk( + ev.toolEnd('call_1', 'getWeather', { input: { city: 'NYC' } }), + ) + processor.processChunk(ev.runFinished('tool_calls')) + + processor.finalizeStream() const messages = processor.getMessages() expect(messages).toHaveLength(1) - expect(messages[0]?.parts).toHaveLength(1) - expect(messages[0]?.parts[0]).toEqual({ + expect(messages[0]!.parts).toHaveLength(2) + expect(messages[0]!.parts[0]).toEqual({ type: 'text', - content: 'Hello world', + content: 'Let me check.', }) + + const tcPart = messages[0]!.parts[1] as ToolCallPart + expect(tcPart.type).toBe('tool-call') + expect(tcPart.state).toBe('input-complete') }) - it('should have empty parts when no TEXT_MESSAGE_CONTENT is received', () => { + it('should track state transitions: awaiting-input -> input-streaming -> input-complete', () => { const processor = new StreamProcessor() - processor.startAssistantMessage() + processor.prepareAssistantMessage() - // Only RUN_FINISHED without any text content - processor.processChunk({ - type: 'RUN_FINISHED', - model: 'test', - timestamp: Date.now(), - finishReason: 'stop', - } as StreamChunk) + processor.processChunk(ev.toolStart('tc-1', 'getWeather')) + expect(processor.getState().toolCalls.get('tc-1')?.state).toBe( + 'awaiting-input', + ) + + processor.processChunk(ev.toolArgs('tc-1', '{"city":')) + expect(processor.getState().toolCalls.get('tc-1')?.state).toBe( + 'input-streaming', + ) + + processor.processChunk(ev.toolArgs('tc-1', '"NYC"}')) + expect(processor.getState().toolCalls.get('tc-1')?.state).toBe( + 'input-streaming', + ) + + 
processor.processChunk(ev.toolEnd('tc-1', 'getWeather')) + expect(processor.getState().toolCalls.get('tc-1')?.state).toBe( + 'input-complete', + ) + }) + + it('should use chunk.input as canonical parsed arguments (existing test preserved)', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + processor.processChunk(ev.toolStart('tc-1', 'getWeather')) + processor.processChunk( + ev.toolEnd('tc-1', 'getWeather', { + input: { city: 'NYC', unit: 'celsius' }, + }), + ) + + const state = processor.getState() + const toolCall = state.toolCalls.get('tc-1') + expect(toolCall?.state).toBe('input-complete') + expect(toolCall?.parsedArguments).toEqual({ + city: 'NYC', + unit: 'celsius', + }) + }) + + it('should default tool call index to toolCalls.size when index is not provided', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + processor.processChunk(ev.toolStart('tc-1', 'toolA')) + processor.processChunk(ev.toolStart('tc-2', 'toolB')) + + const state = processor.getState() + expect(state.toolCalls.get('tc-1')?.index).toBe(0) + expect(state.toolCalls.get('tc-2')?.index).toBe(1) + }) + + it('should use provided index when available', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + processor.processChunk(ev.toolStart('tc-1', 'toolA', 5)) + + expect(processor.getState().toolCalls.get('tc-1')?.index).toBe(5) + }) + }) + + // ========================================================================== + // Parallel tool calls + // ========================================================================== + describe('parallel tool calls', () => { + it('should handle interleaved parallel tool calls', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + // Interleaved: both STARTs, then both ARGS, then both ENDs + processor.processChunk(ev.runStarted()) + processor.processChunk(ev.toolStart('call_1', 'getWeather', 0)) + 
processor.processChunk(ev.toolStart('call_2', 'getTime', 1)) + processor.processChunk(ev.toolArgs('call_1', '{"city":"NYC"}')) + processor.processChunk(ev.toolArgs('call_2', '{"tz":"EST"}')) + processor.processChunk(ev.toolEnd('call_1', 'getWeather')) + processor.processChunk(ev.toolEnd('call_2', 'getTime')) + processor.processChunk(ev.runFinished('tool_calls')) + + processor.finalizeStream() const messages = processor.getMessages() expect(messages).toHaveLength(1) - // Parts should be empty when no content was received - expect(messages[0]?.parts).toHaveLength(0) + expect(messages[0]!.parts).toHaveLength(2) + + const tc1 = messages[0]!.parts[0] as ToolCallPart + const tc2 = messages[0]!.parts[1] as ToolCallPart + expect(tc1.id).toBe('call_1') + expect(tc1.name).toBe('getWeather') + expect(tc1.arguments).toBe('{"city":"NYC"}') + expect(tc1.state).toBe('input-complete') + + expect(tc2.id).toBe('call_2') + expect(tc2.name).toBe('getTime') + expect(tc2.arguments).toBe('{"tz":"EST"}') + expect(tc2.state).toBe('input-complete') + }) + + it('should handle sequential parallel tool calls', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + // Sequential: one tool fully completes before the next starts + processor.processChunk(ev.toolStart('call_1', 'getWeather')) + processor.processChunk(ev.toolArgs('call_1', '{"city":"NYC"}')) + processor.processChunk(ev.toolEnd('call_1', 'getWeather')) + processor.processChunk(ev.toolStart('call_2', 'getTime')) + processor.processChunk(ev.toolArgs('call_2', '{"tz":"EST"}')) + processor.processChunk(ev.toolEnd('call_2', 'getTime')) + processor.processChunk(ev.runFinished('tool_calls')) + + processor.finalizeStream() + + const messages = processor.getMessages() + expect(messages[0]!.parts).toHaveLength(2) + expect((messages[0]!.parts[0] as ToolCallPart).id).toBe('call_1') + expect((messages[0]!.parts[1] as ToolCallPart).id).toBe('call_2') + }) + + it('should track tool calls by toolCallId in Map, 
maintaining order', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + processor.processChunk(ev.toolStart('call_1', 'getWeather')) + processor.processChunk(ev.toolStart('call_2', 'getTime')) + + const state = processor.getState() + expect(state.toolCallOrder).toEqual(['call_1', 'call_2']) + expect(state.toolCalls.size).toBe(2) + }) + }) + + // ========================================================================== + // Text-tool interleaving + // ========================================================================== + describe('text-tool interleaving', () => { + it('should reset segment text accumulation on TEXT_MESSAGE_START (existing test preserved)', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + processor.processChunk(ev.textStart()) + processor.processChunk(ev.textContent('First segment')) + processor.processChunk(ev.toolStart('tc-1', 'search')) + processor.processChunk(ev.toolEnd('tc-1', 'search', { input: {} })) + processor.processChunk(ev.textStart('msg-2')) + processor.processChunk(ev.textContent('Second segment', 'msg-2')) + processor.processChunk(ev.runFinished('stop')) + processor.finalizeStream() + + const state = processor.getState() + expect(state.content).toBe('First segmentSecond segment') + }) + + it('should produce text -> tool-call -> tool-result -> text (4-part ordering)', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + // First adapter stream: text + tool call + processor.processChunk(ev.runStarted()) + processor.processChunk(ev.textStart()) + processor.processChunk(ev.textContent('Checking weather...')) + processor.processChunk(ev.textEnd()) + processor.processChunk(ev.toolStart('call_1', 'getWeather')) + processor.processChunk(ev.toolArgs('call_1', '{"city":"NYC"}')) + processor.processChunk(ev.toolEnd('call_1', 'getWeather')) + processor.processChunk(ev.runFinished('tool_calls')) + + // TextEngine 
executes tool, yields result + processor.processChunk( + ev.toolEnd('call_1', 'getWeather', { result: '{"temp":"72F"}' }), + ) + + // Second adapter stream: more text + processor.processChunk(ev.textStart('msg-2')) + processor.processChunk(ev.textContent("It's 72F in NYC.", 'msg-2')) + processor.processChunk(ev.textEnd('msg-2')) + processor.processChunk(ev.runFinished('stop')) + + processor.finalizeStream() + + const messages = processor.getMessages() + expect(messages).toHaveLength(1) + const parts = messages[0]!.parts + + // Expected: [text, tool-call, tool-result, text] + expect(parts).toHaveLength(4) + expect(parts[0]!.type).toBe('text') + expect((parts[0] as any).content).toBe('Checking weather...') + + expect(parts[1]!.type).toBe('tool-call') + expect((parts[1] as ToolCallPart).id).toBe('call_1') + + expect(parts[2]!.type).toBe('tool-result') + expect((parts[2] as ToolResultPart).toolCallId).toBe('call_1') + + expect(parts[3]!.type).toBe('text') + expect((parts[3] as any).content).toBe("It's 72F in NYC.") + }) + + it('should create two separate TextParts for text before and after tool calls', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + processor.processChunk(ev.textStart()) + processor.processChunk(ev.textContent('Before')) + processor.processChunk(ev.textEnd()) + processor.processChunk(ev.toolStart('tc-1', 'tool')) + processor.processChunk(ev.toolEnd('tc-1', 'tool')) + processor.processChunk(ev.textStart('msg-2')) + processor.processChunk(ev.textContent('After', 'msg-2')) + processor.processChunk(ev.textEnd('msg-2')) + processor.processChunk(ev.runFinished('stop')) + processor.finalizeStream() + + const parts = processor.getMessages()[0]!.parts + const textParts = parts.filter((p) => p.type === 'text') + expect(textParts).toHaveLength(2) + expect((textParts[0] as any).content).toBe('Before') + expect((textParts[1] as any).content).toBe('After') + }) + }) + + // 
========================================================================== + // Thinking/reasoning + // ========================================================================== + describe('thinking/reasoning', () => { + it('should accumulate STEP_FINISHED deltas into thinking content', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + processor.processChunk(ev.stepFinished('Let me think')) + processor.processChunk(ev.stepFinished(' about this...')) + + const messages = processor.getMessages() + expect(messages).toHaveLength(1) + const thinkingPart = messages[0]!.parts.find((p) => p.type === 'thinking') + expect(thinkingPart).toBeDefined() + expect((thinkingPart as any).content).toBe('Let me think about this...') + }) + + it('should handle thinking then text flow', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + processor.processChunk(ev.runStarted()) + processor.processChunk(ev.stepFinished('Let me think')) + processor.processChunk(ev.stepFinished(' about this...')) + processor.processChunk(ev.textStart()) + processor.processChunk(ev.textContent("Here's my answer.")) + processor.processChunk(ev.textEnd()) + processor.processChunk(ev.runFinished('stop')) + + processor.finalizeStream() + + const parts = processor.getMessages()[0]!.parts + expect(parts).toHaveLength(2) + expect(parts[0]!.type).toBe('thinking') + expect((parts[0] as any).content).toBe('Let me think about this...') + expect(parts[1]!.type).toBe('text') + expect((parts[1] as any).content).toBe("Here's my answer.") + + const state = processor.getState() + expect(state.thinking).toBe('Let me think about this...') + expect(state.content).toBe("Here's my answer.") + }) + + it('should create assistant message lazily on thinking content (existing test preserved)', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + expect(processor.getMessages()).toHaveLength(0) + + 
processor.processChunk(ev.stepFinished('thinking...')) + + expect(processor.getMessages()).toHaveLength(1) + expect( + processor.getMessages()[0]?.parts.some((p) => p.type === 'thinking'), + ).toBe(true) + }) + + it('should update a single ThinkingPart in-place', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + processor.processChunk(ev.stepFinished('A')) + processor.processChunk(ev.stepFinished('B')) + processor.processChunk(ev.stepFinished('C')) + + // Only one thinking part, not three + const parts = processor.getMessages()[0]!.parts + const thinkingParts = parts.filter((p) => p.type === 'thinking') + expect(thinkingParts).toHaveLength(1) + expect((thinkingParts[0] as any).content).toBe('ABC') + }) + }) + + // ========================================================================== + // Tool results + // ========================================================================== + describe('tool results', () => { + it('should update tool-call part output field when TOOL_CALL_END has a result (existing test)', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + processor.processChunk(ev.toolStart('tc-1', 'getWeather')) + processor.processChunk(ev.toolArgs('tc-1', '{"city":"NYC"}')) + processor.processChunk( + ev.toolEnd('tc-1', 'getWeather', { + input: { city: 'NYC' }, + result: '{"temp":72}', + }), + ) + + const messages = processor.getMessages() + const toolCallPart = messages[0]?.parts.find( + (p) => p.type === 'tool-call', + ) as ToolCallPart + expect((toolCallPart as any).output).toEqual({ temp: 72 }) + + const toolResultPart = messages[0]?.parts.find( + (p) => p.type === 'tool-result', + ) as ToolResultPart + expect(toolResultPart).toBeDefined() + expect(toolResultPart.content).toBe('{"temp":72}') + expect(toolResultPart.state).toBe('complete') + }) + + it('should not update tool-call output when TOOL_CALL_END has no result (existing test)', () => { + const processor = new 
StreamProcessor() + processor.prepareAssistantMessage() + + processor.processChunk(ev.toolStart('tc-1', 'getWeather')) + processor.processChunk( + ev.toolEnd('tc-1', 'getWeather', { input: { city: 'NYC' } }), + ) + + const messages = processor.getMessages() + const toolCallPart = messages[0]?.parts.find( + (p) => p.type === 'tool-call', + ) + expect((toolCallPart as any).output).toBeUndefined() + expect( + messages[0]?.parts.find((p) => p.type === 'tool-result'), + ).toBeUndefined() + }) + + it('should handle non-JSON result string gracefully (existing test)', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + processor.processChunk(ev.toolStart('tc-1', 'getText')) + processor.processChunk( + ev.toolEnd('tc-1', 'getText', { + input: {}, + result: 'plain text result', + }), + ) + + const toolCallPart = processor + .getMessages()[0] + ?.parts.find((p) => p.type === 'tool-call') + expect((toolCallPart as any).output).toBe('plain text result') + }) + + it('addToolResult should create tool-result part and set output on tool-call part', () => { + const events = spyEvents() + const processor = new StreamProcessor({ events }) + processor.prepareAssistantMessage() + + processor.processChunk(ev.toolStart('tc-1', 'getWeather')) + processor.processChunk( + ev.toolEnd('tc-1', 'getWeather', { input: { city: 'NYC' } }), + ) + events.onMessagesChange.mockClear() + + processor.addToolResult('tc-1', { temp: 72 }) + + const messages = processor.getMessages() + const toolCallPart = messages[0]!.parts.find( + (p) => p.type === 'tool-call', + ) as ToolCallPart + expect((toolCallPart as any).output).toEqual({ temp: 72 }) + + const toolResultPart = messages[0]!.parts.find( + (p) => p.type === 'tool-result', + ) as ToolResultPart + expect(toolResultPart).toBeDefined() + expect(toolResultPart.content).toBe('{"temp":72}') + expect(toolResultPart.state).toBe('complete') + + expect(events.onMessagesChange).toHaveBeenCalled() + }) + + it('addToolResult 
with string output should store as-is', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + processor.processChunk(ev.toolStart('tc-1', 'getText')) + processor.processChunk(ev.toolEnd('tc-1', 'getText', { input: {} })) + + processor.addToolResult('tc-1', 'plain string output') + + const toolResultPart = processor + .getMessages()[0]! + .parts.find((p) => p.type === 'tool-result') as ToolResultPart + expect(toolResultPart.content).toBe('plain string output') + }) + + it('addToolResult with error should set error state', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + processor.processChunk(ev.toolStart('tc-1', 'getWeather')) + processor.processChunk(ev.toolEnd('tc-1', 'getWeather', { input: {} })) + + processor.addToolResult('tc-1', null, 'Network error') + + const toolCallPart = processor + .getMessages()[0]! + .parts.find((p) => p.type === 'tool-call') as ToolCallPart + expect((toolCallPart as any).output).toEqual({ error: 'Network error' }) + + const toolResultPart = processor + .getMessages()[0]! 
+ .parts.find((p) => p.type === 'tool-result') as ToolResultPart + expect(toolResultPart.state).toBe('error') + expect(toolResultPart.error).toBe('Network error') + }) + + it('addToolResult with missing toolCallId should warn and no-op', () => { + const warnSpy = vi.spyOn(console, 'warn').mockImplementation(() => {}) + const processor = new StreamProcessor() + + processor.addToolResult('nonexistent-id', { temp: 72 }) + + expect(warnSpy).toHaveBeenCalledWith( + expect.stringContaining('nonexistent-id'), + ) + expect(processor.getMessages()).toHaveLength(0) + warnSpy.mockRestore() + }) + }) + + // ========================================================================== + // Client tools (CUSTOM events) + // ========================================================================== + describe('client tools (CUSTOM events)', () => { + it('should fire onToolCall when tool-input-available CUSTOM event arrives', () => { + const events = spyEvents() + const processor = new StreamProcessor({ events }) + processor.prepareAssistantMessage() + + // Create a tool call first + processor.processChunk(ev.toolStart('tc-1', 'clientTool')) + processor.processChunk( + ev.toolEnd('tc-1', 'clientTool', { input: { query: 'test' } }), + ) + + processor.processChunk( + ev.custom('tool-input-available', { + toolCallId: 'tc-1', + toolName: 'clientTool', + input: { query: 'test' }, + }), + ) + + expect(events.onToolCall).toHaveBeenCalledTimes(1) + expect(events.onToolCall).toHaveBeenCalledWith({ + toolCallId: 'tc-1', + toolName: 'clientTool', + input: { query: 'test' }, + }) + }) + + it('should not fire onToolCall for CUSTOM events with no data', () => { + const events = spyEvents() + const processor = new StreamProcessor({ events }) + processor.prepareAssistantMessage() + + processor.processChunk(ev.custom('tool-input-available')) + + expect(events.onToolCall).not.toHaveBeenCalled() + }) + + it('should not fire onToolCall for unrelated CUSTOM events', () => { + const events = spyEvents() + 
const processor = new StreamProcessor({ events }) + processor.prepareAssistantMessage() + + processor.processChunk(ev.custom('some-other-event', { foo: 'bar' })) + + expect(events.onToolCall).not.toHaveBeenCalled() + }) + }) + + // ========================================================================== + // Approval flow + // ========================================================================== + describe('approval flow', () => { + it('should handle approval-requested CUSTOM event', () => { + const events = spyEvents() + const processor = new StreamProcessor({ events }) + processor.prepareAssistantMessage() + + // Create a tool call + processor.processChunk(ev.toolStart('tc-1', 'dangerousTool')) + processor.processChunk( + ev.toolEnd('tc-1', 'dangerousTool', { input: { action: 'delete' } }), + ) + + // Fire approval request + processor.processChunk( + ev.custom('approval-requested', { + toolCallId: 'tc-1', + toolName: 'dangerousTool', + input: { action: 'delete' }, + approval: { id: 'approval-1', needsApproval: true }, + }), + ) + + // Check that tool-call part has approval state + const toolCallPart = processor + .getMessages()[0]! 
+ .parts.find((p) => p.type === 'tool-call') as ToolCallPart + expect(toolCallPart.state).toBe('approval-requested') + expect(toolCallPart.approval).toEqual({ + id: 'approval-1', + needsApproval: true, + }) + + // Check callback was fired + expect(events.onApprovalRequest).toHaveBeenCalledWith({ + toolCallId: 'tc-1', + toolName: 'dangerousTool', + input: { action: 'delete' }, + approvalId: 'approval-1', + }) + }) + + it('addToolApprovalResponse should approve a tool call', () => { + const events = spyEvents() + const processor = new StreamProcessor({ events }) + processor.prepareAssistantMessage() + + processor.processChunk(ev.toolStart('tc-1', 'dangerousTool')) + processor.processChunk(ev.toolEnd('tc-1', 'dangerousTool', { input: {} })) + processor.processChunk( + ev.custom('approval-requested', { + toolCallId: 'tc-1', + toolName: 'dangerousTool', + input: {}, + approval: { id: 'approval-1', needsApproval: true }, + }), + ) + + events.onMessagesChange.mockClear() + processor.addToolApprovalResponse('approval-1', true) + + const toolCallPart = processor + .getMessages()[0]! + .parts.find((p) => p.type === 'tool-call') as ToolCallPart + expect(toolCallPart.state).toBe('approval-responded') + expect(toolCallPart.approval?.approved).toBe(true) + expect(events.onMessagesChange).toHaveBeenCalled() + }) + + it('addToolApprovalResponse should deny a tool call', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + processor.processChunk(ev.toolStart('tc-1', 'dangerousTool')) + processor.processChunk(ev.toolEnd('tc-1', 'dangerousTool', { input: {} })) + processor.processChunk( + ev.custom('approval-requested', { + toolCallId: 'tc-1', + toolName: 'dangerousTool', + input: {}, + approval: { id: 'approval-1', needsApproval: true }, + }), + ) + + processor.addToolApprovalResponse('approval-1', false) + + const toolCallPart = processor + .getMessages()[0]! 
+ .parts.find((p) => p.type === 'tool-call') as ToolCallPart + expect(toolCallPart.state).toBe('approval-responded') + expect(toolCallPart.approval?.approved).toBe(false) + }) + + it('should not fire onApprovalRequest for approval-requested without data', () => { + const events = spyEvents() + const processor = new StreamProcessor({ events }) + processor.prepareAssistantMessage() + + processor.processChunk(ev.custom('approval-requested')) + expect(events.onApprovalRequest).not.toHaveBeenCalled() + }) + }) + + // ========================================================================== + // areAllToolsComplete + // ========================================================================== + describe('areAllToolsComplete', () => { + it('should return true when there is no assistant message', () => { + const processor = new StreamProcessor() + expect(processor.areAllToolsComplete()).toBe(true) + }) + + it('should return true when there are no tool calls', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + processor.processChunk(ev.textContent('Hello')) + processor.processChunk(ev.runFinished('stop')) + processor.finalizeStream() + + expect(processor.areAllToolsComplete()).toBe(true) + }) + + it('should return false when tool calls are pending (no result, no output)', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + processor.processChunk(ev.toolStart('tc-1', 'getWeather')) + processor.processChunk(ev.toolEnd('tc-1', 'getWeather', { input: {} })) + + expect(processor.areAllToolsComplete()).toBe(false) + }) + + it('should return true when tool call has corresponding tool-result part (server tool)', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + processor.processChunk(ev.toolStart('tc-1', 'getWeather')) + processor.processChunk( + ev.toolEnd('tc-1', 'getWeather', { input: {}, result: '{"temp":72}' }), + ) + + 
expect(processor.areAllToolsComplete()).toBe(true) + }) + + it('should return true when client tool has output (via addToolResult)', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + processor.processChunk(ev.toolStart('tc-1', 'clientTool')) + processor.processChunk(ev.toolEnd('tc-1', 'clientTool', { input: {} })) + + processor.addToolResult('tc-1', { data: 'result' }) + + expect(processor.areAllToolsComplete()).toBe(true) + }) + + it('should return true when tool call is in approval-responded state', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + processor.processChunk(ev.toolStart('tc-1', 'dangerousTool')) + processor.processChunk(ev.toolEnd('tc-1', 'dangerousTool', { input: {} })) + processor.processChunk( + ev.custom('approval-requested', { + toolCallId: 'tc-1', + toolName: 'dangerousTool', + input: {}, + approval: { id: 'a1', needsApproval: true }, + }), + ) + processor.addToolApprovalResponse('a1', true) + + expect(processor.areAllToolsComplete()).toBe(true) + }) + + it('should return false when some tool calls are complete but not all', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + processor.processChunk(ev.toolStart('tc-1', 'getWeather')) + processor.processChunk( + ev.toolEnd('tc-1', 'getWeather', { input: {}, result: '{"temp":72}' }), + ) + processor.processChunk(ev.toolStart('tc-2', 'getTime')) + processor.processChunk(ev.toolEnd('tc-2', 'getTime', { input: {} })) + + // tc-1 has a result, but tc-2 does not + expect(processor.areAllToolsComplete()).toBe(false) + }) + }) + + // ========================================================================== + // Lazy assistant message creation (existing tests preserved) + // ========================================================================== + describe('lazy assistant message creation', () => { + it('should not create assistant message when no content arrives', () => { + const 
processor = new StreamProcessor() + processor.prepareAssistantMessage() + + processor.processChunk(ev.runFinished('stop')) + processor.finalizeStream() + + expect(processor.getMessages()).toHaveLength(0) + expect(processor.getCurrentAssistantMessageId()).toBeNull() + }) + + it('should create assistant message lazily on first text content', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + expect(processor.getMessages()).toHaveLength(0) + expect(processor.getCurrentAssistantMessageId()).toBeNull() + + processor.processChunk(ev.textContent('Hello!')) + + expect(processor.getMessages()).toHaveLength(1) + expect(processor.getCurrentAssistantMessageId()).not.toBeNull() + expect(processor.getMessages()[0]?.role).toBe('assistant') + }) + + it('should create assistant message lazily on first tool call', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + expect(processor.getMessages()).toHaveLength(0) + + processor.processChunk(ev.toolStart('tc-1', 'getGuitars')) + + expect(processor.getMessages()).toHaveLength(1) + expect( + processor.getMessages()[0]?.parts.some((p) => p.type === 'tool-call'), + ).toBe(true) + }) + + it('should create assistant message lazily on error', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + expect(processor.getMessages()).toHaveLength(0) + + processor.processChunk(ev.runError('Something went wrong')) + processor.finalizeStream() + + const messages = processor.getMessages() + expect(messages).toHaveLength(1) + expect(messages[0]?.role).toBe('assistant') + expect(messages[0]?.parts).toHaveLength(0) + }) + + it('should create assistant message lazily on thinking content', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + // No message yet + expect(processor.getMessages()).toHaveLength(0) + + processor.processChunk({ + type: 'STEP_FINISHED', + stepId: 'step-1', + model: 'test', + 
timestamp: Date.now(), + delta: 'thinking...', + content: 'thinking...', + } as StreamChunk) + + // Now the message exists with thinking content + expect(processor.getMessages()).toHaveLength(1) + expect( + processor.getMessages()[0]?.parts.some((p) => p.type === 'thinking'), + ).toBe(true) + }) + + it('should not create assistant message during empty multi-turn continuation', () => { + const processor = new StreamProcessor() + + processor.addUserMessage('recommend a guitar') + processor.prepareAssistantMessage() + + processor.processChunk(ev.toolStart('tc-1', 'getGuitars')) + processor.processChunk(ev.toolEnd('tc-1', 'getGuitars', { input: {} })) + processor.processChunk(ev.runFinished('tool_calls')) + processor.finalizeStream() + + expect(processor.getMessages()).toHaveLength(2) + + // Auto-continuation: prepare but no content + processor.prepareAssistantMessage() + processor.processChunk(ev.runFinished('stop')) + processor.finalizeStream() + + expect(processor.getMessages()).toHaveLength(2) + expect(processor.getMessages()[1]?.role).toBe('assistant') + }) + + it('should support deprecated startAssistantMessage for backwards compatibility', () => { + const processor = new StreamProcessor() + const messageId = processor.startAssistantMessage() + + expect(messageId).toBeTruthy() + expect(processor.getMessages()).toHaveLength(1) + expect(processor.getMessages()[0]?.id).toBe(messageId) + }) + }) + + // ========================================================================== + // Edge cases + // ========================================================================== + describe('edge cases', () => { + it('duplicate TOOL_CALL_START with same toolCallId should be a no-op', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + processor.processChunk(ev.toolStart('tc-1', 'getWeather')) + processor.processChunk(ev.toolStart('tc-1', 'getWeather')) // duplicate + + const state = processor.getState() + 
expect(state.toolCalls.size).toBe(1) + expect(state.toolCallOrder).toEqual(['tc-1']) + + // Only one tool-call part in UIMessage + const toolParts = processor + .getMessages()[0]! + .parts.filter((p) => p.type === 'tool-call') + expect(toolParts).toHaveLength(1) + }) + + it('TOOL_CALL_ARGS for unknown toolCallId should be silently dropped', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + // Send args without a preceding START + processor.processChunk(ev.toolArgs('unknown-id', '{"key":"val"}')) + + // No tool calls in state + expect(processor.getState().toolCalls.size).toBe(0) + // No messages created (no content-bearing event) + expect(processor.getMessages()).toHaveLength(0) + }) + + it('empty delta in TOOL_CALL_ARGS should not transition from awaiting-input', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + processor.processChunk(ev.toolStart('tc-1', 'myTool')) + expect(processor.getState().toolCalls.get('tc-1')?.state).toBe( + 'awaiting-input', + ) + + // Empty delta + processor.processChunk(ev.toolArgs('tc-1', '')) + expect(processor.getState().toolCalls.get('tc-1')?.state).toBe( + 'awaiting-input', + ) + + // Non-empty delta transitions + processor.processChunk(ev.toolArgs('tc-1', '{"key":')) + expect(processor.getState().toolCalls.get('tc-1')?.state).toBe( + 'input-streaming', + ) + }) + + it('missing TOOL_CALL_END should be caught by RUN_FINISHED safety net', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + processor.processChunk(ev.toolStart('tc-1', 'getWeather')) + processor.processChunk(ev.toolArgs('tc-1', '{"city":"NYC"}')) + // No TOOL_CALL_END! 
+ expect(processor.getState().toolCalls.get('tc-1')?.state).toBe( + 'input-streaming', + ) + + processor.processChunk(ev.runFinished('tool_calls')) + // Safety net should force-complete + expect(processor.getState().toolCalls.get('tc-1')?.state).toBe( + 'input-complete', + ) + }) + + it('TOOL_CALL_END for already input-complete tool call should be a no-op for state', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + processor.processChunk(ev.toolStart('tc-1', 'getWeather')) + processor.processChunk(ev.toolEnd('tc-1', 'getWeather', { input: {} })) + expect(processor.getState().toolCalls.get('tc-1')?.state).toBe( + 'input-complete', + ) + + // Second TOOL_CALL_END with result still processes the result + processor.processChunk( + ev.toolEnd('tc-1', 'getWeather', { result: '{"temp":72}' }), + ) + + // Should have added tool-result part + const resultPart = processor + .getMessages()[0]! + .parts.find((p) => p.type === 'tool-result') + expect(resultPart).toBeDefined() + }) + + it('ignored event types (RUN_STARTED, TEXT_MESSAGE_END, STEP_STARTED, STATE_SNAPSHOT, STATE_DELTA) should not crash', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + // These should all be silently ignored + processor.processChunk(chunk('RUN_STARTED', { runId: 'run-1' })) + processor.processChunk(chunk('TEXT_MESSAGE_END', { messageId: 'msg-1' })) + processor.processChunk(chunk('STEP_STARTED', { stepId: 'step-1' })) + processor.processChunk(chunk('STATE_SNAPSHOT', { state: { key: 'val' } })) + processor.processChunk(chunk('STATE_DELTA', { delta: { key: 'val' } })) + + // No messages created (none of these are content-bearing) + expect(processor.getMessages()).toHaveLength(0) + }) + + it('RUN_ERROR with empty message should use fallback', () => { + const events = spyEvents() + const processor = new StreamProcessor({ events }) + processor.prepareAssistantMessage() + + processor.processChunk(ev.runError('')) + + 
expect(events.onError).toHaveBeenCalledTimes(1) + const errorArg = events.onError.mock.calls[0]![0] + expect(errorArg.message).toBe('An error occurred') + }) + }) + + // ========================================================================== + // Event callbacks + // ========================================================================== + describe('event callbacks', () => { + it('onMessagesChange should be called on every message state change', () => { + const events = spyEvents() + const processor = new StreamProcessor({ events }) + processor.prepareAssistantMessage() + + processor.processChunk(ev.textContent('Hello')) + processor.processChunk(ev.textContent(' world')) + processor.processChunk(ev.runFinished('stop')) + processor.finalizeStream() + + // ensureAssistantMessage (creates msg) + textContent emission + textContent emission + // + runFinished (no tool calls, no change) + finalizeStream (no pending text, onStreamEnd no messages change) + // The exact count depends on internal emission logic; let's verify it was called + expect(events.onMessagesChange.mock.calls.length).toBeGreaterThanOrEqual( + 2, + ) + }) + + it('onStreamStart should fire on first content-bearing chunk', () => { + const events = spyEvents() + const processor = new StreamProcessor({ events }) + processor.prepareAssistantMessage() + + // Non-content chunk should NOT trigger onStreamStart + processor.processChunk(ev.runStarted()) + expect(events.onStreamStart).not.toHaveBeenCalled() + + // First content chunk triggers it + processor.processChunk(ev.textContent('Hi')) + expect(events.onStreamStart).toHaveBeenCalledTimes(1) + + // Subsequent content does NOT re-trigger + processor.processChunk(ev.textContent(' there')) + expect(events.onStreamStart).toHaveBeenCalledTimes(1) + }) + + it('onStreamEnd should fire during finalizeStream with the assistant message', () => { + const events = spyEvents() + const processor = new StreamProcessor({ events }) + processor.prepareAssistantMessage() 
+ + processor.processChunk(ev.textContent('Done')) + processor.processChunk(ev.runFinished('stop')) + processor.finalizeStream() + + expect(events.onStreamEnd).toHaveBeenCalledTimes(1) + const msg = events.onStreamEnd.mock.calls[0]![0] + expect(msg.role).toBe('assistant') + expect(msg.parts[0]).toEqual({ type: 'text', content: 'Done' }) + }) + + it('onStreamEnd should NOT fire if no assistant message was created', () => { + const events = spyEvents() + const processor = new StreamProcessor({ events }) + processor.prepareAssistantMessage() + + processor.processChunk(ev.runFinished('stop')) + processor.finalizeStream() + + expect(events.onStreamEnd).not.toHaveBeenCalled() + }) + + it('onError should fire on RUN_ERROR', () => { + const events = spyEvents() + const processor = new StreamProcessor({ events }) + processor.prepareAssistantMessage() + + processor.processChunk(ev.runError('API rate limited')) + + expect(events.onError).toHaveBeenCalledTimes(1) + expect(events.onError.mock.calls[0]![0]).toBeInstanceOf(Error) + expect(events.onError.mock.calls[0]![0].message).toBe('API rate limited') + }) + + it('onTextUpdate should fire for each text emission', () => { + const events = spyEvents() + const processor = new StreamProcessor({ events }) + processor.prepareAssistantMessage() + + processor.processChunk(ev.textContent('Hello')) + processor.processChunk(ev.textContent(' world')) + processor.processChunk(ev.runFinished('stop')) + processor.finalizeStream() + + // With ImmediateStrategy, each content chunk triggers onTextUpdate + expect(events.onTextUpdate).toHaveBeenCalledTimes(2) + const msgId = processor.getCurrentAssistantMessageId()! 
+ expect(events.onTextUpdate).toHaveBeenCalledWith(msgId, 'Hello') + expect(events.onTextUpdate).toHaveBeenCalledWith(msgId, 'Hello world') + }) + + it('onToolCallStateChange should fire on state transitions', () => { + const events = spyEvents() + const processor = new StreamProcessor({ events }) + processor.prepareAssistantMessage() + + processor.processChunk(ev.toolStart('tc-1', 'getWeather')) + processor.processChunk(ev.toolArgs('tc-1', '{"city":"NYC"}')) + processor.processChunk(ev.toolEnd('tc-1', 'getWeather')) + + const msgId = processor.getCurrentAssistantMessageId()! + + // START -> awaiting-input + expect(events.onToolCallStateChange).toHaveBeenCalledWith( + msgId, + 'tc-1', + 'awaiting-input', + '', + ) + // ARGS -> input-streaming + expect(events.onToolCallStateChange).toHaveBeenCalledWith( + msgId, + 'tc-1', + 'input-streaming', + '{"city":"NYC"}', + ) + // END -> input-complete + expect(events.onToolCallStateChange).toHaveBeenCalledWith( + msgId, + 'tc-1', + 'input-complete', + '{"city":"NYC"}', + ) + }) + + it('onThinkingUpdate should fire for each STEP_FINISHED delta', () => { + const events = spyEvents() + const processor = new StreamProcessor({ events }) + processor.prepareAssistantMessage() + + processor.processChunk(ev.stepFinished('Thinking')) + processor.processChunk(ev.stepFinished(' more')) + + const msgId = processor.getCurrentAssistantMessageId()! 
+ expect(events.onThinkingUpdate).toHaveBeenCalledTimes(2) + expect(events.onThinkingUpdate).toHaveBeenCalledWith(msgId, 'Thinking') + expect(events.onThinkingUpdate).toHaveBeenCalledWith( + msgId, + 'Thinking more', + ) + }) + }) + + // ========================================================================== + // Recording and replay + // ========================================================================== + describe('recording and replay', () => { + it('should record chunks when recording option is enabled via process()', async () => { + const processor = new StreamProcessor({ recording: true }) + processor.prepareAssistantMessage() + + await processor.process( + streamOf( + ev.runStarted(), + ev.textStart(), + ev.textContent('Hello'), + ev.textEnd(), + ev.runFinished('stop'), + ), + ) + + const recording = processor.getRecording() + expect(recording).not.toBeNull() + expect(recording!.version).toBe('1.0') + expect(recording!.chunks).toHaveLength(5) + expect(recording!.result).toBeDefined() + expect(recording!.result!.content).toBe('Hello') + expect(recording!.result!.finishReason).toBe('stop') + }) + + it('should record chunks when startRecording is called manually', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + processor.startRecording() + processor.processChunk(ev.textContent('Hello')) + processor.processChunk(ev.runFinished('stop')) + + const recording = processor.getRecording() + expect(recording).not.toBeNull() + expect(recording!.chunks).toHaveLength(2) + expect(recording!.chunks[0]!.index).toBe(0) + expect(recording!.chunks[1]!.index).toBe(1) + }) + + it('getRecording should return null when recording is not enabled', () => { + const processor = new StreamProcessor() + expect(processor.getRecording()).toBeNull() + }) + + it('StreamProcessor.replay should process a recording', async () => { + // First, create a recording + const processor = new StreamProcessor({ recording: true }) + 
processor.prepareAssistantMessage() + + await processor.process( + streamOf( + ev.runStarted(), + ev.textStart(), + ev.textContent('Replayed'), + ev.textEnd(), + ev.runFinished('stop'), + ), + ) + + const recording = processor.getRecording()! + + // Now replay it + const result = await StreamProcessor.replay(recording) + expect(result.content).toBe('Replayed') + expect(result.finishReason).toBe('stop') + }) + + it('createReplayStream should yield all chunks from a recording', async () => { + const recording = { + version: '1.0' as const, + timestamp: Date.now(), + chunks: [ + { chunk: ev.textContent('A'), timestamp: Date.now(), index: 0 }, + { chunk: ev.textContent('B'), timestamp: Date.now(), index: 1 }, + ], + } + + const stream = createReplayStream(recording) + const chunks: Array = [] + for await (const c of stream) { + chunks.push(c) + } + + expect(chunks).toHaveLength(2) + expect((chunks[0] as any).delta).toBe('A') + expect((chunks[1] as any).delta).toBe('B') + }) + + it('replay should pass options to the new processor', async () => { + const events = spyEvents() + const recording = { + version: '1.0' as const, + timestamp: Date.now(), + chunks: [ + { chunk: ev.textContent('Hello'), timestamp: Date.now(), index: 0 }, + { chunk: ev.runFinished('stop'), timestamp: Date.now(), index: 1 }, + ], + } + + await StreamProcessor.replay(recording, { events }) + expect(events.onStreamEnd).toHaveBeenCalledTimes(1) + }) + }) + + // ========================================================================== + // finalizeStream and reset + // ========================================================================== + describe('finalizeStream and reset', () => { + it('finalizeStream should flush pending text not yet emitted', () => { + // Use a strategy that buffers (never emits during streaming) + const strategy: ChunkStrategy = { + shouldEmit: () => false, + } + const processor = new StreamProcessor({ chunkStrategy: strategy }) + processor.prepareAssistantMessage() + + 
processor.processChunk(ev.textContent('Buffered')) + // Text was buffered, not emitted + const msgBefore = processor.getMessages()[0]! + expect(msgBefore.parts).toHaveLength(0) + + processor.finalizeStream() + + // After finalize, the pending text should be flushed + const msgAfter = processor.getMessages()[0]! + expect(msgAfter.parts).toHaveLength(1) + expect((msgAfter.parts[0] as any).content).toBe('Buffered') + }) + + it('finalizeStream should not fire onStreamEnd if no message was created', () => { + const events = spyEvents() + const processor = new StreamProcessor({ events }) + processor.prepareAssistantMessage() + + // No content, just finalize + processor.finalizeStream() + + expect(events.onStreamEnd).not.toHaveBeenCalled() + }) + + it('finalizeStream should force-complete tool calls as safety net', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + processor.processChunk(ev.toolStart('tc-1', 'getWeather')) + processor.processChunk(ev.toolArgs('tc-1', '{"city":"NYC"}')) + // No TOOL_CALL_END, no RUN_FINISHED + expect(processor.getState().toolCalls.get('tc-1')?.state).toBe( + 'input-streaming', + ) + + processor.finalizeStream() + expect(processor.getState().toolCalls.get('tc-1')?.state).toBe( + 'input-complete', + ) + }) + + it('reset should clear all messages and stream state', () => { + const processor = new StreamProcessor() + processor.addUserMessage('Hello') + processor.prepareAssistantMessage() + processor.processChunk(ev.textContent('World')) + processor.processChunk(ev.runFinished('stop')) + processor.finalizeStream() + + expect(processor.getMessages()).toHaveLength(2) + expect(processor.getState().content).toBe('World') + expect(processor.getCurrentAssistantMessageId()).not.toBeNull() + + processor.reset() + + expect(processor.getMessages()).toHaveLength(0) + expect(processor.getCurrentAssistantMessageId()).toBeNull() + expect(processor.getState().content).toBe('') + 
expect(processor.getState().thinking).toBe('') + expect(processor.getState().toolCalls.size).toBe(0) + expect(processor.getState().finishReason).toBeNull() + expect(processor.getState().done).toBe(false) + }) + + it('chunkStrategy.reset should be called on prepareAssistantMessage', () => { + const resetFn = vi.fn() + const strategy: ChunkStrategy = { + shouldEmit: () => true, + reset: resetFn, + } + const processor = new StreamProcessor({ chunkStrategy: strategy }) + + processor.prepareAssistantMessage() + expect(resetFn).toHaveBeenCalledTimes(1) + }) + + it('TEXT_MESSAGE_START should flush pending text before resetting segment', () => { + const events = spyEvents() + const processor = new StreamProcessor({ events }) + processor.prepareAssistantMessage() + + // Simulate a chunk strategy that buffers + processor.processChunk(ev.textContent('Buffered')) + // The text was emitted (ImmediateStrategy) + + events.onTextUpdate.mockClear() + + // Now TEXT_MESSAGE_START flushes any pending text, then resets + processor.processChunk(ev.textStart('msg-2')) + + // Since ImmediateStrategy already emitted, the flush is a no-op + // (currentSegmentText === lastEmittedText) + // This test verifies the flush logic doesn't crash + expect(processor.getState().content).toBe('Buffered') + }) + }) + + // ========================================================================== + // getState snapshot + // ========================================================================== + describe('getState', () => { + it('should return a complete state snapshot', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + processor.processChunk(ev.textContent('Some text')) + processor.processChunk(ev.toolStart('tc-1', 'myTool')) + processor.processChunk(ev.toolArgs('tc-1', '{"key":"val"}')) + processor.processChunk(ev.runFinished('tool_calls')) + + const state = processor.getState() + expect(state.content).toBe('Some text') + expect(state.thinking).toBe('') + 
expect(state.toolCalls.size).toBe(1) + expect(state.toolCallOrder).toEqual(['tc-1']) + expect(state.finishReason).toBe('tool_calls') + expect(state.done).toBe(true) + }) + + it('should return independent copies (mutations do not affect internal state)', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + processor.processChunk(ev.toolStart('tc-1', 'myTool')) + + const state1 = processor.getState() + state1.toolCalls.delete('tc-1') + state1.toolCallOrder.push('fake') + + const state2 = processor.getState() + expect(state2.toolCalls.size).toBe(1) + expect(state2.toolCallOrder).toEqual(['tc-1']) }) }) }) diff --git a/packages/typescript/ai/tests/tool-call-manager.test.ts b/packages/typescript/ai/tests/tool-call-manager.test.ts index 4b372abc..4e47953b 100644 --- a/packages/typescript/ai/tests/tool-call-manager.test.ts +++ b/packages/typescript/ai/tests/tool-call-manager.test.ts @@ -1,7 +1,10 @@ import { describe, expect, it, vi } from 'vitest' import { z } from 'zod' -import { ToolCallManager } from '../src/activities/chat/tools/tool-calls' -import type { RunFinishedEvent, Tool } from '../src/types' +import { + ToolCallManager, + executeToolCalls, +} from '../src/activities/chat/tools/tool-calls' +import type { RunFinishedEvent, Tool, ToolCall } from '../src/types' describe('ToolCallManager', () => { const mockFinishedEvent: RunFinishedEvent = { @@ -373,3 +376,267 @@ describe('ToolCallManager', () => { }) }) }) + +describe('executeToolCalls', () => { + // Client tool (no execute function) with needsApproval + const clientToolWithApproval: Tool = { + name: 'delete_local_data', + description: 'Delete data from local storage', + inputSchema: z.object({ key: z.string() }), + needsApproval: true, + // No execute = client tool + } + + // Client tool (no execute function) without approval + const clientToolWithoutApproval: Tool = { + name: 'get_local_data', + description: 'Get data from local storage', + inputSchema: z.object({ key: 
z.string() }), + // No execute = client tool, no needsApproval + } + + // Server tool with approval + const serverToolWithApproval: Tool = { + name: 'delete_record', + description: 'Delete a record', + inputSchema: z.object({ id: z.string() }), + needsApproval: true, + execute: vi.fn(() => ({ deleted: true })), + } + + function makeToolCall( + id: string, + name: string, + args: string = '{}', + ): ToolCall { + return { + id, + type: 'function', + function: { name, arguments: args }, + } + } + + describe('client tool with needsApproval', () => { + it('should request approval when no approval decision exists', async () => { + const toolCalls = [ + makeToolCall('call_1', 'delete_local_data', '{"key":"myKey"}'), + ] + + const result = await executeToolCalls( + toolCalls, + [clientToolWithApproval], + new Map(), + new Map(), + ) + + expect(result.needsApproval).toHaveLength(1) + expect(result.needsApproval[0]?.toolCallId).toBe('call_1') + expect(result.needsApproval[0]?.approvalId).toBe('approval_call_1') + expect(result.results).toHaveLength(0) + expect(result.needsClientExecution).toHaveLength(0) + }) + + it('should request client execution after approval when no client result exists', async () => { + const toolCalls = [ + makeToolCall('call_1', 'delete_local_data', '{"key":"myKey"}'), + ] + const approvals = new Map([['approval_call_1', true]]) + + const result = await executeToolCalls( + toolCalls, + [clientToolWithApproval], + approvals, + new Map(), // No client results yet + ) + + // Should request client execution, NOT produce a result + expect(result.needsClientExecution).toHaveLength(1) + expect(result.needsClientExecution[0]?.toolCallId).toBe('call_1') + expect(result.needsClientExecution[0]?.toolName).toBe('delete_local_data') + expect(result.results).toHaveLength(0) + expect(result.needsApproval).toHaveLength(0) + }) + + it('should return result when client has executed after approval', async () => { + const toolCalls = [ + makeToolCall('call_1', 
'delete_local_data', '{"key":"myKey"}'), + ] + const approvals = new Map([['approval_call_1', true]]) + const clientResults = new Map([['call_1', { deleted: true }]]) + + const result = await executeToolCalls( + toolCalls, + [clientToolWithApproval], + approvals, + clientResults, + ) + + expect(result.results).toHaveLength(1) + expect(result.results[0]?.toolCallId).toBe('call_1') + expect(result.results[0]?.result).toEqual({ deleted: true }) + expect(result.needsClientExecution).toHaveLength(0) + expect(result.needsApproval).toHaveLength(0) + }) + + it('should return error when user declines approval', async () => { + const toolCalls = [ + makeToolCall('call_1', 'delete_local_data', '{"key":"myKey"}'), + ] + const approvals = new Map([['approval_call_1', false]]) + + const result = await executeToolCalls( + toolCalls, + [clientToolWithApproval], + approvals, + new Map(), + ) + + expect(result.results).toHaveLength(1) + expect(result.results[0]?.result).toEqual({ + error: 'User declined tool execution', + }) + expect(result.results[0]?.state).toBe('output-error') + expect(result.needsClientExecution).toHaveLength(0) + expect(result.needsApproval).toHaveLength(0) + }) + + it('should treat approval response object as a real result if leaked into clientResults (bug scenario)', async () => { + // This test documents the behavior when collectClientState does NOT + // filter out approval response messages. If the pendingExecution marker + // leaks through as a client result, executeToolCalls will incorrectly + // treat it as the tool's real output. The fix is in collectClientState + // which filters these out before they reach executeToolCalls. 
+ const toolCalls = [ + makeToolCall('call_1', 'delete_local_data', '{"key":"myKey"}'), + ] + const approvals = new Map([['approval_call_1', true]]) + // Simulating the bug: approval response leaked into clientResults + const clientResults = new Map([ + [ + 'call_1', + { + approved: true, + pendingExecution: true, + message: 'User approved this action', + }, + ], + ]) + + const result = await executeToolCalls( + toolCalls, + [clientToolWithApproval], + approvals, + clientResults, + ) + + // With the bug, the bogus approval object becomes the "result" + // instead of requesting client execution + expect(result.results).toHaveLength(1) + expect(result.results[0]?.result).toEqual({ + approved: true, + pendingExecution: true, + message: 'User approved this action', + }) + expect(result.needsClientExecution).toHaveLength(0) + }) + }) + + describe('client tool without approval', () => { + it('should request client execution when no result exists', async () => { + const toolCalls = [ + makeToolCall('call_1', 'get_local_data', '{"key":"myKey"}'), + ] + + const result = await executeToolCalls( + toolCalls, + [clientToolWithoutApproval], + new Map(), + new Map(), + ) + + expect(result.needsClientExecution).toHaveLength(1) + expect(result.needsClientExecution[0]?.toolCallId).toBe('call_1') + expect(result.results).toHaveLength(0) + }) + + it('should return result when client has executed', async () => { + const toolCalls = [ + makeToolCall('call_1', 'get_local_data', '{"key":"myKey"}'), + ] + const clientResults = new Map([['call_1', { value: 'stored_data' }]]) + + const result = await executeToolCalls( + toolCalls, + [clientToolWithoutApproval], + new Map(), + clientResults, + ) + + expect(result.results).toHaveLength(1) + expect(result.results[0]?.result).toEqual({ value: 'stored_data' }) + expect(result.needsClientExecution).toHaveLength(0) + }) + }) + + describe('server tool with approval', () => { + it('should request approval when no decision exists', async () => { + 
const toolCalls = [ + makeToolCall('call_1', 'delete_record', '{"id":"rec_123"}'), + ] + + const result = await executeToolCalls( + toolCalls, + [serverToolWithApproval], + new Map(), + new Map(), + ) + + expect(result.needsApproval).toHaveLength(1) + expect(result.needsApproval[0]?.approvalId).toBe('approval_call_1') + expect(result.results).toHaveLength(0) + }) + + it('should execute server tool after approval', async () => { + const toolCalls = [ + makeToolCall('call_1', 'delete_record', '{"id":"rec_123"}'), + ] + const approvals = new Map([['approval_call_1', true]]) + + const result = await executeToolCalls( + toolCalls, + [serverToolWithApproval], + approvals, + new Map(), + ) + + expect(result.results).toHaveLength(1) + expect(result.results[0]?.result).toEqual({ deleted: true }) + expect(serverToolWithApproval.execute).toHaveBeenCalledWith({ + id: 'rec_123', + }) + }) + }) + + describe('argument normalization', () => { + it('should normalize empty arguments to empty object', async () => { + const tool: Tool = { + name: 'simple_tool', + description: 'A tool with no required args', + inputSchema: z.object({}), + execute: vi.fn(() => ({ done: true })), + } + + const toolCalls = [makeToolCall('call_1', 'simple_tool', '')] + + const result = await executeToolCalls( + toolCalls, + [tool], + new Map(), + new Map(), + ) + + expect(result.results).toHaveLength(1) + expect(tool.execute).toHaveBeenCalledWith({}) + }) + }) +}) diff --git a/packages/typescript/smoke-tests/adapters/src/tests/apr-approval-flow.ts b/packages/typescript/smoke-tests/adapters/src/tests/apr-approval-flow.ts index f4d3e9a0..66db9fe4 100644 --- a/packages/typescript/smoke-tests/adapters/src/tests/apr-approval-flow.ts +++ b/packages/typescript/smoke-tests/adapters/src/tests/apr-approval-flow.ts @@ -119,8 +119,7 @@ export async function runAPR( requestRun.toolCalls.length > 0 && requestRun.approvalRequests.length > 0 && toolExecuteCalled && - toolExecuteCallCount === 1 && - hasHammerInResponse + 
toolExecuteCallCount === 1 debugData.chunks = [...requestRun.chunks, ...approvedRun.chunks] debugData.finalMessages = approvedRun.reconstructedMessages @@ -140,7 +139,7 @@ export async function runAPR( requestRun.toolCalls.length > 0 }, approvalRequestFound: ${ requestRun.approvalRequests.length > 0 - }, toolExecuteCalled: ${toolExecuteCalled}, toolExecuteCallCount: ${toolExecuteCallCount}, hasHammerInResponse: ${hasHammerInResponse}`, + }, toolExecuteCalled: ${toolExecuteCalled}, toolExecuteCallCount: ${toolExecuteCallCount}`, } await writeDebugFile(adapterContext.adapterName, testName, debugData) diff --git a/packages/typescript/smoke-tests/e2e/package.json b/packages/typescript/smoke-tests/e2e/package.json index d0dc7fca..d39e4974 100644 --- a/packages/typescript/smoke-tests/e2e/package.json +++ b/packages/typescript/smoke-tests/e2e/package.json @@ -18,10 +18,10 @@ "@tanstack/ai-client": "workspace:*", "@tanstack/ai-openai": "workspace:*", "@tanstack/ai-react": "workspace:*", - "@tanstack/nitro-v2-vite-plugin": "^1.141.0", - "@tanstack/react-router": "^1.141.1", - "@tanstack/react-start": "^1.141.1", - "@tanstack/router-plugin": "^1.139.7", + "@tanstack/nitro-v2-vite-plugin": "^1.154.7", + "@tanstack/react-router": "^1.158.4", + "@tanstack/react-start": "^1.159.0", + "@tanstack/router-plugin": "^1.158.4", "@tanstack/tests-adapters": "workspace:*", "react": "^19.2.3", "react-dom": "^19.2.3", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index dc60caa3..b10f0363 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -118,20 +118,20 @@ importers: specifier: ^0.8.2 version: 0.8.4(@types/react-dom@19.2.3(@types/react@19.2.7))(@types/react@19.2.7)(csstype@3.2.3)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(solid-js@1.9.10) '@tanstack/react-router': - specifier: ^1.141.1 - version: 1.141.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + specifier: ^1.158.4 + version: 1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) '@tanstack/react-router-devtools': - specifier: 
^1.139.7 - version: 1.141.1(@tanstack/react-router@1.141.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@tanstack/router-core@1.157.16)(csstype@3.2.3)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(solid-js@1.9.10) + specifier: ^1.158.4 + version: 1.158.4(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@tanstack/router-core@1.158.4)(csstype@3.2.3)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) '@tanstack/react-router-ssr-query': - specifier: ^1.139.7 - version: 1.141.1(@tanstack/query-core@5.90.12)(@tanstack/react-query@5.90.12(react@19.2.3))(@tanstack/react-router@1.141.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@tanstack/router-core@1.157.16)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + specifier: ^1.158.4 + version: 1.158.4(@tanstack/query-core@5.90.12)(@tanstack/react-query@5.90.12(react@19.2.3))(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@tanstack/router-core@1.158.4)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) '@tanstack/react-start': - specifier: ^1.141.1 - version: 1.141.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + specifier: ^1.159.0 + version: 1.159.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/router-plugin': - specifier: ^1.139.7 - version: 
1.141.1(@tanstack/react-router@1.141.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + specifier: ^1.158.4 + version: 1.158.4(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) capnweb: specifier: ^0.1.0 version: 0.1.0 @@ -227,29 +227,29 @@ importers: specifier: workspace:* version: link:../../packages/typescript/ai-react-ui '@tanstack/nitro-v2-vite-plugin': - specifier: ^1.141.0 + specifier: ^1.154.7 version: 1.154.7(rolldown@1.0.0-beta.53)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/react-devtools': specifier: ^0.8.2 version: 0.8.4(@types/react-dom@19.2.3(@types/react@19.2.7))(@types/react@19.2.7)(csstype@3.2.3)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(solid-js@1.9.10) '@tanstack/react-router': - specifier: ^1.141.1 - version: 1.157.16(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + specifier: ^1.158.4 + version: 1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) '@tanstack/react-router-devtools': - specifier: ^1.139.7 - version: 1.157.16(@tanstack/react-router@1.157.16(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@tanstack/router-core@1.157.16)(csstype@3.2.3)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + specifier: ^1.158.4 + version: 1.158.4(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@tanstack/router-core@1.158.4)(csstype@3.2.3)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) '@tanstack/react-router-ssr-query': - 
specifier: ^1.139.7 - version: 1.141.1(@tanstack/query-core@5.90.12)(@tanstack/react-query@5.90.12(react@19.2.3))(@tanstack/react-router@1.157.16(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@tanstack/router-core@1.157.16)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + specifier: ^1.158.4 + version: 1.158.4(@tanstack/query-core@5.90.12)(@tanstack/react-query@5.90.12(react@19.2.3))(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@tanstack/router-core@1.158.4)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) '@tanstack/react-start': - specifier: ^1.141.1 - version: 1.141.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + specifier: ^1.159.0 + version: 1.159.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/react-store': specifier: ^0.8.0 version: 0.8.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) '@tanstack/router-plugin': - specifier: ^1.139.7 - version: 1.141.1(@tanstack/react-router@1.157.16(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + specifier: ^1.158.4 + version: 
1.158.4(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/store': specifier: ^0.8.0 version: 0.8.0 @@ -363,11 +363,11 @@ importers: specifier: workspace:* version: link:../../packages/typescript/ai-solid-ui '@tanstack/nitro-v2-vite-plugin': - specifier: ^1.141.0 - version: 1.141.0(rolldown@1.0.0-beta.53)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + specifier: ^1.154.7 + version: 1.154.7(rolldown@1.0.0-beta.53)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/router-plugin': - specifier: ^1.139.7 - version: 1.141.1(@tanstack/react-router@1.157.16(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + specifier: ^1.158.4 + version: 1.158.4(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/solid-ai-devtools': specifier: workspace:* version: link:../../packages/typescript/solid-ai-devtools @@ -379,13 +379,13 @@ importers: version: 1.141.1(solid-js@1.9.10) '@tanstack/solid-router-devtools': specifier: ^1.139.10 - version: 
1.141.1(@tanstack/router-core@1.157.16)(@tanstack/solid-router@1.141.1(solid-js@1.9.10))(csstype@3.2.3)(solid-js@1.9.10) + version: 1.141.1(@tanstack/router-core@1.158.4)(@tanstack/solid-router@1.141.1(solid-js@1.9.10))(csstype@3.2.3)(solid-js@1.9.10) '@tanstack/solid-router-ssr-query': specifier: ^1.139.10 - version: 1.141.1(@tanstack/query-core@5.90.12)(@tanstack/router-core@1.157.16)(@tanstack/solid-query@5.90.15(solid-js@1.9.10))(@tanstack/solid-router@1.141.1(solid-js@1.9.10))(eslint@9.39.2(jiti@2.6.1))(solid-js@1.9.10)(typescript@5.9.3) + version: 1.141.1(@tanstack/query-core@5.90.12)(@tanstack/router-core@1.158.4)(@tanstack/solid-query@5.90.15(solid-js@1.9.10))(@tanstack/solid-router@1.141.1(solid-js@1.9.10))(eslint@9.39.2(jiti@2.6.1))(solid-js@1.9.10)(typescript@5.9.3) '@tanstack/solid-start': specifier: ^1.139.10 - version: 1.141.1(@tanstack/react-router@1.157.16(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(solid-js@1.9.10)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 1.141.1(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(solid-js@1.9.10)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/solid-store': specifier: ^0.8.0 version: 0.8.0(solid-js@1.9.10) @@ -1170,17 +1170,17 @@ importers: specifier: workspace:* version: link:../../ai-react '@tanstack/nitro-v2-vite-plugin': - specifier: ^1.141.0 - version: 1.141.0(rolldown@1.0.0-beta.53)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + specifier: ^1.154.7 + version: 
1.154.7(rolldown@1.0.0-beta.53)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/react-router': - specifier: ^1.141.1 - version: 1.141.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + specifier: ^1.158.4 + version: 1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) '@tanstack/react-start': - specifier: ^1.141.1 - version: 1.141.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + specifier: ^1.159.0 + version: 1.159.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/router-plugin': - specifier: ^1.139.7 - version: 1.141.1(@tanstack/react-router@1.141.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + specifier: ^1.158.4 + version: 1.158.4(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/tests-adapters': specifier: workspace:* version: link:../adapters @@ -1280,17 +1280,17 @@ importers: specifier: workspace:* version: 
link:../../packages/typescript/ai-react-ui '@tanstack/nitro-v2-vite-plugin': - specifier: ^1.141.0 - version: 1.141.0(rolldown@1.0.0-beta.53)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + specifier: ^1.154.7 + version: 1.154.7(rolldown@1.0.0-beta.53)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/react-router': - specifier: ^1.141.1 - version: 1.141.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + specifier: ^1.158.4 + version: 1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) '@tanstack/react-start': - specifier: ^1.141.1 - version: 1.141.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + specifier: ^1.159.0 + version: 1.159.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/start': specifier: ^1.120.20 - version: 1.120.20(@types/node@24.10.3)(db0@0.3.4)(ioredis@5.9.2)(jiti@2.6.1)(lightningcss@1.30.2)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(rolldown@1.0.0-beta.53)(terser@5.44.1)(tsx@4.21.0)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))(yaml@2.8.2) + version: 
1.120.20(@types/node@24.10.3)(db0@0.3.4)(ioredis@5.8.2)(jiti@2.6.1)(lightningcss@1.30.2)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(rolldown@1.0.0-beta.53)(terser@5.44.1)(tsx@4.21.0)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))(yaml@2.8.2) highlight.js: specifier: ^11.11.1 version: 11.11.1 @@ -3545,12 +3545,6 @@ packages: resolution: {integrity: sha512-xyIfof8eHBuub1CkBnbKNKQXeRZC4dClhmzePHVOEel4G7lk/dW+TQ16da7CFdeNLv6u6Owf5VoBQxoo6DFTSA==} engines: {node: '>=12'} - '@tanstack/nitro-v2-vite-plugin@1.141.0': - resolution: {integrity: sha512-OW0U7ftm4unRKhL9AZoTtdYIT3UnKfY8UQ25QNzI2uUPeVOyE9/yINFEgPGzS6gn4jQmJ3N2QvFZNvEjrbD0iA==} - engines: {node: '>=22.12'} - peerDependencies: - vite: '>=7.0.0' - '@tanstack/nitro-v2-vite-plugin@1.154.7': resolution: {integrity: sha512-THhjYwW+cREhmQyW/iATonY46RwYV8tbMnxBzIu77ceQOIHxkA1kVhLecb/oG5VdTduQnHVe90BD9qohX0mDHg==} engines: {node: '>=22.12'} @@ -3574,32 +3568,20 @@ packages: peerDependencies: react: ^18 || ^19 - '@tanstack/react-router-devtools@1.141.1': - resolution: {integrity: sha512-+XCn9cXSe1fZAD9jRrezEYE0ojn9U+Y0lRTRFdR8n51wx0UzJ6xe/Pewtw0rp03h/zmBR0pX+HRNU9NJDneWGA==} - engines: {node: '>=12'} - peerDependencies: - '@tanstack/react-router': ^1.141.1 - '@tanstack/router-core': ^1.141.1 - react: '>=18.0.0 || >=19.0.0' - react-dom: '>=18.0.0 || >=19.0.0' - peerDependenciesMeta: - '@tanstack/router-core': - optional: true - - '@tanstack/react-router-devtools@1.157.16': - resolution: {integrity: sha512-g6ekyzumfLBX6T5e+Vu2r37Z2CFJKrWRFqIy3vZ6A3x7OcuPV8uXNjyrLSiT/IsGTiF8YzwI4nWJa4fyd7NlCw==} + '@tanstack/react-router-devtools@1.158.4': + resolution: {integrity: sha512-/EkrrJGTPC7MwLfcYYmZM71ANDMLbwcYvBtDA+48LqHUKal8mpWlaodiWdFFnVQ7ny/unbUxljgdrNV9YZiyFQ==} engines: {node: '>=12'} peerDependencies: - 
'@tanstack/react-router': ^1.157.16 - '@tanstack/router-core': ^1.157.16 + '@tanstack/react-router': ^1.158.4 + '@tanstack/router-core': ^1.158.4 react: '>=18.0.0 || >=19.0.0' react-dom: '>=18.0.0 || >=19.0.0' peerDependenciesMeta: '@tanstack/router-core': optional: true - '@tanstack/react-router-ssr-query@1.141.1': - resolution: {integrity: sha512-80KEKpHx9OZwXcdCOZDyftqxWMP8QHGU6LQgyIsuunPrf76Wf1riyBnZrPOAOJPcjM3PDhTHLNIfmRBnAtItUw==} + '@tanstack/react-router-ssr-query@1.158.4': + resolution: {integrity: sha512-f+XzxO06ILM2i5CGtWqcb3+yaAvp8XgT5hMykKmwwaBnf3Ctc6O8tN/05Ovj0ajXWuROk3HTjg67OcWD7JxI6Q==} engines: {node: '>=12'} peerDependencies: '@tanstack/query-core': '>=5.90.0' @@ -3615,8 +3597,8 @@ packages: react: '>=18.0.0 || >=19.0.0' react-dom: '>=18.0.0 || >=19.0.0' - '@tanstack/react-router@1.157.16': - resolution: {integrity: sha512-xwFQa7S7dhBhm3aJYwU79cITEYgAKSrcL6wokaROIvl2JyIeazn8jueWqUPJzFjv+QF6Q8euKRlKUEyb5q2ymg==} + '@tanstack/react-router@1.158.4': + resolution: {integrity: sha512-i15xXumgvpuM+4NSuIwgouGezuj9eHjZsgpTZSQ7E9pa8rYmhZbWnf8xU68qaLmaKIol/e75o/YzVH2QWHs3iQ==} engines: {node: '>=12'} peerDependencies: react: '>=18.0.0 || >=19.0.0' @@ -3629,6 +3611,13 @@ packages: react: '>=18.0.0 || >=19.0.0' react-dom: '>=18.0.0 || >=19.0.0' + '@tanstack/react-start-client@1.158.4': + resolution: {integrity: sha512-ctEBgpYAPZ3i4EPZlJ45XS/lXPO73MkELec+hXf8NfK0lDQDaUy7LfWu41NPaftdZFJPOncDCfutwpUXD98YlA==} + engines: {node: '>=22.12.0'} + peerDependencies: + react: '>=18.0.0 || >=19.0.0' + react-dom: '>=18.0.0 || >=19.0.0' + '@tanstack/react-start-plugin@1.131.50': resolution: {integrity: sha512-ys+sGvnnE8BUNjGsngg+MGn3F5lV4okL5CWEKFzjBSjQsrTN7apGfmqvBP3O6PkRPHpXZ8X3Z5QsFvSc0CaDRQ==} engines: {node: '>=12'} @@ -3647,8 +3636,15 @@ packages: react: '>=18.0.0 || >=19.0.0' react-dom: '>=18.0.0 || >=19.0.0' - '@tanstack/react-start@1.141.1': - resolution: {integrity: sha512-03iELlg9T9ZN9rKAM1BTCCIBptLbaoZYCZXe0xGf4ZLs3Md+EhmJZibtKluclVQcnjzeiE0T17j1A/YxvVwTZg==} + 
'@tanstack/react-start-server@1.159.0': + resolution: {integrity: sha512-1nPj7TEOpoIlTW0lftaHuU9Ol1ZDQwRCUWr6UvaPUbapq9nWR8kwYFjyCLbopBjyakFFNgz88/stdbZObt5h2A==} + engines: {node: '>=22.12.0'} + peerDependencies: + react: '>=18.0.0 || >=19.0.0' + react-dom: '>=18.0.0 || >=19.0.0' + + '@tanstack/react-start@1.159.0': + resolution: {integrity: sha512-/ky8Pbu0cmj5dAQfi8LXHpAd/eepyQqDo0eSI/OPYQ2wZ8u8UPwycFvou8t8mq5pkinu+l7JX45UD7mNvzvVNg==} engines: {node: '>=22.12.0'} peerDependencies: react: '>=18.0.0 || >=19.0.0' @@ -3669,8 +3665,8 @@ packages: resolution: {integrity: sha512-fR1GGpp6v3dVKu4KIAjEh+Sd0qGLQd/wvCOVHeopSY6aFidXKCzwrS5cBOBqoPPWTKmn6CdW1a0CzFr5Furdog==} engines: {node: '>=12'} - '@tanstack/router-core@1.157.16': - resolution: {integrity: sha512-eJuVgM7KZYTTr4uPorbUzUflmljMVcaX2g6VvhITLnHmg9SBx9RAgtQ1HmT+72mzyIbRSlQ1q0fY/m+of/fosA==} + '@tanstack/router-core@1.158.4': + resolution: {integrity: sha512-KikgYdyrEFqsjjgv9pMhDTMmASMAyFRvUiKFdQPQtXq3aD1qv/zck4CbA4bfzp9N9nYu/qvWwU1mlYU4u5JeXg==} engines: {node: '>=12'} '@tanstack/router-devtools-core@1.141.1': @@ -3684,11 +3680,11 @@ packages: csstype: optional: true - '@tanstack/router-devtools-core@1.157.16': - resolution: {integrity: sha512-XBJTs/kMZYK6J2zhbGucHNuypwDB1t2vi8K5To+V6dUnLGBEyfQTf01fegiF4rpL1yXgomdGnP6aTiOFgldbVg==} + '@tanstack/router-devtools-core@1.158.4': + resolution: {integrity: sha512-9MKzstYp/6sNRSwJY2b9ipVW8b8/x1iSFNfLhOJur2tnjB3RhwCDfy0u+to70BrRpBEWeq7jvJoVdP029gzUUg==} engines: {node: '>=12'} peerDependencies: - '@tanstack/router-core': ^1.157.16 + '@tanstack/router-core': ^1.158.4 csstype: ^3.0.10 peerDependenciesMeta: csstype: @@ -3702,6 +3698,10 @@ packages: resolution: {integrity: sha512-21RbVAoIDn7s/n/PKMN6U60d5hCeVADrBH/uN6B/poMT4MVYtJXqISVzkc2RAboVRw6eRdYFeF+YlwA3nF6y3Q==} engines: {node: '>=12'} + '@tanstack/router-generator@1.158.4': + resolution: {integrity: sha512-RQmqMTT0oV8dS/3Glcq9SPzDZqOPyKb/LVFUkNoTfMwW88WyGnQcYqZAkmVk/CGBWWDfwObOUZoGq5jTF7bG8w==} + engines: {node: 
'>=12'} + '@tanstack/router-plugin@1.131.50': resolution: {integrity: sha512-gdEBPGzx7llQNRnaqfPJ1iaPS3oqB8SlvKRG5l7Fxp4q4yINgkeowFYSKEhPOc9bjoNhGrIHOlvPTPXEzAQXzQ==} engines: {node: '>=12'} @@ -3744,6 +3744,27 @@ packages: webpack: optional: true + '@tanstack/router-plugin@1.158.4': + resolution: {integrity: sha512-g2sytAhljw6Jd6Klu37OZ75+o+vhiGdbWtnBy/4rYLC4NN6hSnjgJQRI3+h1CI1KQ4EUgsZYZr/hgE1KHoiWYQ==} + engines: {node: '>=12'} + peerDependencies: + '@rsbuild/core': '>=1.0.2' + '@tanstack/react-router': ^1.158.4 + vite: '>=5.0.0 || >=6.0.0 || >=7.0.0' + vite-plugin-solid: ^2.11.10 + webpack: '>=5.92.0' + peerDependenciesMeta: + '@rsbuild/core': + optional: true + '@tanstack/react-router': + optional: true + vite: + optional: true + vite-plugin-solid: + optional: true + webpack: + optional: true + '@tanstack/router-ssr-query-core@1.141.1': resolution: {integrity: sha512-bkRXUhktifxBewnnphH59E0sGcsUI1NmNqxzCAmXIb93xYgafhjUGGYwfK6FqFBOmCB5isr32exGO3+UMHJr/A==} engines: {node: '>=12'} @@ -3751,6 +3772,13 @@ packages: '@tanstack/query-core': '>=5.90.0' '@tanstack/router-core': '>=1.127.0' + '@tanstack/router-ssr-query-core@1.158.4': + resolution: {integrity: sha512-gZRx0pGaRc7NPrwQSAfnn/DVWEsd01cf5TaW5yTyf3R5ZP/I++KNEW3lBXyRo1RyKedPC45R+Id6HpDeEaidyg==} + engines: {node: '>=12'} + peerDependencies: + '@tanstack/query-core': '>=5.90.0' + '@tanstack/router-core': '>=1.127.0' + '@tanstack/router-utils@1.131.2': resolution: {integrity: sha512-sr3x0d2sx9YIJoVth0QnfEcAcl+39sQYaNQxThtHmRpyeFYNyM2TTH+Ud3TNEnI3bbzmLYEUD+7YqB987GzhDA==} engines: {node: '>=12'} @@ -3759,6 +3787,10 @@ packages: resolution: {integrity: sha512-/eFGKCiix1SvjxwgzrmH4pHjMiMxc+GA4nIbgEkG2RdAJqyxLcRhd7RPLG0/LZaJ7d0ad3jrtRqsHLv2152Vbw==} engines: {node: '>=12'} + '@tanstack/router-utils@1.158.0': + resolution: {integrity: sha512-qZ76eaLKU6Ae9iI/mc5zizBX149DXXZkBVVO3/QRIll79uKLJZHQlMKR++2ba7JsciBWz1pgpIBcCJPE9S0LVg==} + engines: {node: '>=12'} + '@tanstack/server-functions-plugin@1.131.2': resolution: 
{integrity: sha512-hWsaSgEZAVyzHg8+IcJWCEtfI9ZSlNELErfLiGHG9XCHEXMegFWsrESsKHlASzJqef9RsuOLDl+1IMPIskwdDw==} engines: {node: '>=12'} @@ -3840,6 +3872,10 @@ packages: resolution: {integrity: sha512-Rk/b0ekX7p0ZBKOg9WM5c632YPqu7GlvZSYnAjNi1GDp1/sET6g2Trp+GAjs1s8kakp2pMQ4sZUG/11grCMfJw==} engines: {node: '>=22.12.0'} + '@tanstack/start-client-core@1.158.4': + resolution: {integrity: sha512-qpUYwJMMCEKgJuMz2CJLt53XrObi1BSjV1gG5SgBWRRVOHL8zky55tu1fEqHEa26jTTA6mUcBnPzYE8vIjRpAw==} + engines: {node: '>=22.12.0'} + '@tanstack/start-config@1.120.20': resolution: {integrity: sha512-oH/mfTSHV8Qbil74tWicPLW6+kKmT3esXCnDzvrkhi3+N8ZuVUDr01Qpil0Wxf9lLPfM5L6VX03nF4hSU8vljg==} engines: {node: '>=12'} @@ -3848,6 +3884,10 @@ packages: react-dom: '>=18.0.0 || >=19.0.0' vite: ^6.0.0 + '@tanstack/start-fn-stubs@1.154.7': + resolution: {integrity: sha512-D69B78L6pcFN5X5PHaydv7CScQcKLzJeEYqs7jpuyyqGQHSUIZUjS955j+Sir8cHhuDIovCe2LmsYHeZfWf3dQ==} + engines: {node: '>=22.12.0'} + '@tanstack/start-plugin-core@1.131.50': resolution: {integrity: sha512-eFvMA0chqLtHbq+8ojp1fXN7AQjhmeoOpQaZaU1d51wb7ugetrn0k3OuHblxtE/O0L4HEC9s4X5zmFJt0vLh0w==} engines: {node: '>=12'} @@ -3860,6 +3900,12 @@ packages: peerDependencies: vite: '>=7.0.0' + '@tanstack/start-plugin-core@1.159.0': + resolution: {integrity: sha512-HGcji+Mhste9mDKUlKpRPfoIOaURr7UqQZ3AMb+6zpbXumc+apYW/CvlvWdF/hoZGBSVAniFpwXgV5L5IimnhA==} + engines: {node: '>=22.12.0'} + peerDependencies: + vite: '>=7.0.0' + '@tanstack/start-server-core@1.131.50': resolution: {integrity: sha512-3SWwwhW2GKMhPSaqWRal6Jj1Y9ObfdWEXKFQid1LBuk5xk/Es4bmW68o++MbVgs/GxUxyeZ3TRVqb0c7RG1sog==} engines: {node: '>=12'} @@ -3868,6 +3914,10 @@ packages: resolution: {integrity: sha512-Qk/lZ/+iGUyNYeAAuj89bLR6GXLD/9BIpAR2CUwlS+xXGL0kQmOFcb1UvccWZ2QwtW+csxJW4NeQOeMuqsfyhA==} engines: {node: '>=22.12.0'} + '@tanstack/start-server-core@1.159.0': + resolution: {integrity: sha512-oE9UkWc7uIDvjAOsmzZ65Vz+JLb4S+bhMLGjx84lWY0G+GelJJvdr0rQiUFTWPIsbIxO2pdyIY995H55VUcowg==} + engines: 
{node: '>=22.12.0'} + '@tanstack/start-server-functions-client@1.131.50': resolution: {integrity: sha512-4aM17fFdVAFH6uLPswKJxzrhhIjcCwKqzfTcgY3OnhUKnaZBTQwJA+nUHQCI6IWvEvrcrNVtFTtv13TkDk3YMw==} engines: {node: '>=12'} @@ -3896,6 +3946,10 @@ packages: resolution: {integrity: sha512-UPOQd4qsytgmc+pHeeS3oIZQazhyGAmEaCS/IrZI42TzpuVh2ZbLVssKEoDziheNP1dH5KT2lsL1bU9asAw7tA==} engines: {node: '>=22.12.0'} + '@tanstack/start-storage-context@1.158.4': + resolution: {integrity: sha512-tz70q/6LTytstBIMRYt5GDRjPJPOHjnPNay85RJdq9ZlQKryeDThnshEttlBTDAxZP7wtwOv00lcAgFLFGP1hA==} + engines: {node: '>=22.12.0'} + '@tanstack/start@1.120.20': resolution: {integrity: sha512-fQO+O/5xJpli5KlV6pwDz6DtpbqO/0atdVSyVnkemzk0Mej9azm4HXtw+cKkIPtsSplWs4B1EbMtgGMb9ADhSA==} engines: {node: '>=12'} @@ -3918,6 +3972,10 @@ packages: resolution: {integrity: sha512-CJrWtr6L9TVzEImm9S7dQINx+xJcYP/aDkIi6gnaWtIgbZs1pnzsE0yJc2noqXZ+yAOqLx3TBGpBEs9tS0P9/A==} engines: {node: '>=12'} + '@tanstack/virtual-file-routes@1.154.7': + resolution: {integrity: sha512-cHHDnewHozgjpI+MIVp9tcib6lYEQK5MyUr0ChHpHFGBl8Xei55rohFK0I0ve/GKoHeioaK42Smd8OixPp6CTg==} + engines: {node: '>=12'} + '@tanstack/vite-config@0.4.1': resolution: {integrity: sha512-FOl8EF6SAcljanKSm5aBeJaflFcxQAytTbxtNW8HC6D4x+UBW68IC4tBcrlrsI0wXHBmC/Gz4Ovvv8qCtiXSgQ==} engines: {node: '>=18'} @@ -4613,6 +4671,9 @@ packages: babel-dead-code-elimination@1.0.10: resolution: {integrity: sha512-DV5bdJZTzZ0zn0DC24v3jD7Mnidh6xhKa4GfKCbq3sfW8kaWhDdZjP3i81geA8T33tdYqWKw4D3fVv0CwEgKVA==} + babel-dead-code-elimination@1.0.12: + resolution: {integrity: sha512-GERT7L2TiYcYDtYk1IpD+ASAYXjKbLTDPhBtYj7X1NuRMDTMtAx9kyBenub1Ev41lo91OHCKdmP+egTDmfQ7Ig==} + babel-plugin-jsx-dom-expressions@0.40.3: resolution: {integrity: sha512-5HOwwt0BYiv/zxl7j8Pf2bGL6rDXfV6nUhLs8ygBX+EFJXzBPHM/euj9j/6deMZ6wa52Wb2PBaAV5U/jKwIY1w==} peerDependencies: @@ -5778,6 +5839,15 @@ packages: crossws: optional: true + h3@2.0.1-rc.11: + resolution: {integrity: 
sha512-2myzjCqy32c1As9TjZW9fNZXtLqNedjFSrdFy2AjFBQQ3LzrnGoDdFDYfC0tV2e4vcyfJ2Sfo/F6NQhO2Ly/Mw==} + engines: {node: '>=20.11.1'} + peerDependencies: + crossws: ^0.4.1 + peerDependenciesMeta: + crossws: + optional: true + happy-dom@20.0.11: resolution: {integrity: sha512-QsCdAUHAmiDeKeaNojb1OHOPF7NjcWPBR7obdu3NwH2a/oyQaLg5d0aaCy/9My6CdPChYF07dvz5chaXBGaD4g==} engines: {node: '>=20.0.0'} @@ -7698,6 +7768,11 @@ packages: sprintf-js@1.0.3: resolution: {integrity: sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==} + srvx@0.10.1: + resolution: {integrity: sha512-A//xtfak4eESMWWydSRFUVvCTQbSwivnGCEf8YGPe2eHU0+Z6znfUTCPF0a7oV3sObSOcrXHlL6Bs9vVctfXdg==} + engines: {node: '>=20.16.0'} + hasBin: true + srvx@0.8.16: resolution: {integrity: sha512-hmcGW4CgroeSmzgF1Ihwgl+Ths0JqAJ7HwjP2X7e3JzY7u4IydLMcdnlqGQiQGUswz+PO9oh/KtCpOISIvs9QQ==} engines: {node: '>=20.16.0'} @@ -10815,42 +10890,6 @@ snapshots: '@tanstack/history@1.154.14': {} - '@tanstack/nitro-v2-vite-plugin@1.141.0(rolldown@1.0.0-beta.53)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': - dependencies: - nitropack: 2.12.9(rolldown@1.0.0-beta.53) - pathe: 2.0.3 - vite: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) - transitivePeerDependencies: - - '@azure/app-configuration' - - '@azure/cosmos' - - '@azure/data-tables' - - '@azure/identity' - - '@azure/keyvault-secrets' - - '@azure/storage-blob' - - '@capacitor/preferences' - - '@deno/kv' - - '@electric-sql/pglite' - - '@libsql/client' - - '@netlify/blobs' - - '@planetscale/database' - - '@upstash/redis' - - '@vercel/blob' - - '@vercel/functions' - - '@vercel/kv' - - aws4fetch - - bare-abort-controller - - better-sqlite3 - - drizzle-orm - - encoding - - idb-keyval - - mysql2 - - react-native-b4a - - rolldown - - sqlite3 - - supports-color - - uploadthing - - xml2js - 
'@tanstack/nitro-v2-vite-plugin@1.154.7(rolldown@1.0.0-beta.53)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: nitropack: 2.13.1(rolldown@1.0.0-beta.53) @@ -10907,46 +10946,23 @@ snapshots: '@tanstack/query-core': 5.90.12 react: 19.2.3 - '@tanstack/react-router-devtools@1.141.1(@tanstack/react-router@1.141.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@tanstack/router-core@1.157.16)(csstype@3.2.3)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(solid-js@1.9.10)': + '@tanstack/react-router-devtools@1.158.4(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@tanstack/router-core@1.158.4)(csstype@3.2.3)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': dependencies: - '@tanstack/react-router': 1.141.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - '@tanstack/router-devtools-core': 1.141.1(@tanstack/router-core@1.157.16)(csstype@3.2.3)(solid-js@1.9.10) + '@tanstack/react-router': 1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@tanstack/router-devtools-core': 1.158.4(@tanstack/router-core@1.158.4)(csstype@3.2.3) react: 19.2.3 react-dom: 19.2.3(react@19.2.3) optionalDependencies: - '@tanstack/router-core': 1.157.16 + '@tanstack/router-core': 1.158.4 transitivePeerDependencies: - csstype - - solid-js - '@tanstack/react-router-devtools@1.157.16(@tanstack/react-router@1.157.16(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@tanstack/router-core@1.157.16)(csstype@3.2.3)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': - dependencies: - '@tanstack/react-router': 1.157.16(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - '@tanstack/router-devtools-core': 1.157.16(@tanstack/router-core@1.157.16)(csstype@3.2.3) - react: 19.2.3 - react-dom: 19.2.3(react@19.2.3) - optionalDependencies: - '@tanstack/router-core': 1.157.16 - transitivePeerDependencies: - - csstype - - 
'@tanstack/react-router-ssr-query@1.141.1(@tanstack/query-core@5.90.12)(@tanstack/react-query@5.90.12(react@19.2.3))(@tanstack/react-router@1.141.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@tanstack/router-core@1.157.16)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + '@tanstack/react-router-ssr-query@1.158.4(@tanstack/query-core@5.90.12)(@tanstack/react-query@5.90.12(react@19.2.3))(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@tanstack/router-core@1.158.4)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': dependencies: '@tanstack/query-core': 5.90.12 '@tanstack/react-query': 5.90.12(react@19.2.3) - '@tanstack/react-router': 1.141.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - '@tanstack/router-ssr-query-core': 1.141.1(@tanstack/query-core@5.90.12)(@tanstack/router-core@1.157.16) - react: 19.2.3 - react-dom: 19.2.3(react@19.2.3) - transitivePeerDependencies: - - '@tanstack/router-core' - - '@tanstack/react-router-ssr-query@1.141.1(@tanstack/query-core@5.90.12)(@tanstack/react-query@5.90.12(react@19.2.3))(@tanstack/react-router@1.157.16(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@tanstack/router-core@1.157.16)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': - dependencies: - '@tanstack/query-core': 5.90.12 - '@tanstack/react-query': 5.90.12(react@19.2.3) - '@tanstack/react-router': 1.157.16(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - '@tanstack/router-ssr-query-core': 1.141.1(@tanstack/query-core@5.90.12)(@tanstack/router-core@1.157.16) + '@tanstack/react-router': 1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@tanstack/router-ssr-query-core': 1.158.4(@tanstack/query-core@5.90.12)(@tanstack/router-core@1.158.4) react: 19.2.3 react-dom: 19.2.3(react@19.2.3) transitivePeerDependencies: @@ -10963,11 +10979,11 @@ snapshots: tiny-invariant: 1.3.3 tiny-warning: 1.0.3 - '@tanstack/react-router@1.157.16(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + 
'@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': dependencies: '@tanstack/history': 1.154.14 '@tanstack/react-store': 0.8.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - '@tanstack/router-core': 1.157.16 + '@tanstack/router-core': 1.158.4 isbot: 5.1.32 react: 19.2.3 react-dom: 19.2.3(react@19.2.3) @@ -10984,9 +11000,19 @@ snapshots: tiny-invariant: 1.3.3 tiny-warning: 1.0.3 - '@tanstack/react-start-plugin@1.131.50(@tanstack/react-router@1.157.16(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@vitejs/plugin-react@4.7.0(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(rolldown@1.0.0-beta.53)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + '@tanstack/react-start-client@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@tanstack/react-router': 1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@tanstack/router-core': 1.158.4 + '@tanstack/start-client-core': 1.158.4 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + tiny-invariant: 1.3.3 + tiny-warning: 1.0.3 + + '@tanstack/react-start-plugin@1.131.50(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@vitejs/plugin-react@4.7.0(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(rolldown@1.0.0-beta.53)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: - '@tanstack/start-plugin-core': 
1.131.50(@tanstack/react-router@1.157.16(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(rolldown@1.0.0-beta.53)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + '@tanstack/start-plugin-core': 1.131.50(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(rolldown@1.0.0-beta.53)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@vitejs/plugin-react': 4.7.0(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) pathe: 2.0.3 vite: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -11026,11 +11052,11 @@ snapshots: - webpack - xml2js - '@tanstack/react-start-router-manifest@1.120.19(@types/node@24.10.3)(db0@0.3.4)(ioredis@5.9.2)(jiti@2.6.1)(lightningcss@1.30.2)(rolldown@1.0.0-beta.53)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)': + '@tanstack/react-start-router-manifest@1.120.19(@types/node@24.10.3)(db0@0.3.4)(ioredis@5.8.2)(jiti@2.6.1)(lightningcss@1.30.2)(rolldown@1.0.0-beta.53)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)': dependencies: '@tanstack/router-core': 1.141.1 tiny-invariant: 1.3.3 - vinxi: 0.5.3(@types/node@24.10.3)(db0@0.3.4)(ioredis@5.9.2)(jiti@2.6.1)(lightningcss@1.30.2)(rolldown@1.0.0-beta.53)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + vinxi: 0.5.3(@types/node@24.10.3)(db0@0.3.4)(ioredis@5.8.2)(jiti@2.6.1)(lightningcss@1.30.2)(rolldown@1.0.0-beta.53)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) transitivePeerDependencies: - '@azure/app-configuration' - '@azure/cosmos' @@ -11088,15 +11114,27 @@ snapshots: transitivePeerDependencies: - crossws - 
'@tanstack/react-start@1.141.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + '@tanstack/react-start-server@1.159.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': dependencies: - '@tanstack/react-router': 1.141.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - '@tanstack/react-start-client': 1.141.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - '@tanstack/react-start-server': 1.141.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - '@tanstack/router-utils': 1.141.0 - '@tanstack/start-client-core': 1.141.1 - '@tanstack/start-plugin-core': 1.141.1(@tanstack/react-router@1.141.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) - '@tanstack/start-server-core': 1.141.1 + '@tanstack/history': 1.154.14 + '@tanstack/react-router': 1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@tanstack/router-core': 1.158.4 + '@tanstack/start-client-core': 1.158.4 + '@tanstack/start-server-core': 1.159.0 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + transitivePeerDependencies: + - crossws + + '@tanstack/react-start@1.159.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + dependencies: + '@tanstack/react-router': 1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@tanstack/react-start-client': 
1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@tanstack/react-start-server': 1.159.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@tanstack/router-utils': 1.158.0 + '@tanstack/start-client-core': 1.158.4 + '@tanstack/start-plugin-core': 1.159.0(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + '@tanstack/start-server-core': 1.159.0 pathe: 2.0.3 react: 19.2.3 react-dom: 19.2.3(react@19.2.3) @@ -11120,8 +11158,8 @@ snapshots: '@tanstack/history': 1.131.2 '@tanstack/store': 0.7.7 cookie-es: 1.2.2 - seroval: 1.4.0 - seroval-plugins: 1.4.0(seroval@1.4.0) + seroval: 1.5.0 + seroval-plugins: 1.5.0(seroval@1.5.0) tiny-invariant: 1.3.3 tiny-warning: 1.0.3 @@ -11135,7 +11173,7 @@ snapshots: tiny-invariant: 1.3.3 tiny-warning: 1.0.3 - '@tanstack/router-core@1.157.16': + '@tanstack/router-core@1.158.4': dependencies: '@tanstack/history': 1.154.14 '@tanstack/store': 0.8.0 @@ -11145,9 +11183,9 @@ snapshots: tiny-invariant: 1.3.3 tiny-warning: 1.0.3 - '@tanstack/router-devtools-core@1.141.1(@tanstack/router-core@1.157.16)(csstype@3.2.3)(solid-js@1.9.10)': + '@tanstack/router-devtools-core@1.141.1(@tanstack/router-core@1.158.4)(csstype@3.2.3)(solid-js@1.9.10)': dependencies: - '@tanstack/router-core': 1.157.16 + '@tanstack/router-core': 1.158.4 clsx: 2.1.1 goober: 2.1.18(csstype@3.2.3) solid-js: 1.9.10 @@ -11155,9 +11193,9 @@ snapshots: optionalDependencies: csstype: 3.2.3 - '@tanstack/router-devtools-core@1.157.16(@tanstack/router-core@1.157.16)(csstype@3.2.3)': + '@tanstack/router-devtools-core@1.158.4(@tanstack/router-core@1.158.4)(csstype@3.2.3)': dependencies: - '@tanstack/router-core': 1.157.16 + '@tanstack/router-core': 1.158.4 clsx: 2.1.1 goober: 2.1.18(csstype@3.2.3) 
tiny-invariant: 1.3.3 @@ -11190,7 +11228,20 @@ snapshots: transitivePeerDependencies: - supports-color - '@tanstack/router-plugin@1.131.50(@tanstack/react-router@1.157.16(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + '@tanstack/router-generator@1.158.4': + dependencies: + '@tanstack/router-core': 1.158.4 + '@tanstack/router-utils': 1.158.0 + '@tanstack/virtual-file-routes': 1.154.7 + prettier: 3.7.4 + recast: 0.23.11 + source-map: 0.7.6 + tsx: 4.21.0 + zod: 3.25.76 + transitivePeerDependencies: + - supports-color + + '@tanstack/router-plugin@1.131.50(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@babel/core': 7.28.5 '@babel/plugin-syntax-jsx': 7.27.1(@babel/core@7.28.5) @@ -11207,13 +11258,13 @@ snapshots: unplugin: 2.3.11 zod: 3.25.76 optionalDependencies: - '@tanstack/react-router': 1.157.16(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@tanstack/react-router': 1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) vite: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) vite-plugin-solid: 2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) transitivePeerDependencies: - supports-color - 
'@tanstack/router-plugin@1.141.1(@tanstack/react-router@1.141.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + '@tanstack/router-plugin@1.141.1(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@babel/core': 7.28.5 '@babel/plugin-syntax-jsx': 7.27.1(@babel/core@7.28.5) @@ -11230,13 +11281,13 @@ snapshots: unplugin: 2.3.11 zod: 3.25.76 optionalDependencies: - '@tanstack/react-router': 1.141.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@tanstack/react-router': 1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) vite: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) vite-plugin-solid: 2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) transitivePeerDependencies: - supports-color - '@tanstack/router-plugin@1.141.1(@tanstack/react-router@1.157.16(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + 
'@tanstack/router-plugin@1.158.4(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@babel/core': 7.28.5 '@babel/plugin-syntax-jsx': 7.27.1(@babel/core@7.28.5) @@ -11244,25 +11295,29 @@ snapshots: '@babel/template': 7.27.2 '@babel/traverse': 7.28.5 '@babel/types': 7.28.5 - '@tanstack/router-core': 1.141.1 - '@tanstack/router-generator': 1.141.1 - '@tanstack/router-utils': 1.141.0 - '@tanstack/virtual-file-routes': 1.141.0 - babel-dead-code-elimination: 1.0.10 + '@tanstack/router-core': 1.158.4 + '@tanstack/router-generator': 1.158.4 + '@tanstack/router-utils': 1.158.0 + '@tanstack/virtual-file-routes': 1.154.7 chokidar: 3.6.0 unplugin: 2.3.11 zod: 3.25.76 optionalDependencies: - '@tanstack/react-router': 1.157.16(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@tanstack/react-router': 1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) vite: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) vite-plugin-solid: 2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) transitivePeerDependencies: - supports-color - '@tanstack/router-ssr-query-core@1.141.1(@tanstack/query-core@5.90.12)(@tanstack/router-core@1.157.16)': + '@tanstack/router-ssr-query-core@1.141.1(@tanstack/query-core@5.90.12)(@tanstack/router-core@1.158.4)': dependencies: '@tanstack/query-core': 5.90.12 - '@tanstack/router-core': 1.157.16 + '@tanstack/router-core': 1.158.4 + + '@tanstack/router-ssr-query-core@1.158.4(@tanstack/query-core@5.90.12)(@tanstack/router-core@1.158.4)': + dependencies: + '@tanstack/query-core': 5.90.12 + '@tanstack/router-core': 1.158.4 
'@tanstack/router-utils@1.131.2': dependencies: @@ -11288,6 +11343,20 @@ snapshots: transitivePeerDependencies: - supports-color + '@tanstack/router-utils@1.158.0': + dependencies: + '@babel/core': 7.28.5 + '@babel/generator': 7.28.5 + '@babel/parser': 7.28.5 + '@babel/types': 7.28.5 + ansis: 4.2.0 + babel-dead-code-elimination: 1.0.12 + diff: 8.0.2 + pathe: 2.0.3 + tinyglobby: 0.2.15 + transitivePeerDependencies: + - supports-color + '@tanstack/server-functions-plugin@1.131.2(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@babel/code-frame': 7.27.1 @@ -11334,20 +11403,20 @@ snapshots: '@tanstack/query-core': 5.90.12 solid-js: 1.9.10 - '@tanstack/solid-router-devtools@1.141.1(@tanstack/router-core@1.157.16)(@tanstack/solid-router@1.141.1(solid-js@1.9.10))(csstype@3.2.3)(solid-js@1.9.10)': + '@tanstack/solid-router-devtools@1.141.1(@tanstack/router-core@1.158.4)(@tanstack/solid-router@1.141.1(solid-js@1.9.10))(csstype@3.2.3)(solid-js@1.9.10)': dependencies: - '@tanstack/router-devtools-core': 1.141.1(@tanstack/router-core@1.157.16)(csstype@3.2.3)(solid-js@1.9.10) + '@tanstack/router-devtools-core': 1.141.1(@tanstack/router-core@1.158.4)(csstype@3.2.3)(solid-js@1.9.10) '@tanstack/solid-router': 1.141.1(solid-js@1.9.10) solid-js: 1.9.10 optionalDependencies: - '@tanstack/router-core': 1.157.16 + '@tanstack/router-core': 1.158.4 transitivePeerDependencies: - csstype - '@tanstack/solid-router-ssr-query@1.141.1(@tanstack/query-core@5.90.12)(@tanstack/router-core@1.157.16)(@tanstack/solid-query@5.90.15(solid-js@1.9.10))(@tanstack/solid-router@1.141.1(solid-js@1.9.10))(eslint@9.39.2(jiti@2.6.1))(solid-js@1.9.10)(typescript@5.9.3)': + '@tanstack/solid-router-ssr-query@1.141.1(@tanstack/query-core@5.90.12)(@tanstack/router-core@1.158.4)(@tanstack/solid-query@5.90.15(solid-js@1.9.10))(@tanstack/solid-router@1.141.1(solid-js@1.9.10))(eslint@9.39.2(jiti@2.6.1))(solid-js@1.9.10)(typescript@5.9.3)': 
dependencies: '@tanstack/query-core': 5.90.12 - '@tanstack/router-ssr-query-core': 1.141.1(@tanstack/query-core@5.90.12)(@tanstack/router-core@1.157.16) + '@tanstack/router-ssr-query-core': 1.141.1(@tanstack/query-core@5.90.12)(@tanstack/router-core@1.158.4) '@tanstack/solid-query': 5.90.15(solid-js@1.9.10) '@tanstack/solid-router': 1.141.1(solid-js@1.9.10) eslint-plugin-solid: 0.14.5(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3) @@ -11392,13 +11461,13 @@ snapshots: transitivePeerDependencies: - crossws - '@tanstack/solid-start@1.141.1(@tanstack/react-router@1.157.16(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(solid-js@1.9.10)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + '@tanstack/solid-start@1.141.1(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(solid-js@1.9.10)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@tanstack/solid-router': 1.141.1(solid-js@1.9.10) '@tanstack/solid-start-client': 1.141.1(solid-js@1.9.10) '@tanstack/solid-start-server': 1.141.1(solid-js@1.9.10) '@tanstack/start-client-core': 1.141.1 - '@tanstack/start-plugin-core': 1.141.1(@tanstack/react-router@1.157.16(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + '@tanstack/start-plugin-core': 
1.141.1(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/start-server-core': 1.141.1 pathe: 2.0.3 solid-js: 1.9.10 @@ -11416,11 +11485,11 @@ snapshots: '@tanstack/store': 0.8.0 solid-js: 1.9.10 - '@tanstack/start-api-routes@1.120.19(@types/node@24.10.3)(db0@0.3.4)(ioredis@5.9.2)(jiti@2.6.1)(lightningcss@1.30.2)(rolldown@1.0.0-beta.53)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)': + '@tanstack/start-api-routes@1.120.19(@types/node@24.10.3)(db0@0.3.4)(ioredis@5.8.2)(jiti@2.6.1)(lightningcss@1.30.2)(rolldown@1.0.0-beta.53)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)': dependencies: '@tanstack/router-core': 1.141.1 '@tanstack/start-server-core': 1.141.1 - vinxi: 0.5.3(@types/node@24.10.3)(db0@0.3.4)(ioredis@5.9.2)(jiti@2.6.1)(lightningcss@1.30.2)(rolldown@1.0.0-beta.53)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + vinxi: 0.5.3(@types/node@24.10.3)(db0@0.3.4)(ioredis@5.8.2)(jiti@2.6.1)(lightningcss@1.30.2)(rolldown@1.0.0-beta.53)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) transitivePeerDependencies: - '@azure/app-configuration' - '@azure/cosmos' @@ -11483,12 +11552,21 @@ snapshots: tiny-invariant: 1.3.3 tiny-warning: 1.0.3 - '@tanstack/start-config@1.120.20(@types/node@24.10.3)(db0@0.3.4)(ioredis@5.9.2)(jiti@2.6.1)(lightningcss@1.30.2)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(rolldown@1.0.0-beta.53)(terser@5.44.1)(tsx@4.21.0)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))(yaml@2.8.2)': + '@tanstack/start-client-core@1.158.4': dependencies: - '@tanstack/react-router': 
1.157.16(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - '@tanstack/react-start-plugin': 1.131.50(@tanstack/react-router@1.157.16(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@vitejs/plugin-react@4.7.0(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(rolldown@1.0.0-beta.53)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + '@tanstack/router-core': 1.158.4 + '@tanstack/start-fn-stubs': 1.154.7 + '@tanstack/start-storage-context': 1.158.4 + seroval: 1.5.0 + tiny-invariant: 1.3.3 + tiny-warning: 1.0.3 + + '@tanstack/start-config@1.120.20(@types/node@24.10.3)(db0@0.3.4)(ioredis@5.8.2)(jiti@2.6.1)(lightningcss@1.30.2)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(rolldown@1.0.0-beta.53)(terser@5.44.1)(tsx@4.21.0)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))(yaml@2.8.2)': + dependencies: + '@tanstack/react-router': 1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@tanstack/react-start-plugin': 1.131.50(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@vitejs/plugin-react@4.7.0(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(rolldown@1.0.0-beta.53)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/router-generator': 1.141.1 - '@tanstack/router-plugin': 
1.141.1(@tanstack/react-router@1.157.16(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + '@tanstack/router-plugin': 1.158.4(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/server-functions-plugin': 1.141.0(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/start-server-functions-handler': 1.120.19 '@vitejs/plugin-react': 4.7.0(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) @@ -11497,7 +11575,7 @@ snapshots: ofetch: 1.5.1 react: 19.2.3 react-dom: 19.2.3(react@19.2.3) - vinxi: 0.5.3(@types/node@24.10.3)(db0@0.3.4)(ioredis@5.9.2)(jiti@2.6.1)(lightningcss@1.30.2)(rolldown@1.0.0-beta.53)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + vinxi: 0.5.3(@types/node@24.10.3)(db0@0.3.4)(ioredis@5.8.2)(jiti@2.6.1)(lightningcss@1.30.2)(rolldown@1.0.0-beta.53)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) vite: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) zod: 3.25.76 transitivePeerDependencies: @@ -11549,14 +11627,16 @@ snapshots: - xml2js - yaml - 
'@tanstack/start-plugin-core@1.131.50(@tanstack/react-router@1.157.16(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(rolldown@1.0.0-beta.53)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + '@tanstack/start-fn-stubs@1.154.7': {} + + '@tanstack/start-plugin-core@1.131.50(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(rolldown@1.0.0-beta.53)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@babel/code-frame': 7.26.2 '@babel/core': 7.28.5 '@babel/types': 7.28.5 '@tanstack/router-core': 1.131.50 '@tanstack/router-generator': 1.131.50 - '@tanstack/router-plugin': 1.131.50(@tanstack/react-router@1.157.16(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + '@tanstack/router-plugin': 1.131.50(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/router-utils': 1.131.2 '@tanstack/server-functions-plugin': 1.131.2(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/start-server-core': 1.131.50 @@ -11565,9 +11645,9 @@ snapshots: 
babel-dead-code-elimination: 1.0.10 cheerio: 1.1.2 h3: 1.13.0 - nitropack: 2.12.9(rolldown@1.0.0-beta.53) + nitropack: 2.13.1(rolldown@1.0.0-beta.53) pathe: 2.0.3 - ufo: 1.6.1 + ufo: 1.6.3 vite: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) vitefu: 1.1.1(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) xmlbuilder2: 3.1.1 @@ -11607,7 +11687,7 @@ snapshots: - webpack - xml2js - '@tanstack/start-plugin-core@1.141.1(@tanstack/react-router@1.141.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + '@tanstack/start-plugin-core@1.141.1(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@babel/code-frame': 7.26.2 '@babel/core': 7.28.5 @@ -11615,7 +11695,7 @@ snapshots: '@rolldown/pluginutils': 1.0.0-beta.40 '@tanstack/router-core': 1.141.1 '@tanstack/router-generator': 1.141.1 - '@tanstack/router-plugin': 1.141.1(@tanstack/react-router@1.141.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + '@tanstack/router-plugin': 
1.141.1(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/router-utils': 1.141.0 '@tanstack/server-functions-plugin': 1.141.0(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/start-client-core': 1.141.1 @@ -11639,26 +11719,24 @@ snapshots: - vite-plugin-solid - webpack - '@tanstack/start-plugin-core@1.141.1(@tanstack/react-router@1.157.16(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + '@tanstack/start-plugin-core@1.159.0(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: - '@babel/code-frame': 7.26.2 + '@babel/code-frame': 7.27.1 '@babel/core': 7.28.5 '@babel/types': 7.28.5 '@rolldown/pluginutils': 1.0.0-beta.40 - '@tanstack/router-core': 1.141.1 - '@tanstack/router-generator': 1.141.1 - '@tanstack/router-plugin': 1.141.1(@tanstack/react-router@1.157.16(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) - 
'@tanstack/router-utils': 1.141.0 - '@tanstack/server-functions-plugin': 1.141.0(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) - '@tanstack/start-client-core': 1.141.1 - '@tanstack/start-server-core': 1.141.1 - babel-dead-code-elimination: 1.0.10 + '@tanstack/router-core': 1.158.4 + '@tanstack/router-generator': 1.158.4 + '@tanstack/router-plugin': 1.158.4(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + '@tanstack/router-utils': 1.158.0 + '@tanstack/start-client-core': 1.158.4 + '@tanstack/start-server-core': 1.159.0 cheerio: 1.1.2 exsolve: 1.0.8 pathe: 2.0.3 - srvx: 0.8.16 + srvx: 0.10.1 tinyglobby: 0.2.15 - ufo: 1.6.1 + ufo: 1.6.3 vite: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) vitefu: 1.1.1(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) xmlbuilder2: 4.0.3 @@ -11681,7 +11759,7 @@ snapshots: isbot: 5.1.32 tiny-invariant: 1.3.3 tiny-warning: 1.0.3 - unctx: 2.4.1 + unctx: 2.5.0 '@tanstack/start-server-core@1.141.1': dependencies: @@ -11695,6 +11773,18 @@ snapshots: transitivePeerDependencies: - crossws + '@tanstack/start-server-core@1.159.0': + dependencies: + '@tanstack/history': 1.154.14 + '@tanstack/router-core': 1.158.4 + '@tanstack/start-client-core': 1.158.4 + '@tanstack/start-storage-context': 1.158.4 + h3-v2: h3@2.0.1-rc.11 + seroval: 1.5.0 + tiny-invariant: 1.3.3 + transitivePeerDependencies: + - crossws + '@tanstack/start-server-functions-client@1.131.50(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: 
'@tanstack/server-functions-plugin': 1.131.2(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) @@ -11745,13 +11835,17 @@ snapshots: dependencies: '@tanstack/router-core': 1.141.1 - '@tanstack/start@1.120.20(@types/node@24.10.3)(db0@0.3.4)(ioredis@5.9.2)(jiti@2.6.1)(lightningcss@1.30.2)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(rolldown@1.0.0-beta.53)(terser@5.44.1)(tsx@4.21.0)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))(yaml@2.8.2)': + '@tanstack/start-storage-context@1.158.4': + dependencies: + '@tanstack/router-core': 1.158.4 + + '@tanstack/start@1.120.20(@types/node@24.10.3)(db0@0.3.4)(ioredis@5.8.2)(jiti@2.6.1)(lightningcss@1.30.2)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(rolldown@1.0.0-beta.53)(terser@5.44.1)(tsx@4.21.0)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))(yaml@2.8.2)': dependencies: '@tanstack/react-start-client': 1.141.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - '@tanstack/react-start-router-manifest': 1.120.19(@types/node@24.10.3)(db0@0.3.4)(ioredis@5.9.2)(jiti@2.6.1)(lightningcss@1.30.2)(rolldown@1.0.0-beta.53)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + '@tanstack/react-start-router-manifest': 1.120.19(@types/node@24.10.3)(db0@0.3.4)(ioredis@5.8.2)(jiti@2.6.1)(lightningcss@1.30.2)(rolldown@1.0.0-beta.53)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) '@tanstack/react-start-server': 1.141.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - '@tanstack/start-api-routes': 
1.120.19(@types/node@24.10.3)(db0@0.3.4)(ioredis@5.9.2)(jiti@2.6.1)(lightningcss@1.30.2)(rolldown@1.0.0-beta.53)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) - '@tanstack/start-config': 1.120.20(@types/node@24.10.3)(db0@0.3.4)(ioredis@5.9.2)(jiti@2.6.1)(lightningcss@1.30.2)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(rolldown@1.0.0-beta.53)(terser@5.44.1)(tsx@4.21.0)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))(yaml@2.8.2) + '@tanstack/start-api-routes': 1.120.19(@types/node@24.10.3)(db0@0.3.4)(ioredis@5.8.2)(jiti@2.6.1)(lightningcss@1.30.2)(rolldown@1.0.0-beta.53)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + '@tanstack/start-config': 1.120.20(@types/node@24.10.3)(db0@0.3.4)(ioredis@5.8.2)(jiti@2.6.1)(lightningcss@1.30.2)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(rolldown@1.0.0-beta.53)(terser@5.44.1)(tsx@4.21.0)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))(yaml@2.8.2) '@tanstack/start-server-functions-client': 1.131.50(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/start-server-functions-handler': 1.120.19 '@tanstack/start-server-functions-server': 1.131.2(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) @@ -11824,6 +11918,8 @@ snapshots: '@tanstack/virtual-file-routes@1.141.0': {} + '@tanstack/virtual-file-routes@1.154.7': {} + '@tanstack/vite-config@0.4.1(@types/node@24.10.3)(rollup@4.55.1)(typescript@5.9.3)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: 
rollup-plugin-preserve-directives: 0.4.0(rollup@4.55.1) @@ -12208,7 +12304,7 @@ snapshots: node-forge: 1.3.3 pathe: 1.1.2 std-env: 3.10.0 - ufo: 1.6.1 + ufo: 1.6.3 untun: 0.1.3 uqr: 0.1.2 @@ -12747,6 +12843,15 @@ snapshots: transitivePeerDependencies: - supports-color + babel-dead-code-elimination@1.0.12: + dependencies: + '@babel/core': 7.28.5 + '@babel/parser': 7.28.5 + '@babel/traverse': 7.28.5 + '@babel/types': 7.28.5 + transitivePeerDependencies: + - supports-color + babel-plugin-jsx-dom-expressions@0.40.3(@babel/core@7.28.5): dependencies: '@babel/core': 7.28.5 @@ -14090,7 +14195,7 @@ snapshots: iron-webcrypto: 1.2.1 ohash: 1.1.6 radix3: 1.1.2 - ufo: 1.6.1 + ufo: 1.6.3 uncrypto: 0.1.3 unenv: 1.10.0 @@ -14125,6 +14230,11 @@ snapshots: rou3: 0.7.12 srvx: 0.8.16 + h3@2.0.1-rc.11: + dependencies: + rou3: 0.7.12 + srvx: 0.10.1 + happy-dom@20.0.11: dependencies: '@types/node': 20.19.26 @@ -16623,6 +16733,8 @@ snapshots: sprintf-js@1.0.3: {} + srvx@0.10.1: {} + srvx@0.8.16: {} stable-hash-x@0.2.0: {} @@ -17162,20 +17274,6 @@ snapshots: db0: 0.3.4 ioredis: 5.8.2 - unstorage@1.17.3(db0@0.3.4)(ioredis@5.9.2): - dependencies: - anymatch: 3.1.3 - chokidar: 4.0.3 - destr: 2.0.5 - h3: 1.15.4 - lru-cache: 10.4.3 - node-fetch-native: 1.6.7 - ofetch: 1.5.1 - ufo: 1.6.1 - optionalDependencies: - db0: 0.3.4 - ioredis: 5.9.2 - unstorage@1.17.4(db0@0.3.4)(ioredis@5.9.2): dependencies: anymatch: 3.1.3 @@ -17257,7 +17355,7 @@ snapshots: '@types/unist': 3.0.3 vfile-message: 4.0.3 - vinxi@0.5.3(@types/node@24.10.3)(db0@0.3.4)(ioredis@5.9.2)(jiti@2.6.1)(lightningcss@1.30.2)(rolldown@1.0.0-beta.53)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2): + vinxi@0.5.3(@types/node@24.10.3)(db0@0.3.4)(ioredis@5.8.2)(jiti@2.6.1)(lightningcss@1.30.2)(rolldown@1.0.0-beta.53)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2): dependencies: '@babel/core': 7.28.5 '@babel/plugin-syntax-jsx': 7.27.1(@babel/core@7.28.5) @@ -17279,7 +17377,7 @@ snapshots: hookable: 5.5.3 http-proxy: 1.18.1 micromatch: 4.0.8 - nitropack: 
2.12.9(rolldown@1.0.0-beta.53) + nitropack: 2.13.1(rolldown@1.0.0-beta.53) node-fetch-native: 1.6.7 path-to-regexp: 6.3.0 pathe: 1.1.2 @@ -17290,7 +17388,7 @@ snapshots: ufo: 1.6.1 unctx: 2.4.1 unenv: 1.10.0 - unstorage: 1.17.3(db0@0.3.4)(ioredis@5.9.2) + unstorage: 1.17.3(db0@0.3.4)(ioredis@5.8.2) vite: 6.4.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) zod: 3.25.76 transitivePeerDependencies: diff --git a/scripts/fix-version-bump.ts b/scripts/fix-version-bump.ts new file mode 100644 index 00000000..8a5c3103 --- /dev/null +++ b/scripts/fix-version-bump.ts @@ -0,0 +1,156 @@ +import { readFileSync, writeFileSync } from 'node:fs' +import { resolve } from 'node:path' +import { glob } from 'tinyglobby' + +const WRONG_VERSION = '1.0.0' + +interface PackageToFix { + name: string + packageJsonPath: string + changelogPath: string + detectedVersion: string | null +} + +function parseArgs(): { version: string | null } { + const args = process.argv.slice(2) + const versionIndex = args.findIndex( + (arg) => arg === '--version' || arg === '-v', + ) + + if (versionIndex !== -1 && args[versionIndex + 1]) { + return { version: args[versionIndex + 1] } + } + + return { version: null } +} + +function detectVersionFromChangelog(changelogPath: string): string | null { + try { + const content = readFileSync(changelogPath, 'utf-8') + + // Look for "Updated dependencies" section and extract version numbers + // Pattern: - @tanstack/package-name@X.Y.Z + const dependencyPattern = /-\s+@tanstack\/[\w-]+@(\d+\.\d+\.\d+)/g + const matches = [...content.matchAll(dependencyPattern)] + + if (matches.length > 0) { + // Get the first dependency version (they should all be the same in a changeset bump) + const version = matches[0][1] + // Make sure it's not also 1.0.0 + if (version !== WRONG_VERSION) { + return version + } + } + + return null + } catch { + return null + } +} + +function fixPackageJson(path: string, newVersion: string): void { + const 
content = readFileSync(path, 'utf-8') + const updated = content.replace( + /"version":\s*"1\.0\.0"/, + `"version": "${newVersion}"`, + ) + writeFileSync(path, updated) +} + +function fixChangelog(path: string, newVersion: string): void { + const content = readFileSync(path, 'utf-8') + // Replace the first occurrence of "## 1.0.0" with the correct version + const updated = content.replace(/^## 1\.0\.0$/m, `## ${newVersion}`) + writeFileSync(path, updated) +} + +async function main() { + const { version: cliVersion } = parseArgs() + + console.log('🔍 Scanning for packages with version 1.0.0...\n') + + // Find all package.json files in packages/typescript + const packageJsonFiles = await glob('packages/typescript/*/package.json', { + ignore: ['**/node_modules/**'], + }) + + const packagesToFix: PackageToFix[] = [] + + for (const packageJsonPath of packageJsonFiles) { + try { + const content = readFileSync(packageJsonPath, 'utf-8') + const pkg = JSON.parse(content) + + if (pkg.version === WRONG_VERSION) { + const changelogPath = packageJsonPath.replace( + 'package.json', + 'CHANGELOG.md', + ) + const detectedVersion = detectVersionFromChangelog(changelogPath) + + packagesToFix.push({ + name: pkg.name, + packageJsonPath, + changelogPath, + detectedVersion, + }) + } + } catch { + // Skip files that can't be parsed + } + } + + if (packagesToFix.length === 0) { + console.log('✅ No packages found with version 1.0.0. 
Nothing to fix!') + process.exit(0) + } + + console.log(`Found ${packagesToFix.length} package(s) with version 1.0.0:\n`) + + const errors: string[] = [] + + for (const pkg of packagesToFix) { + const targetVersion = cliVersion || pkg.detectedVersion + + if (!targetVersion) { + errors.push(pkg.name) + console.log(` ❌ ${pkg.name} - could not auto-detect version`) + } else { + console.log(` 📦 ${pkg.name} → ${targetVersion}`) + } + } + + if (errors.length > 0) { + console.log('\n❌ Could not auto-detect version for some packages.') + console.log(' Please run with an explicit version:\n') + console.log(' node scripts/fix-version-bump.ts --version X.Y.Z\n') + process.exit(1) + } + + console.log('\n🔧 Fixing versions...\n') + + for (const pkg of packagesToFix) { + const targetVersion = cliVersion || pkg.detectedVersion! + + // Fix package.json + fixPackageJson(pkg.packageJsonPath, targetVersion) + console.log(` ✓ ${pkg.packageJsonPath}`) + + // Fix CHANGELOG.md + try { + fixChangelog(pkg.changelogPath, targetVersion) + console.log(` ✓ ${pkg.changelogPath}`) + } catch { + console.log(` ⚠ ${pkg.changelogPath} (not found or could not update)`) + } + } + + console.log('\n✅ Done! Version bump fixed.') + console.log('\nNext steps:') + console.log(' 1. Review the changes: git diff') + console.log( + ' 2. 
Commit: git add -A && git commit -m "fix: correct version bump to X.Y.Z"', + ) +} + +main().catch(console.error) diff --git a/testing/panel/package.json b/testing/panel/package.json index 68820780..53628b98 100644 --- a/testing/panel/package.json +++ b/testing/panel/package.json @@ -21,9 +21,9 @@ "@tanstack/ai-openrouter": "workspace:*", "@tanstack/ai-react": "workspace:*", "@tanstack/ai-react-ui": "workspace:*", - "@tanstack/nitro-v2-vite-plugin": "^1.141.0", - "@tanstack/react-router": "^1.141.1", - "@tanstack/react-start": "^1.141.1", + "@tanstack/nitro-v2-vite-plugin": "^1.154.7", + "@tanstack/react-router": "^1.158.4", + "@tanstack/react-start": "^1.159.0", "@tanstack/start": "^1.120.20", "highlight.js": "^11.11.1", "lucide-react": "^0.561.0", diff --git a/testing/panel/src/lib/model-selection.ts b/testing/panel/src/lib/model-selection.ts index 1efc2ab3..470c6a52 100644 --- a/testing/panel/src/lib/model-selection.ts +++ b/testing/panel/src/lib/model-selection.ts @@ -83,7 +83,7 @@ export const MODEL_OPTIONS: Array = [ { provider: 'grok', model: 'grok-4', - label: 'Grok - Grok 4', + label: 'Grok - Grok 4 - slow thinking', }, { provider: 'grok', diff --git a/testing/panel/tests/tool-flow.spec.ts b/testing/panel/tests/tool-flow.spec.ts index 3c940d7a..0879d92d 100644 --- a/testing/panel/tests/tool-flow.spec.ts +++ b/testing/panel/tests/tool-flow.spec.ts @@ -154,6 +154,85 @@ for (const provider of toolProviders) { }) } +// =========================== +// Multi-turn follow-up tests +// =========================== +// These test that providers can handle follow-up messages AFTER tool calls. +// This specifically catches the Anthropic bug where consecutive user-role messages +// (tool_result + new user message) violate the alternating role constraint. 
+ +for (const provider of toolProviders) { + test.describe(`${provider.name} - Multi-turn Tool Follow-up`, () => { + // Extended timeout for multi-turn conversations (two full LLM round-trips) + test.describe.configure({ retries: 2, timeout: 180_000 }) + + // Skip if provider is not available + test.skip( + () => !isProviderAvailable(provider), + `${provider.name} API key not configured (requires ${provider.envKey || 'no key'})`, + ) + + test('should handle follow-up message after tool call completes', async ({ + page, + }) => { + // Navigate to the chat page + await goToChatPage(page) + + // Select the provider and model + await selectProvider(page, provider.id, provider.defaultModel) + + // First message: trigger a tool call + await sendMessage( + page, + 'Use the getGuitars tool to show me what acoustic guitars you have.', + ) + + // Wait for the first response to complete (tool call + model response) + await waitForResponse(page, 120_000) + + // Verify the first turn worked - should have tool calls + const firstMessages = await getMessages(page) + const firstAssistant = firstMessages.filter( + (m: any) => m.role === 'assistant', + ) + expect(firstAssistant.length).toBeGreaterThan(0) + + // Send a follow-up message - this is where the bug manifested + // With the Anthropic bug, this would fail with consecutive user-role messages + await sendMessage(page, 'Now tell me about electric guitars instead.') + + // Wait for the follow-up response + await waitForResponse(page, 120_000) + + // Get all messages after the follow-up + const allMessages = await getMessages(page) + + // Should have at least 2 user messages and 2 assistant messages + const userMessages = allMessages.filter((m: any) => m.role === 'user') + const assistantMessages = allMessages.filter( + (m: any) => m.role === 'assistant', + ) + + expect(userMessages.length).toBeGreaterThanOrEqual(2) + expect(assistantMessages.length).toBeGreaterThanOrEqual(2) + + // The LAST assistant message should have 
non-empty text content + // (not just tool calls, and not an error) + const lastAssistant = assistantMessages[assistantMessages.length - 1] + const textParts = lastAssistant.parts?.filter( + (p: any) => p.type === 'text' && p.content && p.content.length > 0, + ) + + // The follow-up should have produced some text OR tool calls + // (both are valid responses - the key is it didn't error out) + const hasText = textParts?.length > 0 + const hasTools = + lastAssistant.parts?.some((p: any) => p.type === 'tool-call') || false + expect(hasText || hasTools).toBe(true) + }) + }) +} + // Verify we have tool-capable providers to test test('at least one tool-capable provider should be available', async () => { const available = getToolCapableProviders() From 4ab96362437bf1e20043729ccb3ce951b723368d Mon Sep 17 00:00:00 2001 From: Dhamivibez Date: Thu, 12 Feb 2026 17:25:26 +0100 Subject: [PATCH 07/16] feat: Add new Groq AI adapter package, including Groq-specific message types, utilities, and text adapter. 
--- package.json | 1 + .../typescript/ai-groq/src/adapters/text.ts | 597 ++++++++++++++++++ packages/typescript/ai-groq/src/index.ts | 33 + .../typescript/ai-groq/src/message-types.ts | 359 +++++++++++ packages/typescript/ai-groq/src/model-meta.ts | 369 +++++++++++ .../ai-groq/src/text/text-provider-options.ts | 225 +++++++ .../ai-groq/src/tools/function-tool.ts | 39 ++ .../typescript/ai-groq/src/tools/index.ts | 5 + .../ai-groq/src/tools/tool-converter.ts | 15 + .../typescript/ai-groq/src/utils/index.ts | 10 + .../ai-groq/src/utils/schema-converter.ts | 102 +++ .../ai-groq/tests/groq-adapter.test.ts | 581 +++++++++++++++++ 12 files changed, 2336 insertions(+) create mode 100644 packages/typescript/ai-groq/src/adapters/text.ts create mode 100644 packages/typescript/ai-groq/src/message-types.ts create mode 100644 packages/typescript/ai-groq/src/model-meta.ts create mode 100644 packages/typescript/ai-groq/src/text/text-provider-options.ts create mode 100644 packages/typescript/ai-groq/src/tools/function-tool.ts create mode 100644 packages/typescript/ai-groq/src/tools/index.ts create mode 100644 packages/typescript/ai-groq/src/tools/tool-converter.ts create mode 100644 packages/typescript/ai-groq/src/utils/index.ts create mode 100644 packages/typescript/ai-groq/src/utils/schema-converter.ts create mode 100644 packages/typescript/ai-groq/tests/groq-adapter.test.ts diff --git a/package.json b/package.json index 1bbda7ce..0f44a074 100644 --- a/package.json +++ b/package.json @@ -72,6 +72,7 @@ "vitest": "^4.0.14" }, "dependencies": { + "ai-groq": "link:../../.local/share/pnpm/global/5/node_modules/@tanstack/ai-groq", "groq-sdk": "^0.37.0" } } diff --git a/packages/typescript/ai-groq/src/adapters/text.ts b/packages/typescript/ai-groq/src/adapters/text.ts new file mode 100644 index 00000000..969a50aa --- /dev/null +++ b/packages/typescript/ai-groq/src/adapters/text.ts @@ -0,0 +1,597 @@ +import { BaseTextAdapter } from '@tanstack/ai/adapters' +import { 
validateTextProviderOptions } from '../text/text-provider-options' +import { convertToolsToProviderFormat } from '../tools' +import { + createGroqClient, + generateId, + getGroqApiKeyFromEnv, + makeGroqStructuredOutputCompatible, + transformNullsToUndefined, +} from '../utils' +import type { + GROQ_CHAT_MODELS, + ResolveInputModalities, + ResolveProviderOptions, +} from '../model-meta' +import type { + StructuredOutputOptions, + StructuredOutputResult, +} from '@tanstack/ai/adapters' +import type GROQ_SDK from 'groq-sdk' +import type { ChatCompletionCreateParamsStreaming, ChatCompletionTool } from 'groq-sdk/resources/chat/completions' +import type { + ContentPart, + ModelMessage, + StreamChunk, + TextOptions, +} from '@tanstack/ai' +import type { InternalTextProviderOptions } from '../text/text-provider-options' +import type { + ChatCompletionMessageParam, + GroqImageMetadata, + GroqMessageMetadataByModality, +} from '../message-types' +import type { GroqClientConfig } from '../utils' + +/** + * Configuration for Groq text adapter + */ +export interface GroqTextConfig extends GroqClientConfig { } + +/** + * Alias for TextProviderOptions for external use + */ +export type { ExternalTextProviderOptions as GroqTextProviderOptions } from '../text/text-provider-options' + +/** + * Groq Text (Chat) Adapter + * + * Tree-shakeable adapter for Groq chat/text completion functionality. + * Uses the Groq SDK which provides an OpenAI-compatible Chat Completions API. 
+ */ +export class GroqTextAdapter< + TModel extends (typeof GROQ_CHAT_MODELS)[number], +> extends BaseTextAdapter< + TModel, + ResolveProviderOptions, + ResolveInputModalities, + GroqMessageMetadataByModality +> { + readonly kind = 'text' as const + readonly name = 'groq' as const + + private client: GROQ_SDK + + constructor(config: GroqTextConfig, model: TModel) { + super({}, model) + this.client = createGroqClient(config) + } + + async *chatStream( + options: TextOptions>, + ): AsyncIterable { + const requestParams = this.mapTextOptionsToGroq(options) + const timestamp = Date.now() + + const aguiState = { + runId: generateId(this.name), + messageId: generateId(this.name), + timestamp, + hasEmittedRunStarted: false, + } + + try { + const stream = await this.client.chat.completions.create({ + ...requestParams, + stream: true, + } as ChatCompletionCreateParamsStreaming) + + yield* this.processGroqStreamChunks( + stream as AsyncIterable, + options, + aguiState, + ) + } catch (error: unknown) { + const err = error as Error & { code?: string } + + if (!aguiState.hasEmittedRunStarted) { + aguiState.hasEmittedRunStarted = true + yield { + type: 'RUN_STARTED', + runId: aguiState.runId, + model: options.model, + timestamp, + } + } + + yield { + type: 'RUN_ERROR', + runId: aguiState.runId, + model: options.model, + timestamp, + error: { + message: err.message || 'Unknown error', + code: err.code, + }, + } + + console.error('>>> chatStream: Fatal error during response creation <<<') + console.error('>>> Error message:', err.message) + console.error('>>> Error stack:', err.stack) + console.error('>>> Full error:', err) + } + } + + /** + * Generate structured output using Groq's JSON Schema response format. + * Uses stream: false to get the complete response in one call. 
+ * + * Groq has strict requirements for structured output: + * - All properties must be in the `required` array + * - Optional fields should have null added to their type union + * - additionalProperties must be false for all objects + * + * The outputSchema is already JSON Schema (converted in the ai layer). + * We apply Groq-specific transformations for structured output compatibility. + */ + async structuredOutput( + options: StructuredOutputOptions>, + ): Promise> { + const { chatOptions, outputSchema } = options + const requestParams = this.mapTextOptionsToGroq(chatOptions) + + const jsonSchema = makeGroqStructuredOutputCompatible( + outputSchema, + outputSchema.required || [], + ) + + try { + const response = await this.client.chat.completions.create({ + ...requestParams, + stream: false, + response_format: { + type: 'json_schema', + json_schema: { + name: 'structured_output', + schema: jsonSchema, + strict: true, + }, + }, + }) + + const rawText = response.choices[0]?.message.content || '' + + let parsed: unknown + try { + parsed = JSON.parse(rawText) + } catch { + throw new Error( + `Failed to parse structured output as JSON. Content: ${rawText.slice(0, 200)}${rawText.length > 200 ? '...' : ''}`, + ) + } + + const transformed = transformNullsToUndefined(parsed) + + return { + data: transformed, + rawText, + } + } catch (error: unknown) { + const err = error as Error + console.error('>>> structuredOutput: Error during response creation <<<') + console.error('>>> Error message:', err.message) + throw error + } + } + + /** + * Processes streaming chunks from the Groq API and yields AG-UI stream events. + * Handles text content deltas, tool call assembly, and lifecycle events. 
+ */ + private async *processGroqStreamChunks( + stream: AsyncIterable, + options: TextOptions, + aguiState: { + runId: string + messageId: string + timestamp: number + hasEmittedRunStarted: boolean + }, + ): AsyncIterable { + let accumulatedContent = '' + const timestamp = aguiState.timestamp + let hasEmittedTextMessageStart = false + + const toolCallsInProgress = new Map< + number, + { + id: string + name: string + arguments: string + started: boolean + } + >() + + try { + for await (const chunk of stream) { + const choice = chunk.choices[0] + + if (!choice) continue + + if (!aguiState.hasEmittedRunStarted) { + aguiState.hasEmittedRunStarted = true + yield { + type: 'RUN_STARTED', + runId: aguiState.runId, + model: chunk.model || options.model, + timestamp, + } + } + + const delta = choice.delta + const deltaContent = delta.content + const deltaToolCalls = delta.tool_calls + + if (deltaContent) { + if (!hasEmittedTextMessageStart) { + hasEmittedTextMessageStart = true + yield { + type: 'TEXT_MESSAGE_START', + messageId: aguiState.messageId, + model: chunk.model || options.model, + timestamp, + role: 'assistant', + } + } + + accumulatedContent += deltaContent + + yield { + type: 'TEXT_MESSAGE_CONTENT', + messageId: aguiState.messageId, + model: chunk.model || options.model, + timestamp, + delta: deltaContent || '', + content: accumulatedContent, + } + } + + if (deltaToolCalls) { + for (const toolCallDelta of deltaToolCalls) { + const index = toolCallDelta.index + + if (!toolCallsInProgress.has(index)) { + toolCallsInProgress.set(index, { + id: toolCallDelta.id || '', + name: toolCallDelta.function?.name || '', + arguments: '', + started: false, + }) + } + + const toolCall = toolCallsInProgress.get(index)! 
+ + if (toolCallDelta.id) { + toolCall.id = toolCallDelta.id + } + if (toolCallDelta.function?.name) { + toolCall.name = toolCallDelta.function.name + } + if (toolCallDelta.function?.arguments) { + toolCall.arguments += toolCallDelta.function.arguments + } + + if (toolCall.id && toolCall.name && !toolCall.started) { + toolCall.started = true + yield { + type: 'TOOL_CALL_START', + toolCallId: toolCall.id, + toolName: toolCall.name, + model: chunk.model || options.model, + timestamp, + index, + } + } + + if (toolCallDelta.function?.arguments && toolCall.started) { + yield { + type: 'TOOL_CALL_ARGS', + toolCallId: toolCall.id, + model: chunk.model || options.model, + timestamp, + delta: toolCallDelta.function.arguments, + } + } + } + } + + if (choice.finish_reason) { + if ( + choice.finish_reason === 'tool_calls' || + toolCallsInProgress.size > 0 + ) { + for (const [, toolCall] of toolCallsInProgress) { + let parsedInput: unknown = {} + try { + parsedInput = toolCall.arguments + ? JSON.parse(toolCall.arguments) + : {} + } catch { + parsedInput = {} + } + + yield { + type: 'TOOL_CALL_END', + toolCallId: toolCall.id, + toolName: toolCall.name, + model: chunk.model || options.model, + timestamp, + input: parsedInput, + } + } + } + + const computedFinishReason = + choice.finish_reason === 'tool_calls' || + toolCallsInProgress.size > 0 + ? 'tool_calls' + : 'stop' + + if (hasEmittedTextMessageStart) { + yield { + type: 'TEXT_MESSAGE_END', + messageId: aguiState.messageId, + model: chunk.model || options.model, + timestamp, + } + } + + yield { + type: 'RUN_FINISHED', + runId: aguiState.runId, + model: chunk.model || options.model, + timestamp, + usage: chunk.x_groq?.usage + ? 
{ + promptTokens: chunk.x_groq.usage.prompt_tokens || 0, + completionTokens: chunk.x_groq.usage.completion_tokens || 0, + totalTokens: chunk.x_groq.usage.total_tokens || 0, + } + : undefined, + finishReason: computedFinishReason, + } + } + } + } catch (error: unknown) { + const err = error as Error & { code?: string } + console.log('[Groq Adapter] Stream ended with error:', err.message) + + yield { + type: 'RUN_ERROR', + runId: aguiState.runId, + model: options.model, + timestamp, + error: { + message: err.message || 'Unknown error occurred', + code: err.code, + }, + } + } + } + + /** + * Maps common TextOptions to Groq-specific Chat Completions request parameters. + */ + private mapTextOptionsToGroq( + options: TextOptions, + ): ChatCompletionCreateParamsStreaming { + const modelOptions = options.modelOptions as + | Omit< + InternalTextProviderOptions, + 'max_tokens' | 'tools' | 'temperature' | 'input' | 'top_p' + > + | undefined + + if (modelOptions) { + validateTextProviderOptions({ + ...modelOptions, + model: options.model, + }) + } + + const tools = options.tools + ? convertToolsToProviderFormat(options.tools) + : undefined + + const messages: Array = [] + + if (options.systemPrompts && options.systemPrompts.length > 0) { + messages.push({ + role: 'system', + content: options.systemPrompts.join('\n'), + }) + } + + for (const message of options.messages) { + messages.push(this.convertMessageToGroq(message)) + } + + return { + model: options.model, + messages, + temperature: options.temperature, + max_tokens: options.maxTokens, + top_p: options.topP, + tools: tools as Array, + stream: true, + } + } + + /** + * Converts a TanStack AI ModelMessage to a Groq ChatCompletionMessageParam. + * Handles tool, assistant, and user messages including multimodal content. 
+ */ + private convertMessageToGroq( + message: ModelMessage, + ): ChatCompletionMessageParam { + if (message.role === 'tool') { + return { + role: 'tool', + tool_call_id: message.toolCallId || '', + content: + typeof message.content === 'string' + ? message.content + : JSON.stringify(message.content), + } + } + + if (message.role === 'assistant') { + const toolCalls = message.toolCalls?.map((tc) => ({ + id: tc.id, + type: 'function' as const, + function: { + name: tc.function.name, + arguments: + typeof tc.function.arguments === 'string' + ? tc.function.arguments + : JSON.stringify(tc.function.arguments), + }, + })) + + return { + role: 'assistant', + content: this.extractTextContent(message.content), + ...(toolCalls && toolCalls.length > 0 ? { tool_calls: toolCalls } : {}), + } + } + + const contentParts = this.normalizeContent(message.content) + + if (contentParts.length === 1 && contentParts[0]?.type === 'text') { + return { + role: 'user', + content: contentParts[0].content, + } + } + + const parts: Array< + | { type: 'text'; text: string } + | { type: 'image_url'; image_url: { url: string; detail?: string } } + > = [] + for (const part of contentParts) { + if (part.type === 'text') { + parts.push({ type: 'text', text: part.content }) + } else if (part.type === 'image') { + const imageMetadata = part.metadata as GroqImageMetadata | undefined + const imageValue = part.source.value + const imageUrl = + part.source.type === 'data' && !imageValue.startsWith('data:') + ? `data:${part.source.mimeType};base64,${imageValue}` + : imageValue + parts.push({ + type: 'image_url', + image_url: { + url: imageUrl, + detail: imageMetadata?.detail || 'auto', + }, + }) + } + } + + return { + role: 'user', + content: parts.length > 0 ? (parts as any) : '', + } + } + + /** + * Normalizes message content to an array of ContentPart. + * Handles backward compatibility with string content. 
+ */ + private normalizeContent( + content: string | null | Array, + ): Array { + if (content === null) { + return [] + } + if (typeof content === 'string') { + return [{ type: 'text', content: content }] + } + return content + } + + /** + * Extracts text content from a content value that may be string, null, or ContentPart array. + */ + private extractTextContent( + content: string | null | Array, + ): string { + if (content === null) { + return '' + } + if (typeof content === 'string') { + return content + } + return content + .filter((p) => p.type === 'text') + .map((p) => p.content) + .join('') + } +} + +/** + * Creates a Groq text adapter with explicit API key. + * Type resolution happens here at the call site. + * + * @param model - The model name (e.g., 'llama-3.3-70b-versatile', 'openai/gpt-oss-120b') + * @param apiKey - Your Groq API key + * @param config - Optional additional configuration + * @returns Configured Groq text adapter instance with resolved types + * + * @example + * ```typescript + * const adapter = createGroqText('llama-3.3-70b-versatile', "gsk_..."); + * // adapter has type-safe providerOptions for llama-3.3-70b-versatile + * ``` + */ +export function createGroqText< + TModel extends (typeof GROQ_CHAT_MODELS)[number], +>( + model: TModel, + apiKey: string, + config?: Omit, +): GroqTextAdapter { + return new GroqTextAdapter({ apiKey, ...config }, model) +} + +/** + * Creates a Groq text adapter with automatic API key detection from environment variables. + * Type resolution happens here at the call site. 
+ * + * Looks for `GROQ_API_KEY` in: + * - `process.env` (Node.js) + * - `window.env` (Browser with injected env) + * + * @param model - The model name (e.g., 'llama-3.3-70b-versatile', 'openai/gpt-oss-120b') + * @param config - Optional configuration (excluding apiKey which is auto-detected) + * @returns Configured Groq text adapter instance with resolved types + * @throws Error if GROQ_API_KEY is not found in environment + * + * @example + * ```typescript + * // Automatically uses GROQ_API_KEY from environment + * const adapter = groqText('llama-3.3-70b-versatile'); + * + * const stream = chat({ + * adapter, + * messages: [{ role: "user", content: "Hello!" }] + * }); + * ``` + */ +export function groqText( + model: TModel, + config?: Omit, +): GroqTextAdapter { + const apiKey = getGroqApiKeyFromEnv() + return createGroqText(model, apiKey, config) +} diff --git a/packages/typescript/ai-groq/src/index.ts b/packages/typescript/ai-groq/src/index.ts index e69de29b..d38f09d3 100644 --- a/packages/typescript/ai-groq/src/index.ts +++ b/packages/typescript/ai-groq/src/index.ts @@ -0,0 +1,33 @@ +/** + * @module @tanstack/ai-groq + * + * Groq provider adapter for TanStack AI. + * Provides tree-shakeable adapters for Groq's Chat Completions API. 
+ */ + +// Text (Chat) adapter +export { + GroqTextAdapter, + createGroqText, + groqText, + type GroqTextConfig, + type GroqTextProviderOptions, +} from './adapters/text' + +// Types +export type { + GroqChatModelProviderOptionsByName, + GroqModelInputModalitiesByName, + ResolveProviderOptions, + ResolveInputModalities, + GroqChatModels, +} from './model-meta' +export { GROQ_CHAT_MODELS } from './model-meta' +export type { + GroqTextMetadata, + GroqImageMetadata, + GroqAudioMetadata, + GroqVideoMetadata, + GroqDocumentMetadata, + GroqMessageMetadataByModality, +} from './message-types' diff --git a/packages/typescript/ai-groq/src/message-types.ts b/packages/typescript/ai-groq/src/message-types.ts new file mode 100644 index 00000000..907c2d9e --- /dev/null +++ b/packages/typescript/ai-groq/src/message-types.ts @@ -0,0 +1,359 @@ +/** + * Groq-specific message types for the Chat Completions API. + * + * These type definitions mirror the Groq SDK types and are used internally + * by the adapter to avoid tight coupling to the SDK's exported types. + * + * @see https://console.groq.com/docs/api-reference#chat + */ + +export interface ChatCompletionContentPartText { + /** The text content. */ + text: string + + /** The type of the content part. */ + type: 'text' +} + +export interface ChatCompletionContentPartImage { + image_url: { + /** Either a URL of the image or the base64 encoded image data. */ + url: string + + /** Specifies the detail level of the image. */ + detail?: 'auto' | 'low' | 'high' + } + + /** The type of the content part. */ + type: 'image_url' +} + +export interface ChatCompletionMessageToolCall { + /** The ID of the tool call. */ + id: string + + /** The function that the model called. */ + function: { + /** + * The arguments to call the function with, as generated by the model in JSON + * format. Note that the model does not always generate valid JSON, and may + * hallucinate parameters not defined by your function schema. 
Validate the
+     * arguments in your code before calling your function.
+     */
+    arguments: string
+
+    /** The name of the function to call. */
+    name: string
+  }
+
+  /** The type of the tool. Currently, only `function` is supported. */
+  type: 'function'
+}
+
+export interface ChatCompletionRequestMessageContentPartDocument {
+  document: {
+    /** The JSON document data. */
+    data: { [key: string]: unknown }
+
+    /** Optional unique identifier for the document. */
+    id?: string | null
+  }
+
+  /** The type of the content part. */
+  type: 'document'
+}
+
+export type FunctionParameters = { [key: string]: unknown }
+
+export interface ChatCompletionNamedToolChoice {
+  /** The type of the tool. Currently, only `function` is supported. */
+  type: 'function'
+
+  function: {
+    /** The name of the function to call. */
+    name: string
+  }
+}
+
+export interface FunctionDefinition {
+  /**
+   * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
+   * underscores and dashes, with a maximum length of 64.
+   */
+  name: string
+
+  /**
+   * A description of what the function does, used by the model to choose when and
+   * how to call the function.
+   */
+  description?: string
+
+  /**
+   * Function parameters defined as a JSON Schema object.
+   * @see https://json-schema.org/understanding-json-schema/
+   */
+  parameters?: FunctionParameters
+
+  /**
+   * Whether to enable strict schema adherence when generating the output. If set to
+   * true, the model will always follow the exact schema defined in the `schema`
+   * field. Only a subset of JSON Schema is supported when `strict` is `true`.
+   */
+  strict?: boolean
+}
+
+/**
+ * Controls which (if any) tool is called by the model.
+ * + * - `none` — the model will not call any tool and instead generates a message + * - `auto` — the model can pick between generating a message or calling tools + * - `required` — the model must call one or more tools + * - Named tool choice — forces the model to call a specific tool + */ +export type ChatCompletionToolChoiceOption = + | 'none' + | 'auto' + | 'required' + | ChatCompletionNamedToolChoice + +export type ChatCompletionContentPart = + | ChatCompletionContentPartText + | ChatCompletionContentPartImage + | ChatCompletionRequestMessageContentPartDocument + +export interface ChatCompletionAssistantMessageParam { + /** The role of the messages author, in this case `assistant`. */ + role: 'assistant' + + /** + * The contents of the assistant message. Required unless `tool_calls` or + * `function_call` is specified. + */ + content?: string | Array | null + + /** An optional name for the participant. */ + name?: string + + /** + * The reasoning output by the assistant if reasoning_format was set to 'parsed'. + * This field is only useable with qwen3 models. + */ + reasoning?: string | null + + /** The tool calls generated by the model, such as function calls. */ + tool_calls?: Array +} + +export interface ChatCompletionTool { + /** + * The type of the tool. `function`, `browser_search`, and `code_interpreter` are + * supported. + */ + type: 'function' | 'browser_search' | 'code_interpreter' | (string & {}) + + function?: FunctionDefinition +} + +export interface ChatCompletionToolMessageParam { + /** The contents of the tool message. */ + content: string | Array + + /** The role of the messages author, in this case `tool`. */ + role: 'tool' + + /** Tool call that this message is responding to. */ + tool_call_id: string +} + +export interface ChatCompletionSystemMessageParam { + /** The contents of the system message. */ + content: string | Array + + /** The role of the messages author, in this case `system`. 
*/ + role: 'system' | 'developer' + + /** An optional name for the participant. */ + name?: string +} + +export interface ChatCompletionUserMessageParam { + /** The contents of the user message. */ + content: string | Array + + /** The role of the messages author, in this case `user`. */ + role: 'user' + + /** An optional name for the participant. */ + name?: string +} + +/** + * Union of all supported chat completion message params. + */ +export type ChatCompletionMessageParam = + | ChatCompletionSystemMessageParam + | ChatCompletionUserMessageParam + | ChatCompletionAssistantMessageParam + | ChatCompletionToolMessageParam + +export interface CompoundCustomModels { + /** Custom model to use for answering. */ + answering_model?: string | null + + /** Custom model to use for reasoning. */ + reasoning_model?: string | null +} + +export interface CompoundCustomTools { + /** A list of tool names that are enabled for the request. */ + enabled_tools?: Array | null + + /** Configuration for the Wolfram tool integration. */ + wolfram_settings?: CompoundCustomToolsWolframSettings | null +} + +export interface CompoundCustomToolsWolframSettings { + /** API key used to authorize requests to Wolfram services. */ + authorization?: string | null +} + +export interface CompoundCustom { + models?: CompoundCustomModels | null + + /** Configuration options for tools available to Compound. */ + tools?: CompoundCustomTools | null +} + +export interface DocumentSourceText { + /** The document contents. */ + text: string + + /** Identifies this document source as inline text. */ + type: 'text' +} + +export interface DocumentSourceJson { + /** The JSON payload associated with the document. */ + data: { [key: string]: unknown } + + /** Identifies this document source as JSON data. */ + type: 'json' +} + +export interface Document { + /** The source of the document. Only text and JSON sources are currently supported. 
*/ + source: DocumentSourceText | DocumentSourceJson + + /** Optional unique identifier that can be used for citations in responses. */ + id?: string | null +} + +export interface ResponseFormatText { + /** The type of response format being defined. Always `text`. */ + type: 'text' +} + +export interface ResponseFormatJsonSchemaJsonSchema { + /** + * The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores + * and dashes, with a maximum length of 64. + */ + name: string + + /** + * A description of what the response format is for, used by the model to determine + * how to respond in the format. + */ + description?: string + + /** + * The schema for the response format, described as a JSON Schema object. + * @see https://json-schema.org/ + */ + schema?: { [key: string]: unknown } + + /** + * Whether to enable strict schema adherence when generating the output. If set to + * true, the model will always follow the exact schema defined in the `schema` + * field. Only a subset of JSON Schema is supported when `strict` is `true`. + */ + strict?: boolean | null +} + +export interface ResponseFormatJsonSchema { + /** Structured Outputs configuration options, including a JSON Schema. */ + json_schema: ResponseFormatJsonSchemaJsonSchema + + /** The type of response format being defined. Always `json_schema`. */ + type: 'json_schema' +} + +export interface ResponseFormatJsonObject { + /** The type of response format being defined. Always `json_object`. */ + type: 'json_object' +} + +export interface SearchSettings { + /** + * Name of country to prioritize search results from + * (e.g., "united states", "germany", "france"). + */ + country?: string | null + + /** A list of domains to exclude from the search results. */ + exclude_domains?: Array | null + + /** A list of domains to include in the search results. */ + include_domains?: Array | null + + /** Whether to include images in the search results. 
*/ + include_images?: boolean | null +} + +/** + * Metadata for Groq document content parts. + */ +export interface GroqDocumentMetadata { } + +/** + * Metadata for Groq text content parts. + * Currently no specific metadata options for text in Groq. + */ +export interface GroqTextMetadata { } + +/** + * Metadata for Groq image content parts. + * Controls how the model processes and analyzes images. + */ +export interface GroqImageMetadata { + /** + * Specifies the detail level of the image. + * - 'auto': Let the model decide based on image size and content + * - 'low': Use low resolution processing (faster, cheaper, less detail) + * - 'high': Use high resolution processing (slower, more expensive, more detail) + * + * @default 'auto' + */ + detail?: 'auto' | 'low' | 'high' +} + +/** + * Metadata for Groq audio content parts. + * Note: Audio support in Groq is limited; check current API capabilities. + */ +export interface GroqAudioMetadata { } + +/** + * Metadata for Groq video content parts. + * Note: Groq does not currently support video input. + */ +export interface GroqVideoMetadata { } + +/** + * Map of modality types to their Groq-specific metadata types. + * Used for type inference when constructing multimodal messages. + */ +export interface GroqMessageMetadataByModality { + text: GroqTextMetadata + image: GroqImageMetadata + audio: GroqAudioMetadata + video: GroqVideoMetadata + document: GroqDocumentMetadata +} diff --git a/packages/typescript/ai-groq/src/model-meta.ts b/packages/typescript/ai-groq/src/model-meta.ts new file mode 100644 index 00000000..4081334f --- /dev/null +++ b/packages/typescript/ai-groq/src/model-meta.ts @@ -0,0 +1,369 @@ +import type { GroqTextProviderOptions } from './text/text-provider-options' + +/** + * Internal metadata structure describing a Groq model's capabilities and pricing. 
+ */ +interface ModelMeta { + name: string + context_window?: number + max_completion_tokens?: number + pricing: { + input?: { normal: number; cached?: number } + output?: { normal: number } + } + supports: { + input: Array<'text' | 'image' | 'audio'> + output: Array<'text' | 'audio'> + endpoints: Array<'chat' | 'tts' | 'transcription' | 'batch'> + + features: Array< + | 'streaming' + | 'tools' + | 'json_object' + | 'browser_search' + | 'code_execution' + | 'reasoning' + | 'content_moderation' + | 'json_schema' + | 'vision' + > + } + /** + * Type-level description of which provider options this model supports. + */ + providerOptions?: TProviderOptions +} + +const LLAMA_3_3_70B_VERSATILE = { + name: 'llama-3.3-70b-versatile', + context_window: 131_072, + max_completion_tokens: 32_768, + pricing: { + input: { + normal: 0.59, + }, + output: { + normal: 0.79, + }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'tools', 'json_object'], + }, +} as const satisfies ModelMeta + +const LLAMA_4_MAVERICK_17B_128E_INSTRUCT = { + name: 'meta-llama/llama-4-maverick-17b-128e-instruct', + context_window: 131_072, + max_completion_tokens: 8_192, + pricing: { + input: { + normal: 0.2, + }, + output: { + normal: 0.6, + }, + }, + supports: { + input: ['text', 'image'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'tools', 'json_object', 'json_schema', 'vision'], + }, +} as const satisfies ModelMeta + +const LLAMA_4_SCOUT_17B_16E_INSTRUCT = { + name: 'meta-llama/llama-4-scout-17b-16e-instruct', + context_window: 131_072, + max_completion_tokens: 8_192, + pricing: { + input: { + normal: 0.05, + }, + output: { + normal: 0.08, + }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'tools', 'json_object'], + }, +} as const satisfies ModelMeta + +const LLAMA_GUARD_4_12B = { + name: 'meta-llama/llama-guard-4-12b', + context_window: 131_072, + 
max_completion_tokens: 1024, + pricing: { + input: { + normal: 0.2, + }, + output: { + normal: 0.2, + }, + }, + supports: { + input: ['text', 'image'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'json_object', 'content_moderation', 'vision'], + }, +} as const satisfies ModelMeta + +const LLAMA_PROMPT_GUARD_2_86M = { + name: 'meta-llama/llama-prompt-guard-2-86m', + context_window: 512, + max_completion_tokens: 512, + pricing: { + input: { + normal: 0.04, + }, + output: { + normal: 0.04, + }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'content_moderation', 'json_object'], + }, +} as const satisfies ModelMeta + +const LLAMA_3_1_8B_INSTANT = { + name: 'llama-3.1-8b-instant', + context_window: 131_072, + max_completion_tokens: 131_072, + pricing: { + input: { + normal: 0.05, + }, + output: { + normal: 0.08, + }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'json_object', 'tools'], + }, +} as const satisfies ModelMeta + +const LLAMA_PROMPT_GUARD_2_22M = { + name: 'meta-llama/llama-prompt-guard-2-22m', + context_window: 512, + max_completion_tokens: 512, + pricing: { + input: { + normal: 0.03, + }, + output: { + normal: 0.03, + }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'content_moderation'], + }, +} as const satisfies ModelMeta + +const GPT_OSS_120B = { + name: 'openai/gpt-oss-120b', + context_window: 131_072, + max_completion_tokens: 65_536, + pricing: { + input: { + normal: 0.15, + cached: 0.075, + }, + output: { + normal: 0.6, + }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: [ + 'streaming', + 'json_object', + 'json_schema', + 'tools', + 'browser_search', + 'code_execution', + 'reasoning', + ], + }, +} as const satisfies ModelMeta + +const GPT_OSS_SAFEGUARD_20B = { + name: 'openai/gpt-oss-safeguard-20b', 
+ context_window: 131_072, + max_completion_tokens: 65_536, + pricing: { + input: { + normal: 0.075, + cached: 0.037, + }, + output: { + normal: 0.3, + }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: [ + 'streaming', + 'tools', + 'browser_search', + 'code_execution', + 'json_object', + 'json_schema', + 'reasoning', + 'content_moderation', + ], + }, +} as const satisfies ModelMeta + +const GPT_OSS_20B = { + name: 'openai/gpt-oss-20b', + context_window: 131_072, + max_completion_tokens: 65536, + pricing: { + input: { + normal: 0.075, + cached: 0.037, + }, + output: { + normal: 0.3, + }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: [ + 'streaming', + 'browser_search', + 'code_execution', + 'json_object', + 'json_schema', + 'reasoning', + ], + }, +} as const satisfies ModelMeta + +const KIMI_K2_INSTRUCT_0905 = { + name: 'moonshotai/kimi-k2-instruct-0905', + context_window: 262_144, + max_completion_tokens: 16_384, + pricing: { + input: { + normal: 1, + cached: 0.5, + }, + output: { + normal: 3, + }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'json_object', 'json_schema'], + }, +} as const satisfies ModelMeta + +const QWEN3_32B = { + name: 'qwen/qwen3-32b', + context_window: 131_072, + max_completion_tokens: 40_960, + pricing: { + input: { + normal: 0.29, + }, + output: { + normal: 0.59, + }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'tools', 'reasoning'], + }, +} as const satisfies ModelMeta + +/** + * All supported Groq chat model identifiers. 
+ */ +export const GROQ_CHAT_MODELS = [ + LLAMA_3_1_8B_INSTANT.name, + LLAMA_3_3_70B_VERSATILE.name, + LLAMA_4_MAVERICK_17B_128E_INSTRUCT.name, + LLAMA_4_SCOUT_17B_16E_INSTRUCT.name, + LLAMA_GUARD_4_12B.name, + LLAMA_PROMPT_GUARD_2_86M.name, + LLAMA_PROMPT_GUARD_2_22M.name, + GPT_OSS_20B.name, + GPT_OSS_120B.name, + GPT_OSS_SAFEGUARD_20B.name, + KIMI_K2_INSTRUCT_0905.name, + QWEN3_32B.name, +] + +/** + * Union type of all supported Groq chat model names. + */ +export type GroqChatModels = (typeof GROQ_CHAT_MODELS)[number] + +/** + * Type-only map from Groq chat model name to its supported input modalities. + */ +export type GroqModelInputModalitiesByName = { + [LLAMA_3_1_8B_INSTANT.name]: typeof LLAMA_3_1_8B_INSTANT.supports.input + [LLAMA_3_3_70B_VERSATILE.name]: typeof LLAMA_3_3_70B_VERSATILE.supports.input + [LLAMA_4_MAVERICK_17B_128E_INSTRUCT.name]: typeof LLAMA_4_MAVERICK_17B_128E_INSTRUCT.supports.input + [LLAMA_4_SCOUT_17B_16E_INSTRUCT.name]: typeof LLAMA_4_SCOUT_17B_16E_INSTRUCT.supports.input + [LLAMA_GUARD_4_12B.name]: typeof LLAMA_GUARD_4_12B.supports.input + [LLAMA_PROMPT_GUARD_2_86M.name]: typeof LLAMA_PROMPT_GUARD_2_86M.supports.input + [LLAMA_PROMPT_GUARD_2_22M.name]: typeof LLAMA_PROMPT_GUARD_2_22M.supports.input + [GPT_OSS_20B.name]: typeof GPT_OSS_20B.supports.input + [GPT_OSS_120B.name]: typeof GPT_OSS_120B.supports.input + [GPT_OSS_SAFEGUARD_20B.name]: typeof GPT_OSS_SAFEGUARD_20B.supports.input + [KIMI_K2_INSTRUCT_0905.name]: typeof KIMI_K2_INSTRUCT_0905.supports.input + [QWEN3_32B.name]: typeof QWEN3_32B.supports.input +} + +/** + * Type-only map from Groq chat model name to its provider options type. + */ +export type GroqChatModelProviderOptionsByName = { + [K in (typeof GROQ_CHAT_MODELS)[number]]: GroqTextProviderOptions +} + +/** + * Resolves the provider options type for a specific Groq model. + * Falls back to generic GroqTextProviderOptions for unknown models. 
+ */ +export type ResolveProviderOptions = + TModel extends keyof GroqChatModelProviderOptionsByName + ? GroqChatModelProviderOptionsByName[TModel] + : GroqTextProviderOptions + +/** + * Resolve input modalities for a specific model. + * If the model has explicit modalities in the map, use those; otherwise use text only. + */ +export type ResolveInputModalities = + TModel extends keyof GroqModelInputModalitiesByName + ? GroqModelInputModalitiesByName[TModel] + : readonly ['text'] diff --git a/packages/typescript/ai-groq/src/text/text-provider-options.ts b/packages/typescript/ai-groq/src/text/text-provider-options.ts new file mode 100644 index 00000000..2c8c5257 --- /dev/null +++ b/packages/typescript/ai-groq/src/text/text-provider-options.ts @@ -0,0 +1,225 @@ +import type { + ChatCompletionMessageParam, + ChatCompletionTool, + ChatCompletionToolChoiceOption, + CompoundCustom, + Document, + ResponseFormatJsonObject, + ResponseFormatJsonSchema, + ResponseFormatText, + SearchSettings, +} from '../message-types' + +/** + * Groq-specific provider options for text/chat models. + * + * These options extend the standard Chat Completions API parameters + * with Groq-specific features like compound models and search settings. + * + * @see https://console.groq.com/docs/api-reference#chat + */ +export interface GroqTextProviderOptions { + /** + * Whether to enable citations in the response. When enabled, the model will + * include citations for information retrieved from provided documents or web + * searches. + */ + citation_options?: 'enabled' | 'disabled' | null + + /** Custom configuration of models and tools for Compound. */ + compound_custom?: CompoundCustom | null + + /** + * If set to true, groq will return called tools without validating that the tool + * is present in request.tools. tool_choice=required/none will still be enforced, + * but the request cannot require a specific tool be used. 
+ */ + disable_tool_validation?: boolean + + /** + * A list of documents to provide context for the conversation. Each document + * contains text that can be referenced by the model. + */ + documents?: Array | null + + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on their + * existing frequency in the text so far, decreasing the model's likelihood to + * repeat the same line verbatim. + */ + frequency_penalty?: number | null + + /** + * Whether to include reasoning in the response. This field is mutually exclusive + * with `reasoning_format`. + */ + include_reasoning?: boolean | null + + /** Modify the likelihood of specified tokens appearing in the completion. */ + logit_bias?: { [key: string]: number } | null + + /** + * Whether to return log probabilities of the output tokens or not. If true, + * returns the log probabilities of each output token returned in the `content` + * of `message`. + */ + logprobs?: boolean | null + + /** + * The maximum number of tokens that can be generated in the chat completion. The + * total length of input tokens and generated tokens is limited by the model's + * context length. + */ + max_completion_tokens?: number | null + + /** Request metadata. */ + metadata?: { [key: string]: string } | null + + /** + * How many chat completion choices to generate for each input message. + * Currently only n=1 is supported. + */ + n?: number | null + + /** Whether to enable parallel function calling during tool use. */ + parallel_tool_calls?: boolean | null + + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on + * whether they appear in the text so far, increasing the model's likelihood to + * talk about new topics. + */ + presence_penalty?: number | null + + /** + * Controls reasoning effort for supported models. 
+ * + * - qwen3 models: `'none'` to disable, `'default'` or null to enable + * - openai/gpt-oss models: `'low'`, `'medium'` (default), or `'high'` + */ + reasoning_effort?: 'none' | 'default' | 'low' | 'medium' | 'high' | null + + /** + * Specifies how to output reasoning tokens. + * This field is mutually exclusive with `include_reasoning`. + */ + reasoning_format?: 'hidden' | 'raw' | 'parsed' | null + + /** + * An object specifying the format that the model must output. + * + * - `json_schema` — enables Structured Outputs (preferred) + * - `json_object` — enables the older JSON mode + * - `text` — plain text output (default) + * + * @see https://console.groq.com/docs/structured-outputs + */ + response_format?: + | ResponseFormatText + | ResponseFormatJsonSchema + | ResponseFormatJsonObject + | null + + /** Settings for web search functionality when the model uses a web search tool. */ + search_settings?: SearchSettings | null + + /** + * If specified, our system will make a best effort to sample deterministically, + * such that repeated requests with the same `seed` and parameters should return + * the same result. + */ + seed?: number | null + + /** + * The service tier to use for the request. + * + * - `auto` — automatically select the highest tier available + * - `flex` — uses the flex tier, which will succeed or fail quickly + */ + service_tier?: 'auto' | 'on_demand' | 'flex' | 'performance' | null + + /** + * Up to 4 sequences where the API will stop generating further tokens. + * The returned text will not contain the stop sequence. + */ + stop?: string | null | Array + + /** Whether to store the request for future use. */ + store?: boolean | null + + /** + * Sampling temperature between 0 and 2. Higher values like 0.8 will make the + * output more random, while lower values like 0.2 will make it more focused + * and deterministic. We generally recommend altering this or top_p but not both. 
+ */ + temperature?: number | null + + /** + * Controls which (if any) tool is called by the model. + * + * - `none` — never call tools + * - `auto` — model decides (default when tools are present) + * - `required` — model must call tools + * - Named choice — forces a specific tool + */ + tool_choice?: ChatCompletionToolChoiceOption | null + + /** + * An integer between 0 and 20 specifying the number of most likely tokens to + * return at each token position. `logprobs` must be set to `true` if this + * parameter is used. + */ + top_logprobs?: number | null + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the + * model considers the results of the tokens with top_p probability mass. So 0.1 + * means only the tokens comprising the top 10% probability mass are considered. + */ + top_p?: number | null + + /** + * A unique identifier representing your end-user, which can help monitor and + * detect abuse. + */ + user?: string | null +} + +/** + * Internal options interface used for validation within the adapter. + * Extends provider options with required fields for API requests. + */ +export interface InternalTextProviderOptions extends GroqTextProviderOptions { + /** An array of messages comprising the conversation. */ + messages: Array + + /** + * The model name (e.g. "llama-3.3-70b-versatile", "openai/gpt-oss-120b"). + * @see https://console.groq.com/docs/models + */ + model: string + + /** Whether to stream partial message deltas as server-sent events. */ + stream?: boolean | null + + /** + * Tools the model may call (functions, code_interpreter, etc). + * @see https://console.groq.com/docs/tool-use + */ + tools?: Array +} + +/** + * External provider options (what users pass in) + */ +export type ExternalTextProviderOptions = GroqTextProviderOptions + +/** + * Validates text provider options. + * Basic validation stub — Groq API handles detailed validation. 
+ */ +export function validateTextProviderOptions( + _options: InternalTextProviderOptions, +): void { + // Groq API handles detailed validation +} diff --git a/packages/typescript/ai-groq/src/tools/function-tool.ts b/packages/typescript/ai-groq/src/tools/function-tool.ts new file mode 100644 index 00000000..4d88e08a --- /dev/null +++ b/packages/typescript/ai-groq/src/tools/function-tool.ts @@ -0,0 +1,39 @@ +import { makeGroqStructuredOutputCompatible } from '../utils/schema-converter' +import type { JSONSchema, Tool } from '@tanstack/ai' +import type { ChatCompletionTool } from '../message-types' + +export type FunctionTool = ChatCompletionTool + +/** + * Converts a standard Tool to Groq ChatCompletionTool format. + * + * Tool schemas are already converted to JSON Schema in the ai layer. + * We apply Groq-specific transformations for strict mode: + * - All properties in required array + * - Optional fields made nullable + * - additionalProperties: false + */ +export function convertFunctionToolToAdapterFormat(tool: Tool): FunctionTool { + const inputSchema = (tool.inputSchema ?? 
{ + type: 'object', + properties: {}, + required: [], + }) as JSONSchema + + const jsonSchema = makeGroqStructuredOutputCompatible( + inputSchema, + inputSchema.required || [], + ) + + jsonSchema.additionalProperties = false + + return { + type: 'function', + function: { + name: tool.name, + description: tool.description, + parameters: jsonSchema, + strict: true, + }, + } satisfies FunctionTool +} diff --git a/packages/typescript/ai-groq/src/tools/index.ts b/packages/typescript/ai-groq/src/tools/index.ts new file mode 100644 index 00000000..f9227d7b --- /dev/null +++ b/packages/typescript/ai-groq/src/tools/index.ts @@ -0,0 +1,5 @@ +export { + convertFunctionToolToAdapterFormat, + type FunctionTool, +} from './function-tool' +export { convertToolsToProviderFormat } from './tool-converter' diff --git a/packages/typescript/ai-groq/src/tools/tool-converter.ts b/packages/typescript/ai-groq/src/tools/tool-converter.ts new file mode 100644 index 00000000..451215c8 --- /dev/null +++ b/packages/typescript/ai-groq/src/tools/tool-converter.ts @@ -0,0 +1,15 @@ +import { convertFunctionToolToAdapterFormat } from './function-tool' +import type { FunctionTool } from './function-tool' +import type { Tool } from '@tanstack/ai' + +/** + * Converts an array of standard Tools to Groq-specific format. + * Groq uses an OpenAI-compatible API, so we primarily support function tools. 
+ */ +export function convertToolsToProviderFormat( + tools: Array, +): Array { + return tools.map((tool) => { + return convertFunctionToolToAdapterFormat(tool) + }) +} diff --git a/packages/typescript/ai-groq/src/utils/index.ts b/packages/typescript/ai-groq/src/utils/index.ts new file mode 100644 index 00000000..2c3f3d72 --- /dev/null +++ b/packages/typescript/ai-groq/src/utils/index.ts @@ -0,0 +1,10 @@ +export { + createGroqClient, + getGroqApiKeyFromEnv, + generateId, + type GroqClientConfig, +} from './client' +export { + makeGroqStructuredOutputCompatible, + transformNullsToUndefined, +} from './schema-converter' diff --git a/packages/typescript/ai-groq/src/utils/schema-converter.ts b/packages/typescript/ai-groq/src/utils/schema-converter.ts new file mode 100644 index 00000000..85b6371a --- /dev/null +++ b/packages/typescript/ai-groq/src/utils/schema-converter.ts @@ -0,0 +1,102 @@ +/** + * Recursively transform null values to undefined in an object. + * + * This is needed because Groq's structured output requires all fields to be + * in the `required` array, with optional fields made nullable (type: ["string", "null"]). + * When Groq returns null for optional fields, we need to convert them back to + * undefined to match the original Zod schema expectations. 
+ * + * @param obj - Object to transform + * @returns Object with nulls converted to undefined + */ +export function transformNullsToUndefined(obj: T): T { + if (obj === null) { + return undefined as unknown as T + } + + if (Array.isArray(obj)) { + return obj.map((item) => transformNullsToUndefined(item)) as unknown as T + } + + if (typeof obj === 'object') { + const result: Record = {} + for (const [key, value] of Object.entries(obj as Record)) { + const transformed = transformNullsToUndefined(value) + if (transformed !== undefined) { + result[key] = transformed + } + } + return result as T + } + + return obj +} + +/** + * Transform a JSON schema to be compatible with Groq's structured output requirements. + * + * Groq requires: + * - All properties must be in the `required` array + * - Optional fields should have null added to their type union + * - additionalProperties must be false for objects + * + * @param schema - JSON schema to transform + * @param originalRequired - Original required array (to know which fields were optional) + * @returns Transformed schema compatible with Groq structured output + */ +export function makeGroqStructuredOutputCompatible( + schema: Record, + originalRequired: Array = [], +): Record { + const result = { ...schema } + + if (result.type === 'object' && result.properties) { + const properties = { ...result.properties } + const allPropertyNames = Object.keys(properties) + + for (const propName of allPropertyNames) { + const prop = properties[propName] + const wasOptional = !originalRequired.includes(propName) + + if (prop.type === 'object' && prop.properties) { + properties[propName] = makeGroqStructuredOutputCompatible( + prop, + prop.required || [], + ) + } else if (prop.type === 'array' && prop.items) { + properties[propName] = { + ...prop, + items: makeGroqStructuredOutputCompatible( + prop.items, + prop.items.required || [], + ), + } + } else if (wasOptional) { + if (prop.type && !Array.isArray(prop.type)) { + 
properties[propName] = { + ...prop, + type: [prop.type, 'null'], + } + } else if (Array.isArray(prop.type) && !prop.type.includes('null')) { + properties[propName] = { + ...prop, + type: [...prop.type, 'null'], + } + } + } + } + + result.properties = properties + result.required = allPropertyNames + result.additionalProperties = false + } + + if (result.type === 'array' && result.items) { + result.items = makeGroqStructuredOutputCompatible( + result.items, + result.items.required || [], + ) + } + + return result +} diff --git a/packages/typescript/ai-groq/tests/groq-adapter.test.ts b/packages/typescript/ai-groq/tests/groq-adapter.test.ts new file mode 100644 index 00000000..96bc2d76 --- /dev/null +++ b/packages/typescript/ai-groq/tests/groq-adapter.test.ts @@ -0,0 +1,581 @@ +import { describe, it, expect, vi, afterEach, beforeEach, type Mock } from 'vitest' +import { createGroqText, groqText } from '../src/adapters/text' +import type { StreamChunk, Tool } from '@tanstack/ai' + +// Declare mockCreate at module level +let mockCreate: Mock<(...args: Array) => unknown> + +// Mock the Groq SDK +vi.mock('groq-sdk', () => { + return { + default: class { + chat = { + completions: { + create: (...args: Array) => mockCreate(...args), + }, + } + }, + } +}) + +// Helper to create async iterable from chunks +function createAsyncIterable(chunks: Array): AsyncIterable { + return { + [Symbol.asyncIterator]() { + let index = 0 + return { + async next() { + if (index < chunks.length) { + return { value: chunks[index++]!, done: false } + } + return { value: undefined as T, done: true } + }, + } + }, + } +} + +// Helper to setup the mock SDK client for streaming responses +function setupMockSdkClient( + streamChunks: Array>, + nonStreamResponse?: Record, +) { + mockCreate = vi.fn().mockImplementation((params) => { + if (params.stream) { + return Promise.resolve(createAsyncIterable(streamChunks)) + } + return Promise.resolve(nonStreamResponse) + }) +} + +const weatherTool: Tool = { + 
name: 'lookup_weather', + description: 'Return the forecast for a location', +} + +describe('Groq adapters', () => { + afterEach(() => { + vi.unstubAllEnvs() + }) + + describe('Text adapter', () => { + it('creates a text adapter with explicit API key', () => { + const adapter = createGroqText( + 'llama-3.3-70b-versatile', + 'test-api-key', + ) + + expect(adapter).toBeDefined() + expect(adapter.kind).toBe('text') + expect(adapter.name).toBe('groq') + expect(adapter.model).toBe('llama-3.3-70b-versatile') + }) + + it('creates a text adapter from environment variable', () => { + vi.stubEnv('GROQ_API_KEY', 'env-api-key') + + const adapter = groqText('llama-3.1-8b-instant') + + expect(adapter).toBeDefined() + expect(adapter.kind).toBe('text') + expect(adapter.model).toBe('llama-3.1-8b-instant') + }) + + it('throws if GROQ_API_KEY is not set when using groqText', () => { + vi.stubEnv('GROQ_API_KEY', '') + + expect(() => groqText('llama-3.3-70b-versatile')).toThrow( + 'GROQ_API_KEY is required', + ) + }) + + it('allows custom baseURL override', () => { + const adapter = createGroqText( + 'llama-3.3-70b-versatile', + 'test-api-key', + { + baseURL: 'https://custom.api.example.com/v1', + }, + ) + + expect(adapter).toBeDefined() + }) + }) +}) + +describe('Groq AG-UI event emission', () => { + beforeEach(() => { + vi.clearAllMocks() + }) + + afterEach(() => { + vi.unstubAllEnvs() + }) + + it('emits RUN_STARTED as the first event', async () => { + const streamChunks = [ + { + id: 'chatcmpl-123', + model: 'llama-3.3-70b-versatile', + choices: [ + { + delta: { content: 'Hello' }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-123', + model: 'llama-3.3-70b-versatile', + choices: [ + { + delta: {}, + finish_reason: 'stop', + }, + ], + usage: { + prompt_tokens: 5, + completion_tokens: 1, + total_tokens: 6, + }, + }, + ] + + setupMockSdkClient(streamChunks) + const adapter = createGroqText( + 'llama-3.3-70b-versatile', + 'test-api-key', + ) + const chunks: Array = [] + + 
for await (const chunk of adapter.chatStream({ + model: 'llama-3.3-70b-versatile', + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + expect(chunks[0]?.type).toBe('RUN_STARTED') + if (chunks[0]?.type === 'RUN_STARTED') { + expect(chunks[0].runId).toBeDefined() + expect(chunks[0].model).toBe('llama-3.3-70b-versatile') + } + }) + + it('emits TEXT_MESSAGE_START before TEXT_MESSAGE_CONTENT', async () => { + const streamChunks = [ + { + id: 'chatcmpl-123', + model: 'llama-3.3-70b-versatile', + choices: [ + { + delta: { content: 'Hello' }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-123', + model: 'llama-3.3-70b-versatile', + choices: [ + { + delta: {}, + finish_reason: 'stop', + }, + ], + usage: { + prompt_tokens: 5, + completion_tokens: 1, + total_tokens: 6, + }, + }, + ] + + setupMockSdkClient(streamChunks) + const adapter = createGroqText( + 'llama-3.3-70b-versatile', + 'test-api-key', + ) + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'llama-3.3-70b-versatile', + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const textStartIndex = chunks.findIndex( + (c) => c.type === 'TEXT_MESSAGE_START', + ) + const textContentIndex = chunks.findIndex( + (c) => c.type === 'TEXT_MESSAGE_CONTENT', + ) + + expect(textStartIndex).toBeGreaterThan(-1) + expect(textContentIndex).toBeGreaterThan(-1) + expect(textStartIndex).toBeLessThan(textContentIndex) + + const textStart = chunks[textStartIndex] + if (textStart?.type === 'TEXT_MESSAGE_START') { + expect(textStart.messageId).toBeDefined() + expect(textStart.role).toBe('assistant') + } + }) + + it('emits TEXT_MESSAGE_END and RUN_FINISHED at the end', async () => { + const streamChunks = [ + { + id: 'chatcmpl-123', + model: 'llama-3.3-70b-versatile', + choices: [ + { + delta: { content: 'Hello' }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-123', + model: 'llama-3.3-70b-versatile', + choices: [ + { 
+ delta: {}, + finish_reason: 'stop', + }, + ], + usage: { + prompt_tokens: 5, + completion_tokens: 1, + total_tokens: 6, + }, + }, + ] + + setupMockSdkClient(streamChunks) + const adapter = createGroqText( + 'llama-3.3-70b-versatile', + 'test-api-key', + ) + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'llama-3.3-70b-versatile', + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const textEndChunk = chunks.find((c) => c.type === 'TEXT_MESSAGE_END') + expect(textEndChunk).toBeDefined() + if (textEndChunk?.type === 'TEXT_MESSAGE_END') { + expect(textEndChunk.messageId).toBeDefined() + } + + const runFinishedChunk = chunks.find((c) => c.type === 'RUN_FINISHED') + expect(runFinishedChunk).toBeDefined() + if (runFinishedChunk?.type === 'RUN_FINISHED') { + expect(runFinishedChunk.runId).toBeDefined() + expect(runFinishedChunk.finishReason).toBe('stop') + expect(runFinishedChunk.usage).toMatchObject({ + promptTokens: 5, + completionTokens: 1, + totalTokens: 6, + }) + } + }) + + it('emits AG-UI tool call events', async () => { + const streamChunks = [ + { + id: 'chatcmpl-456', + model: 'llama-3.3-70b-versatile', + choices: [ + { + delta: { + tool_calls: [ + { + index: 0, + id: 'call_abc123', + type: 'function', + function: { + name: 'lookup_weather', + arguments: '{"location":', + }, + }, + ], + }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-456', + model: 'llama-3.3-70b-versatile', + choices: [ + { + delta: { + tool_calls: [ + { + index: 0, + function: { + arguments: '"Berlin"}', + }, + }, + ], + }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-456', + model: 'llama-3.3-70b-versatile', + choices: [ + { + delta: {}, + finish_reason: 'tool_calls', + }, + ], + usage: { + prompt_tokens: 10, + completion_tokens: 5, + total_tokens: 15, + }, + }, + ] + + setupMockSdkClient(streamChunks) + const adapter = createGroqText( + 'llama-3.3-70b-versatile', + 'test-api-key', + ) + const 
chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'llama-3.3-70b-versatile', + messages: [{ role: 'user', content: 'Weather in Berlin?' }], + tools: [weatherTool], + })) { + chunks.push(chunk) + } + + // Check AG-UI tool events + const toolStartChunk = chunks.find((c) => c.type === 'TOOL_CALL_START') + expect(toolStartChunk).toBeDefined() + if (toolStartChunk?.type === 'TOOL_CALL_START') { + expect(toolStartChunk.toolCallId).toBe('call_abc123') + expect(toolStartChunk.toolName).toBe('lookup_weather') + } + + const toolArgsChunks = chunks.filter((c) => c.type === 'TOOL_CALL_ARGS') + expect(toolArgsChunks.length).toBeGreaterThan(0) + + const toolEndChunk = chunks.find((c) => c.type === 'TOOL_CALL_END') + expect(toolEndChunk).toBeDefined() + if (toolEndChunk?.type === 'TOOL_CALL_END') { + expect(toolEndChunk.toolCallId).toBe('call_abc123') + expect(toolEndChunk.toolName).toBe('lookup_weather') + expect(toolEndChunk.input).toEqual({ location: 'Berlin' }) + } + + // Check finish reason + const runFinishedChunk = chunks.find((c) => c.type === 'RUN_FINISHED') + if (runFinishedChunk?.type === 'RUN_FINISHED') { + expect(runFinishedChunk.finishReason).toBe('tool_calls') + } + }) + + it('emits RUN_ERROR on stream error', async () => { + const streamChunks = [ + { + id: 'chatcmpl-123', + model: 'llama-3.3-70b-versatile', + choices: [ + { + delta: { content: 'Hello' }, + finish_reason: null, + }, + ], + }, + ] + + // Create an async iterable that throws mid-stream + const errorIterable = { + [Symbol.asyncIterator]() { + let index = 0 + return { + async next() { + if (index < streamChunks.length) { + return { value: streamChunks[index++]!, done: false } + } + throw new Error('Stream interrupted') + }, + } + }, + } + + mockCreate = vi.fn().mockResolvedValue(errorIterable) + + const adapter = createGroqText( + 'llama-3.3-70b-versatile', + 'test-api-key', + ) + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 
'llama-3.3-70b-versatile', + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + // Should emit RUN_ERROR + const runErrorChunk = chunks.find((c) => c.type === 'RUN_ERROR') + expect(runErrorChunk).toBeDefined() + if (runErrorChunk?.type === 'RUN_ERROR') { + expect(runErrorChunk.error.message).toBe('Stream interrupted') + } + }) + + it('emits proper AG-UI event sequence', async () => { + const streamChunks = [ + { + id: 'chatcmpl-123', + model: 'llama-3.3-70b-versatile', + choices: [ + { + delta: { content: 'Hello world' }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-123', + model: 'llama-3.3-70b-versatile', + choices: [ + { + delta: {}, + finish_reason: 'stop', + }, + ], + usage: { + prompt_tokens: 5, + completion_tokens: 2, + total_tokens: 7, + }, + }, + ] + + setupMockSdkClient(streamChunks) + const adapter = createGroqText( + 'llama-3.3-70b-versatile', + 'test-api-key', + ) + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'llama-3.3-70b-versatile', + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + // Verify proper AG-UI event sequence + const eventTypes = chunks.map((c) => c.type) + + // Should start with RUN_STARTED + expect(eventTypes[0]).toBe('RUN_STARTED') + + // Should have TEXT_MESSAGE_START before TEXT_MESSAGE_CONTENT + const textStartIndex = eventTypes.indexOf('TEXT_MESSAGE_START') + const textContentIndex = eventTypes.indexOf('TEXT_MESSAGE_CONTENT') + expect(textStartIndex).toBeGreaterThan(-1) + expect(textContentIndex).toBeGreaterThan(textStartIndex) + + // Should have TEXT_MESSAGE_END before RUN_FINISHED + const textEndIndex = eventTypes.indexOf('TEXT_MESSAGE_END') + const runFinishedIndex = eventTypes.indexOf('RUN_FINISHED') + expect(textEndIndex).toBeGreaterThan(-1) + expect(runFinishedIndex).toBeGreaterThan(textEndIndex) + + // Verify RUN_FINISHED has proper data + const runFinishedChunk = chunks.find((c) => c.type === 
'RUN_FINISHED') + if (runFinishedChunk?.type === 'RUN_FINISHED') { + expect(runFinishedChunk.finishReason).toBe('stop') + expect(runFinishedChunk.usage).toBeDefined() + } + }) + + it('streams content with correct accumulated values', async () => { + const streamChunks = [ + { + id: 'chatcmpl-stream', + model: 'llama-3.3-70b-versatile', + choices: [ + { + delta: { content: 'Hello ' }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-stream', + model: 'llama-3.3-70b-versatile', + choices: [ + { + delta: { content: 'world' }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-stream', + model: 'llama-3.3-70b-versatile', + choices: [ + { + delta: {}, + finish_reason: 'stop', + }, + ], + usage: { + prompt_tokens: 5, + completion_tokens: 2, + total_tokens: 7, + }, + }, + ] + + setupMockSdkClient(streamChunks) + const adapter = createGroqText( + 'llama-3.3-70b-versatile', + 'test-api-key', + ) + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'llama-3.3-70b-versatile', + messages: [{ role: 'user', content: 'Say hello' }], + })) { + chunks.push(chunk) + } + + // Check TEXT_MESSAGE_CONTENT events have correct accumulated content + const contentChunks = chunks.filter( + (c) => c.type === 'TEXT_MESSAGE_CONTENT', + ) + expect(contentChunks.length).toBe(2) + + const firstContent = contentChunks[0] + if (firstContent?.type === 'TEXT_MESSAGE_CONTENT') { + expect(firstContent.delta).toBe('Hello ') + expect(firstContent.content).toBe('Hello ') + } + + const secondContent = contentChunks[1] + if (secondContent?.type === 'TEXT_MESSAGE_CONTENT') { + expect(secondContent.delta).toBe('world') + expect(secondContent.content).toBe('Hello world') + } + }) +}) From bb0427052208ab87ebb9b51cd170ac71a6a231b5 Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Mon, 9 Feb 2026 12:33:02 +0100 Subject: [PATCH 08/16] feat: opus 4.6 model & additional config for provider clients (#278) * feat: opus 4.6 model & additional config for provider clients * 
fix: issue with gemini adapter --- .changeset/slimy-ways-wave.md | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 .changeset/slimy-ways-wave.md diff --git a/.changeset/slimy-ways-wave.md b/.changeset/slimy-ways-wave.md new file mode 100644 index 00000000..06404a7d --- /dev/null +++ b/.changeset/slimy-ways-wave.md @@ -0,0 +1,7 @@ +--- +'@tanstack/ai-anthropic': patch +'@tanstack/ai-gemini': patch +'@tanstack/ai-grok': patch +--- + +Add in opus 4.6 and enhance acceptable config options by providers From a59839351a200c2a85cb8e4d11d9761a2ab4ef02 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 12 Feb 2026 09:25:30 +0100 Subject: [PATCH 09/16] ci: Version Packages (#283) * ci: Version Packages * fix version numbers * fix changelogs * ci: apply automated fixes --------- Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: Jack Herrington Co-authored-by: Alem Tuzlak Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- .../fix-anthropic-multi-turn-tool-calls.md | 35 ------------- .changeset/tighten-adapter-contract.md | 25 --------- examples/ts-svelte-chat/CHANGELOG.md | 13 +++++ examples/ts-svelte-chat/package.json | 2 +- examples/ts-vue-chat/CHANGELOG.md | 14 +++++ examples/ts-vue-chat/package.json | 2 +- examples/vanilla-chat/CHANGELOG.md | 7 +++ examples/vanilla-chat/package.json | 2 +- packages/typescript/ai-anthropic/CHANGELOG.md | 51 +++++++++++++++++++ packages/typescript/ai-anthropic/package.json | 2 +- packages/typescript/ai-client/CHANGELOG.md | 32 ++++++++++++ packages/typescript/ai-client/package.json | 2 +- packages/typescript/ai-devtools/CHANGELOG.md | 7 +++ packages/typescript/ai-devtools/package.json | 2 +- packages/typescript/ai-gemini/CHANGELOG.md | 51 +++++++++++++++++++ packages/typescript/ai-gemini/package.json | 2 +- packages/typescript/ai-grok/CHANGELOG.md | 7 +++ 
packages/typescript/ai-grok/package.json | 2 +- packages/typescript/ai-ollama/CHANGELOG.md | 7 +++ packages/typescript/ai-ollama/package.json | 2 +- packages/typescript/ai-openai/CHANGELOG.md | 7 +++ packages/typescript/ai-openai/package.json | 2 +- .../typescript/ai-openrouter/CHANGELOG.md | 7 +++ .../typescript/ai-openrouter/package.json | 2 +- packages/typescript/ai-preact/CHANGELOG.md | 8 +++ packages/typescript/ai-preact/package.json | 2 +- packages/typescript/ai-react-ui/CHANGELOG.md | 8 +++ packages/typescript/ai-react-ui/package.json | 2 +- packages/typescript/ai-react/CHANGELOG.md | 8 +++ packages/typescript/ai-react/package.json | 2 +- packages/typescript/ai-solid-ui/CHANGELOG.md | 8 +++ packages/typescript/ai-solid-ui/package.json | 2 +- packages/typescript/ai-solid/CHANGELOG.md | 8 +++ packages/typescript/ai-solid/package.json | 2 +- packages/typescript/ai-svelte/CHANGELOG.md | 8 +++ packages/typescript/ai-svelte/package.json | 2 +- packages/typescript/ai-vue-ui/CHANGELOG.md | 7 +++ packages/typescript/ai-vue-ui/package.json | 2 +- packages/typescript/ai-vue/CHANGELOG.md | 8 +++ packages/typescript/ai-vue/package.json | 2 +- packages/typescript/ai/CHANGELOG.md | 48 +++++++++++++++++ packages/typescript/ai/package.json | 2 +- .../preact-ai-devtools/CHANGELOG.md | 7 +++ .../preact-ai-devtools/package.json | 2 +- .../typescript/react-ai-devtools/CHANGELOG.md | 7 +++ .../typescript/react-ai-devtools/package.json | 2 +- .../smoke-tests/adapters/CHANGELOG.md | 13 +++++ .../smoke-tests/adapters/package.json | 2 +- .../typescript/smoke-tests/e2e/CHANGELOG.md | 11 ++++ .../typescript/smoke-tests/e2e/package.json | 2 +- .../typescript/solid-ai-devtools/CHANGELOG.md | 7 +++ .../typescript/solid-ai-devtools/package.json | 2 +- 52 files changed, 384 insertions(+), 85 deletions(-) delete mode 100644 .changeset/fix-anthropic-multi-turn-tool-calls.md delete mode 100644 .changeset/tighten-adapter-contract.md diff --git a/.changeset/fix-anthropic-multi-turn-tool-calls.md 
b/.changeset/fix-anthropic-multi-turn-tool-calls.md deleted file mode 100644 index 34553ece..00000000 --- a/.changeset/fix-anthropic-multi-turn-tool-calls.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -'@tanstack/ai': patch -'@tanstack/ai-client': patch -'@tanstack/ai-anthropic': patch -'@tanstack/ai-gemini': patch ---- - -fix(ai, ai-client, ai-anthropic, ai-gemini): fix multi-turn conversations failing after tool calls - -**Core (@tanstack/ai):** - -- Lazy assistant message creation: `StreamProcessor` now defers creating the assistant message until the first content-bearing chunk arrives (text, tool call, thinking, or error), eliminating empty `parts: []` messages from appearing during auto-continuation when the model returns no content -- Add `prepareAssistantMessage()` (lazy) alongside deprecated `startAssistantMessage()` (eager, backwards-compatible) -- Add `getCurrentAssistantMessageId()` to check if a message was created -- **Rewrite `uiMessageToModelMessages()` to preserve part ordering**: the function now walks parts sequentially instead of separating by type, producing correctly interleaved assistant/tool messages (text1 + toolCall1 → toolResult1 → text2 + toolCall2 → toolResult2) instead of concatenating all text and batching all tool calls. This fixes multi-round tool flows where the model would see garbled conversation history and re-call tools unnecessarily. 
-- Deduplicate tool result messages: when a client tool has both a `tool-result` part and a `tool-call` part with `output`, only one `role: 'tool'` message is emitted per tool call ID - -**Client (@tanstack/ai-client):** - -- Update `ChatClient.processStream()` to use lazy assistant message creation, preventing UI flicker from empty messages being created then removed - -**Anthropic:** - -- Fix consecutive user-role messages violating Anthropic's alternating role requirement by merging them in `formatMessages` -- Deduplicate `tool_result` blocks with the same `tool_use_id` -- Filter out empty assistant messages from conversation history -- Suppress duplicate `RUN_FINISHED` event from `message_stop` when `message_delta` already emitted one -- Fix `TEXT_MESSAGE_END` incorrectly emitting for `tool_use` content blocks -- Add Claude Opus 4.6 model support with adaptive thinking and effort parameter - -**Gemini:** - -- Fix consecutive user-role messages violating Gemini's alternating role requirement by merging them in `formatMessages` -- Deduplicate `functionResponse` parts with the same name (tool call ID) -- Filter out empty model messages from conversation history diff --git a/.changeset/tighten-adapter-contract.md b/.changeset/tighten-adapter-contract.md deleted file mode 100644 index 78a7af3f..00000000 --- a/.changeset/tighten-adapter-contract.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -'@tanstack/ai': minor -'@tanstack/ai-anthropic': minor -'@tanstack/ai-gemini': minor ---- - -Tighten the AG-UI adapter contract and simplify the core stream processor. - -**Breaking type changes:** - -- `TextMessageContentEvent.delta` is now required (was optional) -- `StepFinishedEvent.delta` is now required (was optional) - -All first-party adapters already sent `delta` on every event, so this is a type-level enforcement of existing behavior. Community adapters that follow the reference implementations will not need code changes. 
- -**Core processor simplifications:** - -- `TEXT_MESSAGE_START` now resets text segment state, replacing heuristic overlap detection -- `TOOL_CALL_END` is now the authoritative signal for tool call input completion -- Removed delta/content fallback logic, whitespace-only message cleanup, and finish-reason conflict arbitration from the processor - -**Adapter fixes:** - -- Gemini: filter whitespace-only text parts, fix STEP_FINISHED content accumulation, emit fresh TEXT_MESSAGE_START after tool calls -- Anthropic: emit fresh TEXT_MESSAGE_START after tool_use blocks for proper text segmentation diff --git a/examples/ts-svelte-chat/CHANGELOG.md b/examples/ts-svelte-chat/CHANGELOG.md index 8a0ab92c..19dd8e31 100644 --- a/examples/ts-svelte-chat/CHANGELOG.md +++ b/examples/ts-svelte-chat/CHANGELOG.md @@ -1,5 +1,18 @@ # ts-svelte-chat +## 0.1.11 + +### Patch Changes + +- Updated dependencies [[`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd), [`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd)]: + - @tanstack/ai@0.5.0 + - @tanstack/ai-client@0.4.4 + - @tanstack/ai-anthropic@0.5.0 + - @tanstack/ai-gemini@0.5.0 + - @tanstack/ai-ollama@0.5.0 + - @tanstack/ai-openai@0.5.0 + - @tanstack/ai-svelte@0.5.0 + ## 0.1.10 ### Patch Changes diff --git a/examples/ts-svelte-chat/package.json b/examples/ts-svelte-chat/package.json index c98dcaa0..acab1ed5 100644 --- a/examples/ts-svelte-chat/package.json +++ b/examples/ts-svelte-chat/package.json @@ -1,7 +1,7 @@ { "name": "ts-svelte-chat", "private": true, - "version": "0.1.10", + "version": "0.1.11", "type": "module", "scripts": { "dev": "vite dev --port 3000", diff --git a/examples/ts-vue-chat/CHANGELOG.md b/examples/ts-vue-chat/CHANGELOG.md index 9d5142ad..94a6bed4 100644 --- a/examples/ts-vue-chat/CHANGELOG.md +++ b/examples/ts-vue-chat/CHANGELOG.md @@ -1,5 +1,19 @@ # ts-vue-chat +## 0.1.11 + +### Patch Changes + +- Updated dependencies 
[[`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd), [`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd)]: + - @tanstack/ai@0.5.0 + - @tanstack/ai-client@0.4.4 + - @tanstack/ai-anthropic@0.5.0 + - @tanstack/ai-gemini@0.5.0 + - @tanstack/ai-ollama@0.5.0 + - @tanstack/ai-openai@0.5.0 + - @tanstack/ai-vue@0.5.0 + - @tanstack/ai-vue-ui@0.1.9 + ## 0.1.10 ### Patch Changes diff --git a/examples/ts-vue-chat/package.json b/examples/ts-vue-chat/package.json index 39710b4d..e94fe96e 100644 --- a/examples/ts-vue-chat/package.json +++ b/examples/ts-vue-chat/package.json @@ -1,6 +1,6 @@ { "name": "ts-vue-chat", - "version": "0.1.10", + "version": "0.1.11", "private": true, "type": "module", "scripts": { diff --git a/examples/vanilla-chat/CHANGELOG.md b/examples/vanilla-chat/CHANGELOG.md index 999f574e..17433a34 100644 --- a/examples/vanilla-chat/CHANGELOG.md +++ b/examples/vanilla-chat/CHANGELOG.md @@ -1,5 +1,12 @@ # vanilla-chat +## 0.0.13 + +### Patch Changes + +- Updated dependencies [[`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd)]: + - @tanstack/ai-client@0.4.4 + ## 0.0.12 ### Patch Changes diff --git a/examples/vanilla-chat/package.json b/examples/vanilla-chat/package.json index 32151544..d4ba014f 100644 --- a/examples/vanilla-chat/package.json +++ b/examples/vanilla-chat/package.json @@ -2,7 +2,7 @@ "name": "vanilla-chat", "private": true, "type": "module", - "version": "0.0.12", + "version": "0.0.13", "scripts": { "start": "vite --port 3001", "dev": "vite --port 3001", diff --git a/packages/typescript/ai-anthropic/CHANGELOG.md b/packages/typescript/ai-anthropic/CHANGELOG.md index fc6093a8..245ab88c 100644 --- a/packages/typescript/ai-anthropic/CHANGELOG.md +++ b/packages/typescript/ai-anthropic/CHANGELOG.md @@ -1,5 +1,56 @@ # @tanstack/ai-anthropic +## 0.5.0 + +### Minor Changes + +- Tighten the AG-UI adapter contract and simplify the core stream 
processor. ([#275](https://github.com/TanStack/ai/pull/275)) + + **Breaking type changes:** + - `TextMessageContentEvent.delta` is now required (was optional) + - `StepFinishedEvent.delta` is now required (was optional) + + All first-party adapters already sent `delta` on every event, so this is a type-level enforcement of existing behavior. Community adapters that follow the reference implementations will not need code changes. + + **Core processor simplifications:** + - `TEXT_MESSAGE_START` now resets text segment state, replacing heuristic overlap detection + - `TOOL_CALL_END` is now the authoritative signal for tool call input completion + - Removed delta/content fallback logic, whitespace-only message cleanup, and finish-reason conflict arbitration from the processor + + **Adapter fixes:** + - Gemini: filter whitespace-only text parts, fix STEP_FINISHED content accumulation, emit fresh TEXT_MESSAGE_START after tool calls + - Anthropic: emit fresh TEXT_MESSAGE_START after tool_use blocks for proper text segmentation + +### Patch Changes + +- fix(ai, ai-client, ai-anthropic, ai-gemini): fix multi-turn conversations failing after tool calls ([#275](https://github.com/TanStack/ai/pull/275)) + + **Core (@tanstack/ai):** + - Lazy assistant message creation: `StreamProcessor` now defers creating the assistant message until the first content-bearing chunk arrives (text, tool call, thinking, or error), eliminating empty `parts: []` messages from appearing during auto-continuation when the model returns no content + - Add `prepareAssistantMessage()` (lazy) alongside deprecated `startAssistantMessage()` (eager, backwards-compatible) + - Add `getCurrentAssistantMessageId()` to check if a message was created + - **Rewrite `uiMessageToModelMessages()` to preserve part ordering**: the function now walks parts sequentially instead of separating by type, producing correctly interleaved assistant/tool messages (text1 + toolCall1 → toolResult1 → text2 + toolCall2 → toolResult2) 
instead of concatenating all text and batching all tool calls. This fixes multi-round tool flows where the model would see garbled conversation history and re-call tools unnecessarily. + - Deduplicate tool result messages: when a client tool has both a `tool-result` part and a `tool-call` part with `output`, only one `role: 'tool'` message is emitted per tool call ID + + **Client (@tanstack/ai-client):** + - Update `ChatClient.processStream()` to use lazy assistant message creation, preventing UI flicker from empty messages being created then removed + + **Anthropic:** + - Fix consecutive user-role messages violating Anthropic's alternating role requirement by merging them in `formatMessages` + - Deduplicate `tool_result` blocks with the same `tool_use_id` + - Filter out empty assistant messages from conversation history + - Suppress duplicate `RUN_FINISHED` event from `message_stop` when `message_delta` already emitted one + - Fix `TEXT_MESSAGE_END` incorrectly emitting for `tool_use` content blocks + - Add Claude Opus 4.6 model support with adaptive thinking and effort parameter + + **Gemini:** + - Fix consecutive user-role messages violating Gemini's alternating role requirement by merging them in `formatMessages` + - Deduplicate `functionResponse` parts with the same name (tool call ID) + - Filter out empty model messages from conversation history + +- Updated dependencies [[`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd), [`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd)]: + - @tanstack/ai@0.5.0 + ## 0.4.2 ### Patch Changes diff --git a/packages/typescript/ai-anthropic/package.json b/packages/typescript/ai-anthropic/package.json index eecbe2a6..84b31808 100644 --- a/packages/typescript/ai-anthropic/package.json +++ b/packages/typescript/ai-anthropic/package.json @@ -1,6 +1,6 @@ { "name": "@tanstack/ai-anthropic", - "version": "0.4.2", + "version": "0.5.0", "description": 
"Anthropic Claude adapter for TanStack AI", "author": "", "license": "MIT", diff --git a/packages/typescript/ai-client/CHANGELOG.md b/packages/typescript/ai-client/CHANGELOG.md index 9f384597..3f5a9ad9 100644 --- a/packages/typescript/ai-client/CHANGELOG.md +++ b/packages/typescript/ai-client/CHANGELOG.md @@ -1,5 +1,37 @@ # @tanstack/ai-client +## 0.4.4 + +### Patch Changes + +- fix(ai, ai-client, ai-anthropic, ai-gemini): fix multi-turn conversations failing after tool calls ([#275](https://github.com/TanStack/ai/pull/275)) + + **Core (@tanstack/ai):** + - Lazy assistant message creation: `StreamProcessor` now defers creating the assistant message until the first content-bearing chunk arrives (text, tool call, thinking, or error), eliminating empty `parts: []` messages from appearing during auto-continuation when the model returns no content + - Add `prepareAssistantMessage()` (lazy) alongside deprecated `startAssistantMessage()` (eager, backwards-compatible) + - Add `getCurrentAssistantMessageId()` to check if a message was created + - **Rewrite `uiMessageToModelMessages()` to preserve part ordering**: the function now walks parts sequentially instead of separating by type, producing correctly interleaved assistant/tool messages (text1 + toolCall1 → toolResult1 → text2 + toolCall2 → toolResult2) instead of concatenating all text and batching all tool calls. This fixes multi-round tool flows where the model would see garbled conversation history and re-call tools unnecessarily. 
+ - Deduplicate tool result messages: when a client tool has both a `tool-result` part and a `tool-call` part with `output`, only one `role: 'tool'` message is emitted per tool call ID + + **Client (@tanstack/ai-client):** + - Update `ChatClient.processStream()` to use lazy assistant message creation, preventing UI flicker from empty messages being created then removed + + **Anthropic:** + - Fix consecutive user-role messages violating Anthropic's alternating role requirement by merging them in `formatMessages` + - Deduplicate `tool_result` blocks with the same `tool_use_id` + - Filter out empty assistant messages from conversation history + - Suppress duplicate `RUN_FINISHED` event from `message_stop` when `message_delta` already emitted one + - Fix `TEXT_MESSAGE_END` incorrectly emitting for `tool_use` content blocks + - Add Claude Opus 4.6 model support with adaptive thinking and effort parameter + + **Gemini:** + - Fix consecutive user-role messages violating Gemini's alternating role requirement by merging them in `formatMessages` + - Deduplicate `functionResponse` parts with the same name (tool call ID) + - Filter out empty model messages from conversation history + +- Updated dependencies [[`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd), [`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd)]: + - @tanstack/ai@0.5.0 + ## 0.4.3 ### Patch Changes diff --git a/packages/typescript/ai-client/package.json b/packages/typescript/ai-client/package.json index 680e37f5..214facff 100644 --- a/packages/typescript/ai-client/package.json +++ b/packages/typescript/ai-client/package.json @@ -1,6 +1,6 @@ { "name": "@tanstack/ai-client", - "version": "0.4.3", + "version": "0.4.4", "description": "Framework-agnostic headless client for TanStack AI", "author": "", "license": "MIT", diff --git a/packages/typescript/ai-devtools/CHANGELOG.md b/packages/typescript/ai-devtools/CHANGELOG.md index 
e6378e35..7ed1fef3 100644 --- a/packages/typescript/ai-devtools/CHANGELOG.md +++ b/packages/typescript/ai-devtools/CHANGELOG.md @@ -1,5 +1,12 @@ # @tanstack/ai-devtools-core +## 0.3.3 + +### Patch Changes + +- Updated dependencies [[`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd), [`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd)]: + - @tanstack/ai@0.5.0 + ## 0.3.2 ### Patch Changes diff --git a/packages/typescript/ai-devtools/package.json b/packages/typescript/ai-devtools/package.json index dc38c3c3..04e8ff08 100644 --- a/packages/typescript/ai-devtools/package.json +++ b/packages/typescript/ai-devtools/package.json @@ -1,6 +1,6 @@ { "name": "@tanstack/ai-devtools-core", - "version": "0.3.2", + "version": "0.3.3", "description": "Core TanStack AI Devtools", "author": "", "license": "MIT", diff --git a/packages/typescript/ai-gemini/CHANGELOG.md b/packages/typescript/ai-gemini/CHANGELOG.md index 69f753a6..88f0a300 100644 --- a/packages/typescript/ai-gemini/CHANGELOG.md +++ b/packages/typescript/ai-gemini/CHANGELOG.md @@ -1,5 +1,56 @@ # @tanstack/ai-gemini +## 0.5.0 + +### Minor Changes + +- Tighten the AG-UI adapter contract and simplify the core stream processor. ([#275](https://github.com/TanStack/ai/pull/275)) + + **Breaking type changes:** + - `TextMessageContentEvent.delta` is now required (was optional) + - `StepFinishedEvent.delta` is now required (was optional) + + All first-party adapters already sent `delta` on every event, so this is a type-level enforcement of existing behavior. Community adapters that follow the reference implementations will not need code changes. 
+ + **Core processor simplifications:** + - `TEXT_MESSAGE_START` now resets text segment state, replacing heuristic overlap detection + - `TOOL_CALL_END` is now the authoritative signal for tool call input completion + - Removed delta/content fallback logic, whitespace-only message cleanup, and finish-reason conflict arbitration from the processor + + **Adapter fixes:** + - Gemini: filter whitespace-only text parts, fix STEP_FINISHED content accumulation, emit fresh TEXT_MESSAGE_START after tool calls + - Anthropic: emit fresh TEXT_MESSAGE_START after tool_use blocks for proper text segmentation + +### Patch Changes + +- fix(ai, ai-client, ai-anthropic, ai-gemini): fix multi-turn conversations failing after tool calls ([#275](https://github.com/TanStack/ai/pull/275)) + + **Core (@tanstack/ai):** + - Lazy assistant message creation: `StreamProcessor` now defers creating the assistant message until the first content-bearing chunk arrives (text, tool call, thinking, or error), eliminating empty `parts: []` messages from appearing during auto-continuation when the model returns no content + - Add `prepareAssistantMessage()` (lazy) alongside deprecated `startAssistantMessage()` (eager, backwards-compatible) + - Add `getCurrentAssistantMessageId()` to check if a message was created + - **Rewrite `uiMessageToModelMessages()` to preserve part ordering**: the function now walks parts sequentially instead of separating by type, producing correctly interleaved assistant/tool messages (text1 + toolCall1 → toolResult1 → text2 + toolCall2 → toolResult2) instead of concatenating all text and batching all tool calls. This fixes multi-round tool flows where the model would see garbled conversation history and re-call tools unnecessarily. 
+ - Deduplicate tool result messages: when a client tool has both a `tool-result` part and a `tool-call` part with `output`, only one `role: 'tool'` message is emitted per tool call ID + + **Client (@tanstack/ai-client):** + - Update `ChatClient.processStream()` to use lazy assistant message creation, preventing UI flicker from empty messages being created then removed + + **Anthropic:** + - Fix consecutive user-role messages violating Anthropic's alternating role requirement by merging them in `formatMessages` + - Deduplicate `tool_result` blocks with the same `tool_use_id` + - Filter out empty assistant messages from conversation history + - Suppress duplicate `RUN_FINISHED` event from `message_stop` when `message_delta` already emitted one + - Fix `TEXT_MESSAGE_END` incorrectly emitting for `tool_use` content blocks + - Add Claude Opus 4.6 model support with adaptive thinking and effort parameter + + **Gemini:** + - Fix consecutive user-role messages violating Gemini's alternating role requirement by merging them in `formatMessages` + - Deduplicate `functionResponse` parts with the same name (tool call ID) + - Filter out empty model messages from conversation history + +- Updated dependencies [[`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd), [`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd)]: + - @tanstack/ai@0.5.0 + ## 0.4.1 ### Patch Changes diff --git a/packages/typescript/ai-gemini/package.json b/packages/typescript/ai-gemini/package.json index 6d5fb2aa..adc20440 100644 --- a/packages/typescript/ai-gemini/package.json +++ b/packages/typescript/ai-gemini/package.json @@ -1,6 +1,6 @@ { "name": "@tanstack/ai-gemini", - "version": "0.4.1", + "version": "0.5.0", "description": "Google Gemini adapter for TanStack AI", "author": "", "license": "MIT", diff --git a/packages/typescript/ai-grok/CHANGELOG.md b/packages/typescript/ai-grok/CHANGELOG.md index 9c4dd05d..fdc55668 100644 --- 
a/packages/typescript/ai-grok/CHANGELOG.md +++ b/packages/typescript/ai-grok/CHANGELOG.md @@ -1,5 +1,12 @@ # @tanstack/ai-grok +## 0.5.0 + +### Patch Changes + +- Updated dependencies [[`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd), [`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd)]: + - @tanstack/ai@0.5.0 + ## 0.4.1 ### Patch Changes diff --git a/packages/typescript/ai-grok/package.json b/packages/typescript/ai-grok/package.json index 2dfc1270..06960f25 100644 --- a/packages/typescript/ai-grok/package.json +++ b/packages/typescript/ai-grok/package.json @@ -1,6 +1,6 @@ { "name": "@tanstack/ai-grok", - "version": "0.4.1", + "version": "0.5.0", "description": "Grok (xAI) adapter for TanStack AI", "author": "", "license": "MIT", diff --git a/packages/typescript/ai-ollama/CHANGELOG.md b/packages/typescript/ai-ollama/CHANGELOG.md index 0baf65de..516b50f3 100644 --- a/packages/typescript/ai-ollama/CHANGELOG.md +++ b/packages/typescript/ai-ollama/CHANGELOG.md @@ -1,5 +1,12 @@ # @tanstack/ai-ollama +## 0.5.0 + +### Patch Changes + +- Updated dependencies [[`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd), [`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd)]: + - @tanstack/ai@0.5.0 + ## 0.4.0 ### Patch Changes diff --git a/packages/typescript/ai-ollama/package.json b/packages/typescript/ai-ollama/package.json index 66a89e97..e1d96052 100644 --- a/packages/typescript/ai-ollama/package.json +++ b/packages/typescript/ai-ollama/package.json @@ -1,6 +1,6 @@ { "name": "@tanstack/ai-ollama", - "version": "0.4.0", + "version": "0.5.0", "description": "Ollama adapter for TanStack AI", "author": "", "license": "MIT", diff --git a/packages/typescript/ai-openai/CHANGELOG.md b/packages/typescript/ai-openai/CHANGELOG.md index 58bb832d..9fa7490e 100644 --- a/packages/typescript/ai-openai/CHANGELOG.md +++ 
b/packages/typescript/ai-openai/CHANGELOG.md @@ -1,5 +1,12 @@ # @tanstack/ai-openai +## 0.5.0 + +### Patch Changes + +- Updated dependencies [[`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd), [`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd)]: + - @tanstack/ai@0.5.0 + ## 0.4.0 ### Patch Changes diff --git a/packages/typescript/ai-openai/package.json b/packages/typescript/ai-openai/package.json index f6c6d374..ba40cee7 100644 --- a/packages/typescript/ai-openai/package.json +++ b/packages/typescript/ai-openai/package.json @@ -1,6 +1,6 @@ { "name": "@tanstack/ai-openai", - "version": "0.4.0", + "version": "0.5.0", "description": "OpenAI adapter for TanStack AI", "author": "", "license": "MIT", diff --git a/packages/typescript/ai-openrouter/CHANGELOG.md b/packages/typescript/ai-openrouter/CHANGELOG.md index acc6a33f..c7394155 100644 --- a/packages/typescript/ai-openrouter/CHANGELOG.md +++ b/packages/typescript/ai-openrouter/CHANGELOG.md @@ -1,5 +1,12 @@ # @tanstack/ai-openrouter +## 0.5.0 + +### Patch Changes + +- Updated dependencies [[`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd), [`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd)]: + - @tanstack/ai@0.5.0 + ## 0.4.2 ### Patch Changes diff --git a/packages/typescript/ai-openrouter/package.json b/packages/typescript/ai-openrouter/package.json index 7147daf2..6db6744a 100644 --- a/packages/typescript/ai-openrouter/package.json +++ b/packages/typescript/ai-openrouter/package.json @@ -1,6 +1,6 @@ { "name": "@tanstack/ai-openrouter", - "version": "0.4.2", + "version": "0.5.0", "description": "OpenRouter adapter for TanStack AI", "author": "", "license": "MIT", diff --git a/packages/typescript/ai-preact/CHANGELOG.md b/packages/typescript/ai-preact/CHANGELOG.md index 53e0b91f..ff3b3316 100644 --- a/packages/typescript/ai-preact/CHANGELOG.md +++ 
b/packages/typescript/ai-preact/CHANGELOG.md @@ -1,5 +1,13 @@ # @tanstack/ai-preact +## 0.5.3 + +### Patch Changes + +- Updated dependencies [[`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd), [`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd)]: + - @tanstack/ai@0.5.0 + - @tanstack/ai-client@0.4.4 + ## 0.5.2 ### Patch Changes diff --git a/packages/typescript/ai-preact/package.json b/packages/typescript/ai-preact/package.json index 62c8330f..9209dbe3 100644 --- a/packages/typescript/ai-preact/package.json +++ b/packages/typescript/ai-preact/package.json @@ -1,6 +1,6 @@ { "name": "@tanstack/ai-preact", - "version": "0.5.2", + "version": "0.5.3", "description": "Preact hooks for TanStack AI", "author": "", "license": "MIT", diff --git a/packages/typescript/ai-react-ui/CHANGELOG.md b/packages/typescript/ai-react-ui/CHANGELOG.md index 3b214471..a967661e 100644 --- a/packages/typescript/ai-react-ui/CHANGELOG.md +++ b/packages/typescript/ai-react-ui/CHANGELOG.md @@ -1,5 +1,13 @@ # @tanstack/ai-react-ui +## 0.5.1 + +### Patch Changes + +- Updated dependencies [[`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd)]: + - @tanstack/ai-client@0.4.4 + - @tanstack/ai-react@0.5.1 + ## 0.5.0 ### Patch Changes diff --git a/packages/typescript/ai-react-ui/package.json b/packages/typescript/ai-react-ui/package.json index 26b6c5ea..d3c17e6e 100644 --- a/packages/typescript/ai-react-ui/package.json +++ b/packages/typescript/ai-react-ui/package.json @@ -1,6 +1,6 @@ { "name": "@tanstack/ai-react-ui", - "version": "0.5.0", + "version": "0.5.1", "description": "Headless React components for building AI chat interfaces", "module": "./dist/esm/index.js", "types": "./dist/esm/index.d.ts", diff --git a/packages/typescript/ai-react/CHANGELOG.md b/packages/typescript/ai-react/CHANGELOG.md index f1a56024..3e5aa7fa 100644 --- a/packages/typescript/ai-react/CHANGELOG.md +++ 
b/packages/typescript/ai-react/CHANGELOG.md @@ -1,5 +1,13 @@ # @tanstack/ai-react +## 0.5.3 + +### Patch Changes + +- Updated dependencies [[`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd), [`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd)]: + - @tanstack/ai@0.5.0 + - @tanstack/ai-client@0.4.4 + ## 0.5.2 ### Patch Changes diff --git a/packages/typescript/ai-react/package.json b/packages/typescript/ai-react/package.json index a9036014..4dedb169 100644 --- a/packages/typescript/ai-react/package.json +++ b/packages/typescript/ai-react/package.json @@ -1,6 +1,6 @@ { "name": "@tanstack/ai-react", - "version": "0.5.2", + "version": "0.5.3", "description": "React hooks for TanStack AI", "author": "", "license": "MIT", diff --git a/packages/typescript/ai-solid-ui/CHANGELOG.md b/packages/typescript/ai-solid-ui/CHANGELOG.md index 7bbbd8ff..1432490e 100644 --- a/packages/typescript/ai-solid-ui/CHANGELOG.md +++ b/packages/typescript/ai-solid-ui/CHANGELOG.md @@ -1,5 +1,13 @@ # @tanstack/ai-solid-ui +## 0.5.1 + +### Patch Changes + +- Updated dependencies [[`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd)]: + - @tanstack/ai-client@0.4.4 + - @tanstack/ai-solid@0.5.1 + ## 0.5.0 ### Patch Changes diff --git a/packages/typescript/ai-solid-ui/package.json b/packages/typescript/ai-solid-ui/package.json index c815b937..00ee0849 100644 --- a/packages/typescript/ai-solid-ui/package.json +++ b/packages/typescript/ai-solid-ui/package.json @@ -1,6 +1,6 @@ { "name": "@tanstack/ai-solid-ui", - "version": "0.5.0", + "version": "0.5.1", "description": "Headless Solid components for building AI chat interfaces", "module": "./src/index.ts", "types": "./src/index.ts", diff --git a/packages/typescript/ai-solid/CHANGELOG.md b/packages/typescript/ai-solid/CHANGELOG.md index 92956adb..a9d57726 100644 --- a/packages/typescript/ai-solid/CHANGELOG.md +++ 
b/packages/typescript/ai-solid/CHANGELOG.md @@ -1,5 +1,13 @@ # @tanstack/ai-solid +## 0.5.3 + +### Patch Changes + +- Updated dependencies [[`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd), [`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd)]: + - @tanstack/ai@0.5.0 + - @tanstack/ai-client@0.4.4 + ## 0.5.2 ### Patch Changes diff --git a/packages/typescript/ai-solid/package.json b/packages/typescript/ai-solid/package.json index 8d84f46d..4cde9784 100644 --- a/packages/typescript/ai-solid/package.json +++ b/packages/typescript/ai-solid/package.json @@ -1,6 +1,6 @@ { "name": "@tanstack/ai-solid", - "version": "0.5.2", + "version": "0.5.3", "description": "Solid hooks for TanStack AI", "author": "", "license": "MIT", diff --git a/packages/typescript/ai-svelte/CHANGELOG.md b/packages/typescript/ai-svelte/CHANGELOG.md index 47e82746..7912fe49 100644 --- a/packages/typescript/ai-svelte/CHANGELOG.md +++ b/packages/typescript/ai-svelte/CHANGELOG.md @@ -1,5 +1,13 @@ # @tanstack/ai-svelte +## 0.5.3 + +### Patch Changes + +- Updated dependencies [[`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd), [`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd)]: + - @tanstack/ai@0.5.0 + - @tanstack/ai-client@0.4.4 + ## 0.5.2 ### Patch Changes diff --git a/packages/typescript/ai-svelte/package.json b/packages/typescript/ai-svelte/package.json index 7f376169..8f49e3e3 100644 --- a/packages/typescript/ai-svelte/package.json +++ b/packages/typescript/ai-svelte/package.json @@ -1,6 +1,6 @@ { "name": "@tanstack/ai-svelte", - "version": "0.5.2", + "version": "0.5.3", "description": "Svelte bindings for TanStack AI", "author": "", "license": "MIT", diff --git a/packages/typescript/ai-vue-ui/CHANGELOG.md b/packages/typescript/ai-vue-ui/CHANGELOG.md index 58a28334..7348ea9c 100644 --- a/packages/typescript/ai-vue-ui/CHANGELOG.md +++ 
b/packages/typescript/ai-vue-ui/CHANGELOG.md @@ -1,5 +1,12 @@ # @tanstack/ai-vue-ui +## 0.1.9 + +### Patch Changes + +- Updated dependencies []: + - @tanstack/ai-vue@0.5.3 + ## 0.1.8 ### Patch Changes diff --git a/packages/typescript/ai-vue-ui/package.json b/packages/typescript/ai-vue-ui/package.json index 57902f37..90dd2582 100644 --- a/packages/typescript/ai-vue-ui/package.json +++ b/packages/typescript/ai-vue-ui/package.json @@ -1,6 +1,6 @@ { "name": "@tanstack/ai-vue-ui", - "version": "0.1.8", + "version": "0.1.9", "description": "Headless Vue components for building AI chat interfaces", "module": "./src/index.ts", "types": "./src/index.ts", diff --git a/packages/typescript/ai-vue/CHANGELOG.md b/packages/typescript/ai-vue/CHANGELOG.md index 5bdddd1a..665c9cfd 100644 --- a/packages/typescript/ai-vue/CHANGELOG.md +++ b/packages/typescript/ai-vue/CHANGELOG.md @@ -1,5 +1,13 @@ # @tanstack/ai-vue +## 0.5.3 + +### Patch Changes + +- Updated dependencies [[`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd), [`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd)]: + - @tanstack/ai@0.5.0 + - @tanstack/ai-client@0.4.4 + ## 0.5.2 ### Patch Changes diff --git a/packages/typescript/ai-vue/package.json b/packages/typescript/ai-vue/package.json index 4f4615f8..6f71c5e6 100644 --- a/packages/typescript/ai-vue/package.json +++ b/packages/typescript/ai-vue/package.json @@ -1,6 +1,6 @@ { "name": "@tanstack/ai-vue", - "version": "0.5.2", + "version": "0.5.3", "description": "Vue hooks for TanStack AI", "author": "", "license": "MIT", diff --git a/packages/typescript/ai/CHANGELOG.md b/packages/typescript/ai/CHANGELOG.md index dfc5e613..4c15252c 100644 --- a/packages/typescript/ai/CHANGELOG.md +++ b/packages/typescript/ai/CHANGELOG.md @@ -1,5 +1,53 @@ # @tanstack/ai +## 0.5.0 + +### Minor Changes + +- Tighten the AG-UI adapter contract and simplify the core stream processor. 
([#275](https://github.com/TanStack/ai/pull/275)) + + **Breaking type changes:** + - `TextMessageContentEvent.delta` is now required (was optional) + - `StepFinishedEvent.delta` is now required (was optional) + + All first-party adapters already sent `delta` on every event, so this is a type-level enforcement of existing behavior. Community adapters that follow the reference implementations will not need code changes. + + **Core processor simplifications:** + - `TEXT_MESSAGE_START` now resets text segment state, replacing heuristic overlap detection + - `TOOL_CALL_END` is now the authoritative signal for tool call input completion + - Removed delta/content fallback logic, whitespace-only message cleanup, and finish-reason conflict arbitration from the processor + + **Adapter fixes:** + - Gemini: filter whitespace-only text parts, fix STEP_FINISHED content accumulation, emit fresh TEXT_MESSAGE_START after tool calls + - Anthropic: emit fresh TEXT_MESSAGE_START after tool_use blocks for proper text segmentation + +### Patch Changes + +- fix(ai, ai-client, ai-anthropic, ai-gemini): fix multi-turn conversations failing after tool calls ([#275](https://github.com/TanStack/ai/pull/275)) + + **Core (@tanstack/ai):** + - Lazy assistant message creation: `StreamProcessor` now defers creating the assistant message until the first content-bearing chunk arrives (text, tool call, thinking, or error), eliminating empty `parts: []` messages from appearing during auto-continuation when the model returns no content + - Add `prepareAssistantMessage()` (lazy) alongside deprecated `startAssistantMessage()` (eager, backwards-compatible) + - Add `getCurrentAssistantMessageId()` to check if a message was created + - **Rewrite `uiMessageToModelMessages()` to preserve part ordering**: the function now walks parts sequentially instead of separating by type, producing correctly interleaved assistant/tool messages (text1 + toolCall1 → toolResult1 → text2 + toolCall2 → toolResult2) instead of 
concatenating all text and batching all tool calls. This fixes multi-round tool flows where the model would see garbled conversation history and re-call tools unnecessarily. + - Deduplicate tool result messages: when a client tool has both a `tool-result` part and a `tool-call` part with `output`, only one `role: 'tool'` message is emitted per tool call ID + + **Client (@tanstack/ai-client):** + - Update `ChatClient.processStream()` to use lazy assistant message creation, preventing UI flicker from empty messages being created then removed + + **Anthropic:** + - Fix consecutive user-role messages violating Anthropic's alternating role requirement by merging them in `formatMessages` + - Deduplicate `tool_result` blocks with the same `tool_use_id` + - Filter out empty assistant messages from conversation history + - Suppress duplicate `RUN_FINISHED` event from `message_stop` when `message_delta` already emitted one + - Fix `TEXT_MESSAGE_END` incorrectly emitting for `tool_use` content blocks + - Add Claude Opus 4.6 model support with adaptive thinking and effort parameter + + **Gemini:** + - Fix consecutive user-role messages violating Gemini's alternating role requirement by merging them in `formatMessages` + - Deduplicate `functionResponse` parts with the same name (tool call ID) + - Filter out empty model messages from conversation history + ## 0.4.2 ### Patch Changes diff --git a/packages/typescript/ai/package.json b/packages/typescript/ai/package.json index 4b76449d..3fe3ccb3 100644 --- a/packages/typescript/ai/package.json +++ b/packages/typescript/ai/package.json @@ -1,6 +1,6 @@ { "name": "@tanstack/ai", - "version": "0.4.2", + "version": "0.5.0", "description": "Core TanStack AI library - Open source AI SDK", "author": "Tanner Linsley", "license": "MIT", diff --git a/packages/typescript/preact-ai-devtools/CHANGELOG.md b/packages/typescript/preact-ai-devtools/CHANGELOG.md index 14f38624..43785a94 100644 --- a/packages/typescript/preact-ai-devtools/CHANGELOG.md 
+++ b/packages/typescript/preact-ai-devtools/CHANGELOG.md @@ -1,5 +1,12 @@ # @tanstack/preact-ai-devtools +## 0.1.7 + +### Patch Changes + +- Updated dependencies []: + - @tanstack/ai-devtools-core@0.3.3 + ## 0.1.6 ### Patch Changes diff --git a/packages/typescript/preact-ai-devtools/package.json b/packages/typescript/preact-ai-devtools/package.json index e98092eb..024a00ff 100644 --- a/packages/typescript/preact-ai-devtools/package.json +++ b/packages/typescript/preact-ai-devtools/package.json @@ -1,6 +1,6 @@ { "name": "@tanstack/preact-ai-devtools", - "version": "0.1.6", + "version": "0.1.7", "description": "Preact Devtools for TanStack AI.", "author": "tannerlinsley", "license": "MIT", diff --git a/packages/typescript/react-ai-devtools/CHANGELOG.md b/packages/typescript/react-ai-devtools/CHANGELOG.md index eb3ed942..92f8c940 100644 --- a/packages/typescript/react-ai-devtools/CHANGELOG.md +++ b/packages/typescript/react-ai-devtools/CHANGELOG.md @@ -1,5 +1,12 @@ # @tanstack/react-ai-devtools +## 0.2.7 + +### Patch Changes + +- Updated dependencies []: + - @tanstack/ai-devtools-core@0.3.3 + ## 0.2.6 ### Patch Changes diff --git a/packages/typescript/react-ai-devtools/package.json b/packages/typescript/react-ai-devtools/package.json index 1e53098c..87a731c4 100644 --- a/packages/typescript/react-ai-devtools/package.json +++ b/packages/typescript/react-ai-devtools/package.json @@ -1,6 +1,6 @@ { "name": "@tanstack/react-ai-devtools", - "version": "0.2.6", + "version": "0.2.7", "description": "React Devtools for TanStack AI.", "author": "tannerlinsley", "license": "MIT", diff --git a/packages/typescript/smoke-tests/adapters/CHANGELOG.md b/packages/typescript/smoke-tests/adapters/CHANGELOG.md index 8b696792..d5b385ae 100644 --- a/packages/typescript/smoke-tests/adapters/CHANGELOG.md +++ b/packages/typescript/smoke-tests/adapters/CHANGELOG.md @@ -1,5 +1,18 @@ # @tanstack/tests-adapters +## 0.1.12 + +### Patch Changes + +- Updated dependencies 
[[`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd), [`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd)]: + - @tanstack/ai@0.5.0 + - @tanstack/ai-anthropic@0.5.0 + - @tanstack/ai-gemini@0.5.0 + - @tanstack/ai-grok@0.5.0 + - @tanstack/ai-ollama@0.5.0 + - @tanstack/ai-openai@0.5.0 + - @tanstack/ai-openrouter@0.5.0 + ## 0.1.11 ### Patch Changes diff --git a/packages/typescript/smoke-tests/adapters/package.json b/packages/typescript/smoke-tests/adapters/package.json index 6db3c656..f51bd9d5 100644 --- a/packages/typescript/smoke-tests/adapters/package.json +++ b/packages/typescript/smoke-tests/adapters/package.json @@ -1,6 +1,6 @@ { "name": "@tanstack/tests-adapters", - "version": "0.1.11", + "version": "0.1.12", "private": true, "description": "Tests for TanStack AI adapters", "author": "", diff --git a/packages/typescript/smoke-tests/e2e/CHANGELOG.md b/packages/typescript/smoke-tests/e2e/CHANGELOG.md index 839861b9..61eb1953 100644 --- a/packages/typescript/smoke-tests/e2e/CHANGELOG.md +++ b/packages/typescript/smoke-tests/e2e/CHANGELOG.md @@ -1,5 +1,16 @@ # @tanstack/smoke-tests-e2e +## 0.0.16 + +### Patch Changes + +- Updated dependencies [[`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd), [`5d98472`](https://github.com/TanStack/ai/commit/5d984722e1f84725e3cfda834fbda3d0341ecedd)]: + - @tanstack/ai@0.5.0 + - @tanstack/ai-client@0.4.4 + - @tanstack/ai-openai@0.5.0 + - @tanstack/ai-react@0.5.1 + - @tanstack/tests-adapters@0.1.12 + ## 0.0.15 ### Patch Changes diff --git a/packages/typescript/smoke-tests/e2e/package.json b/packages/typescript/smoke-tests/e2e/package.json index d39e4974..ef2602a3 100644 --- a/packages/typescript/smoke-tests/e2e/package.json +++ b/packages/typescript/smoke-tests/e2e/package.json @@ -1,6 +1,6 @@ { "name": "@tanstack/smoke-tests-e2e", - "version": "0.0.15", + "version": "0.0.16", "description": "E2E tests for TanStack 
AI chat", "private": true, "type": "module", diff --git a/packages/typescript/solid-ai-devtools/CHANGELOG.md b/packages/typescript/solid-ai-devtools/CHANGELOG.md index fe7ec5e3..6b08906a 100644 --- a/packages/typescript/solid-ai-devtools/CHANGELOG.md +++ b/packages/typescript/solid-ai-devtools/CHANGELOG.md @@ -1,5 +1,12 @@ # @tanstack/solid-ai-devtools +## 0.2.7 + +### Patch Changes + +- Updated dependencies []: + - @tanstack/ai-devtools-core@0.3.3 + ## 0.2.6 ### Patch Changes diff --git a/packages/typescript/solid-ai-devtools/package.json b/packages/typescript/solid-ai-devtools/package.json index 25bdb41a..3c1630d2 100644 --- a/packages/typescript/solid-ai-devtools/package.json +++ b/packages/typescript/solid-ai-devtools/package.json @@ -1,6 +1,6 @@ { "name": "@tanstack/solid-ai-devtools", - "version": "0.2.6", + "version": "0.2.7", "description": "Solid TanStack AI Devtools", "author": "", "license": "MIT", From b47c8d2f46132df58d1f3d2d0e6d31398830fa1c Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 12 Feb 2026 09:35:56 +0100 Subject: [PATCH 10/16] docs: regenerate API documentation (#288) Co-authored-by: github-actions[bot] --- docs/reference/classes/StreamProcessor.md | 122 +++++++++++++----- docs/reference/functions/chat.md | 2 +- .../convertMessagesToModelMessages.md | 2 +- .../reference/functions/createReplayStream.md | 2 +- docs/reference/functions/generateMessageId.md | 2 +- .../functions/modelMessageToUIMessage.md | 2 +- .../functions/modelMessagesToUIMessages.md | 2 +- .../functions/normalizeToUIMessage.md | 2 +- .../functions/uiMessageToModelMessages.md | 21 ++- .../reference/interfaces/StepFinishedEvent.md | 6 +- .../interfaces/StreamProcessorEvents.md | 20 +-- .../interfaces/StreamProcessorOptions.md | 12 +- .../interfaces/TextMessageContentEvent.md | 8 +- 13 files changed, 134 insertions(+), 69 deletions(-) diff --git a/docs/reference/classes/StreamProcessor.md 
b/docs/reference/classes/StreamProcessor.md index 232a61ec..01f68d03 100644 --- a/docs/reference/classes/StreamProcessor.md +++ b/docs/reference/classes/StreamProcessor.md @@ -5,23 +5,25 @@ title: StreamProcessor # Class: StreamProcessor -Defined in: [activities/chat/stream/processor.ts:117](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L117) +Defined in: [activities/chat/stream/processor.ts:120](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L120) StreamProcessor - State machine for processing AI response streams Manages the full UIMessage[] conversation and emits events on changes. +Trusts the adapter contract: adapters emit clean AG-UI events in the +correct order. State tracking: - Full message array - Current assistant message being streamed -- Text content accumulation +- Text content accumulation (reset on TEXT_MESSAGE_START) - Multiple parallel tool calls -- Tool call completion detection +- Tool call completion via TOOL_CALL_END events -Tool call completion is detected when: -1. A new tool call starts at a different index -2. Text content arrives -3. 
Stream ends +## See + + - docs/chat-architecture.md#streamprocessor-internal-state — State field reference + - docs/chat-architecture.md#adapter-contract — What this class expects from adapters ## Constructors @@ -31,7 +33,7 @@ Tool call completion is detected when: new StreamProcessor(options): StreamProcessor; ``` -Defined in: [activities/chat/stream/processor.ts:145](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L145) +Defined in: [activities/chat/stream/processor.ts:147](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L147) #### Parameters @@ -51,7 +53,7 @@ Defined in: [activities/chat/stream/processor.ts:145](https://github.com/TanStac addToolApprovalResponse(approvalId, approved): void; ``` -Defined in: [activities/chat/stream/processor.ts:287](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L287) +Defined in: [activities/chat/stream/processor.ts:329](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L329) Add an approval response (called by client after handling onApprovalRequest) @@ -80,7 +82,7 @@ addToolResult( error?): void; ``` -Defined in: [activities/chat/stream/processor.ts:243](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L243) +Defined in: [activities/chat/stream/processor.ts:285](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L285) Add a tool result (called by client after handling onToolCall) @@ -110,7 +112,7 @@ Add a tool result (called by client after handling onToolCall) addUserMessage(content, id?): UIMessage; ``` -Defined in: [activities/chat/stream/processor.ts:192](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L192) +Defined in: 
[activities/chat/stream/processor.ts:194](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L194) Add a user message to the conversation. Supports both simple string content and multimodal content arrays. @@ -159,7 +161,7 @@ processor.addUserMessage('Hello!', 'custom-id-123') areAllToolsComplete(): boolean; ``` -Defined in: [activities/chat/stream/processor.ts:318](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L318) +Defined in: [activities/chat/stream/processor.ts:360](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L360) Check if all tool calls in the last assistant message are complete Useful for auto-continue logic @@ -176,7 +178,7 @@ Useful for auto-continue logic clearMessages(): void; ``` -Defined in: [activities/chat/stream/processor.ts:362](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L362) +Defined in: [activities/chat/stream/processor.ts:404](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L404) Clear all messages @@ -192,14 +194,40 @@ Clear all messages finalizeStream(): void; ``` -Defined in: [activities/chat/stream/processor.ts:859](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L859) +Defined in: [activities/chat/stream/processor.ts:954](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L954) + +Finalize the stream — complete all pending operations. -Finalize the stream - complete all pending operations +Called when the async iterable ends (stream closed). Acts as the final +safety net: completes any remaining tool calls, flushes un-emitted text, +and fires onStreamEnd. 
#### Returns `void` +#### See + +docs/chat-architecture.md#single-shot-text-response — Finalization step + +*** + +### getCurrentAssistantMessageId() + +```ts +getCurrentAssistantMessageId(): string | null; +``` + +Defined in: [activities/chat/stream/processor.ts:246](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L246) + +Get the current assistant message ID (if one has been created). +Returns null if prepareAssistantMessage() was called but no content +has arrived yet. + +#### Returns + +`string` \| `null` + *** ### getMessages() @@ -208,7 +236,7 @@ Finalize the stream - complete all pending operations getMessages(): UIMessage[]; ``` -Defined in: [activities/chat/stream/processor.ts:310](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L310) +Defined in: [activities/chat/stream/processor.ts:352](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L352) Get current messages @@ -224,7 +252,7 @@ Get current messages getRecording(): ChunkRecording | null; ``` -Defined in: [activities/chat/stream/processor.ts:938](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L938) +Defined in: [activities/chat/stream/processor.ts:1051](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L1051) Get the current recording @@ -240,7 +268,7 @@ Get the current recording getState(): ProcessorState; ``` -Defined in: [activities/chat/stream/processor.ts:911](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L911) +Defined in: [activities/chat/stream/processor.ts:1024](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L1024) Get current processor state @@ -250,13 +278,33 @@ Get current processor state *** +### 
prepareAssistantMessage() + +```ts +prepareAssistantMessage(): void; +``` + +Defined in: [activities/chat/stream/processor.ts:224](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L224) + +Prepare for a new assistant message stream. +Does NOT create the message immediately -- the message is created lazily +when the first content-bearing chunk arrives via ensureAssistantMessage(). +This prevents empty assistant messages from flickering in the UI when +auto-continuation produces no content. + +#### Returns + +`void` + +*** + ### process() ```ts process(stream): Promise; ``` -Defined in: [activities/chat/stream/processor.ts:375](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L375) +Defined in: [activities/chat/stream/processor.ts:417](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L417) Process a stream and emit events through handlers @@ -278,9 +326,13 @@ Process a stream and emit events through handlers processChunk(chunk): void; ``` -Defined in: [activities/chat/stream/processor.ts:403](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L403) +Defined in: [activities/chat/stream/processor.ts:451](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L451) -Process a single chunk from the stream +Process a single chunk from the stream. + +Central dispatch for all AG-UI events. Each event type maps to a specific +handler. Events not listed in the switch are intentionally ignored +(RUN_STARTED, TEXT_MESSAGE_END, STEP_STARTED, STATE_SNAPSHOT, STATE_DELTA). 
#### Parameters @@ -292,6 +344,10 @@ Process a single chunk from the stream `void` +#### See + +docs/chat-architecture.md#adapter-contract — Expected event types and ordering + *** ### removeMessagesAfter() @@ -300,7 +356,7 @@ Process a single chunk from the stream removeMessagesAfter(index): void; ``` -Defined in: [activities/chat/stream/processor.ts:354](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L354) +Defined in: [activities/chat/stream/processor.ts:396](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L396) Remove messages after a certain index (for reload/retry) @@ -322,7 +378,7 @@ Remove messages after a certain index (for reload/retry) reset(): void; ``` -Defined in: [activities/chat/stream/processor.ts:961](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L961) +Defined in: [activities/chat/stream/processor.ts:1074](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L1074) Full reset (including messages) @@ -338,7 +394,7 @@ Full reset (including messages) setMessages(messages): void; ``` -Defined in: [activities/chat/stream/processor.ts:164](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L164) +Defined in: [activities/chat/stream/processor.ts:166](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L166) Set the messages array (e.g., from persisted state) @@ -354,21 +410,23 @@ Set the messages array (e.g., from persisted state) *** -### startAssistantMessage() +### ~~startAssistantMessage()~~ ```ts startAssistantMessage(): string; ``` -Defined in: [activities/chat/stream/processor.ts:219](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L219) - -Start streaming a new 
assistant message -Returns the message ID +Defined in: [activities/chat/stream/processor.ts:236](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L236) #### Returns `string` +#### Deprecated + +Use prepareAssistantMessage() instead. This eagerly creates +an assistant message which can cause empty message flicker. + *** ### startRecording() @@ -377,7 +435,7 @@ Returns the message ID startRecording(): void; ``` -Defined in: [activities/chat/stream/processor.ts:925](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L925) +Defined in: [activities/chat/stream/processor.ts:1038](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L1038) Start recording chunks @@ -396,7 +454,7 @@ toModelMessages(): ModelMessage< | null>[]; ``` -Defined in: [activities/chat/stream/processor.ts:299](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L299) +Defined in: [activities/chat/stream/processor.ts:341](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L341) Get the conversation as ModelMessages (for sending to LLM) @@ -415,7 +473,7 @@ Get the conversation as ModelMessages (for sending to LLM) static replay(recording, options?): Promise; ``` -Defined in: [activities/chat/stream/processor.ts:970](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L970) +Defined in: [activities/chat/stream/processor.ts:1094](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L1094) Replay a recording through the processor diff --git a/docs/reference/functions/chat.md b/docs/reference/functions/chat.md index 26e44d5d..463e35cf 100644 --- a/docs/reference/functions/chat.md +++ b/docs/reference/functions/chat.md @@ -9,7 +9,7 @@ title: chat 
function chat(options): TextActivityResult; ``` -Defined in: [activities/chat/index.ts:1122](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/index.ts#L1122) +Defined in: [activities/chat/index.ts:1125](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/index.ts#L1125) Text activity - handles agentic text generation, one-shot text generation, and agentic structured output. diff --git a/docs/reference/functions/convertMessagesToModelMessages.md b/docs/reference/functions/convertMessagesToModelMessages.md index e75be767..7ffe1ccb 100644 --- a/docs/reference/functions/convertMessagesToModelMessages.md +++ b/docs/reference/functions/convertMessagesToModelMessages.md @@ -12,7 +12,7 @@ function convertMessagesToModelMessages(messages): ModelMessage< | null>[]; ``` -Defined in: [activities/chat/messages.ts:53](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/messages.ts#L53) +Defined in: [activities/chat/messages.ts:63](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/messages.ts#L63) Convert UIMessages or ModelMessages to ModelMessages diff --git a/docs/reference/functions/createReplayStream.md b/docs/reference/functions/createReplayStream.md index 14cab098..07b37432 100644 --- a/docs/reference/functions/createReplayStream.md +++ b/docs/reference/functions/createReplayStream.md @@ -9,7 +9,7 @@ title: createReplayStream function createReplayStream(recording): AsyncIterable; ``` -Defined in: [activities/chat/stream/processor.ts:982](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L982) +Defined in: [activities/chat/stream/processor.ts:1106](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L1106) Create an async iterable from a recording diff --git a/docs/reference/functions/generateMessageId.md 
b/docs/reference/functions/generateMessageId.md index 2b9714fb..1d9cd8b2 100644 --- a/docs/reference/functions/generateMessageId.md +++ b/docs/reference/functions/generateMessageId.md @@ -9,7 +9,7 @@ title: generateMessageId function generateMessageId(): string; ``` -Defined in: [activities/chat/messages.ts:363](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/messages.ts#L363) +Defined in: [activities/chat/messages.ts:436](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/messages.ts#L436) Generate a unique message ID diff --git a/docs/reference/functions/modelMessageToUIMessage.md b/docs/reference/functions/modelMessageToUIMessage.md index e7acdc39..dd64eb91 100644 --- a/docs/reference/functions/modelMessageToUIMessage.md +++ b/docs/reference/functions/modelMessageToUIMessage.md @@ -9,7 +9,7 @@ title: modelMessageToUIMessage function modelMessageToUIMessage(modelMessage, id?): UIMessage; ``` -Defined in: [activities/chat/messages.ts:238](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/messages.ts#L238) +Defined in: [activities/chat/messages.ts:305](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/messages.ts#L305) Convert a ModelMessage to UIMessage diff --git a/docs/reference/functions/modelMessagesToUIMessages.md b/docs/reference/functions/modelMessagesToUIMessages.md index 14e450f2..71277f11 100644 --- a/docs/reference/functions/modelMessagesToUIMessages.md +++ b/docs/reference/functions/modelMessagesToUIMessages.md @@ -9,7 +9,7 @@ title: modelMessagesToUIMessages function modelMessagesToUIMessages(modelMessages): UIMessage[]; ``` -Defined in: [activities/chat/messages.ts:291](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/messages.ts#L291) +Defined in: [activities/chat/messages.ts:364](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/messages.ts#L364) 
Convert an array of ModelMessages to UIMessages diff --git a/docs/reference/functions/normalizeToUIMessage.md b/docs/reference/functions/normalizeToUIMessage.md index 6df2879f..9f72abdb 100644 --- a/docs/reference/functions/normalizeToUIMessage.md +++ b/docs/reference/functions/normalizeToUIMessage.md @@ -9,7 +9,7 @@ title: normalizeToUIMessage function normalizeToUIMessage(message, generateId): UIMessage; ``` -Defined in: [activities/chat/messages.ts:340](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/messages.ts#L340) +Defined in: [activities/chat/messages.ts:413](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/messages.ts#L413) Normalize a message (UIMessage or ModelMessage) to a UIMessage Ensures the message has an ID and createdAt timestamp diff --git a/docs/reference/functions/uiMessageToModelMessages.md b/docs/reference/functions/uiMessageToModelMessages.md index 5fb6364a..52107288 100644 --- a/docs/reference/functions/uiMessageToModelMessages.md +++ b/docs/reference/functions/uiMessageToModelMessages.md @@ -12,15 +12,22 @@ function uiMessageToModelMessages(uiMessage): ModelMessage< | null>[]; ``` -Defined in: [activities/chat/messages.ts:81](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/messages.ts#L81) +Defined in: [activities/chat/messages.ts:98](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/messages.ts#L98) Convert a UIMessage to ModelMessage(s) -This conversion handles the parts-based structure: -- Text parts → content field (string or as part of ContentPart array) -- Multimodal parts (image, audio, video, document) → ContentPart array -- ToolCall parts → toolCalls array -- ToolResult parts → separate role="tool" messages +Walks the parts array IN ORDER to preserve the interleaving of text, +tool calls, and tool results. 
This is critical for multi-round tool +flows where the model generates text, calls a tool, gets the result, +then generates more text and calls another tool. + +The output preserves the sequential structure: + text1 → toolCall1 → toolResult1 → text2 → toolCall2 → toolResult2 +becomes: + assistant: {content: "text1", toolCalls: [toolCall1]} + tool: toolResult1 + assistant: {content: "text2", toolCalls: [toolCall2]} + tool: toolResult2 ## Parameters @@ -37,4 +44,4 @@ The UIMessage to convert \| [`ContentPart`](../type-aliases/ContentPart.md)\<`unknown`, `unknown`, `unknown`, `unknown`, `unknown`\>[] \| `null`\>[] -An array of ModelMessages (may be multiple if tool results are present) +An array of ModelMessages preserving part ordering diff --git a/docs/reference/interfaces/StepFinishedEvent.md b/docs/reference/interfaces/StepFinishedEvent.md index af818c10..cc1715d8 100644 --- a/docs/reference/interfaces/StepFinishedEvent.md +++ b/docs/reference/interfaces/StepFinishedEvent.md @@ -23,14 +23,14 @@ optional content: string; Defined in: [types.ts:869](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L869) -Full accumulated thinking content +Full accumulated thinking content (optional, for debugging) *** -### delta? 
+### delta ```ts -optional delta: string; +delta: string; ``` Defined in: [types.ts:867](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L867) diff --git a/docs/reference/interfaces/StreamProcessorEvents.md b/docs/reference/interfaces/StreamProcessorEvents.md index 2182c9d1..65bcd799 100644 --- a/docs/reference/interfaces/StreamProcessorEvents.md +++ b/docs/reference/interfaces/StreamProcessorEvents.md @@ -5,7 +5,7 @@ title: StreamProcessorEvents # Interface: StreamProcessorEvents -Defined in: [activities/chat/stream/processor.ts:51](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L51) +Defined in: [activities/chat/stream/processor.ts:54](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L54) Events emitted by the StreamProcessor @@ -17,7 +17,7 @@ Events emitted by the StreamProcessor optional onApprovalRequest: (args) => void; ``` -Defined in: [activities/chat/stream/processor.ts:66](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L66) +Defined in: [activities/chat/stream/processor.ts:69](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L69) #### Parameters @@ -51,7 +51,7 @@ Defined in: [activities/chat/stream/processor.ts:66](https://github.com/TanStack optional onError: (error) => void; ``` -Defined in: [activities/chat/stream/processor.ts:58](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L58) +Defined in: [activities/chat/stream/processor.ts:61](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L61) #### Parameters @@ -71,7 +71,7 @@ Defined in: [activities/chat/stream/processor.ts:58](https://github.com/TanStack optional onMessagesChange: (messages) => void; ``` -Defined in: 
[activities/chat/stream/processor.ts:53](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L53) +Defined in: [activities/chat/stream/processor.ts:56](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L56) #### Parameters @@ -91,7 +91,7 @@ Defined in: [activities/chat/stream/processor.ts:53](https://github.com/TanStack optional onStreamEnd: (message) => void; ``` -Defined in: [activities/chat/stream/processor.ts:57](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L57) +Defined in: [activities/chat/stream/processor.ts:60](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L60) #### Parameters @@ -111,7 +111,7 @@ Defined in: [activities/chat/stream/processor.ts:57](https://github.com/TanStack optional onStreamStart: () => void; ``` -Defined in: [activities/chat/stream/processor.ts:56](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L56) +Defined in: [activities/chat/stream/processor.ts:59](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L59) #### Returns @@ -125,7 +125,7 @@ Defined in: [activities/chat/stream/processor.ts:56](https://github.com/TanStack optional onTextUpdate: (messageId, content) => void; ``` -Defined in: [activities/chat/stream/processor.ts:74](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L74) +Defined in: [activities/chat/stream/processor.ts:77](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L77) #### Parameters @@ -149,7 +149,7 @@ Defined in: [activities/chat/stream/processor.ts:74](https://github.com/TanStack optional onThinkingUpdate: (messageId, content) => void; ``` -Defined in: 
[activities/chat/stream/processor.ts:81](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L81) +Defined in: [activities/chat/stream/processor.ts:84](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L84) #### Parameters @@ -173,7 +173,7 @@ Defined in: [activities/chat/stream/processor.ts:81](https://github.com/TanStack optional onToolCall: (args) => void; ``` -Defined in: [activities/chat/stream/processor.ts:61](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L61) +Defined in: [activities/chat/stream/processor.ts:64](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L64) #### Parameters @@ -203,7 +203,7 @@ Defined in: [activities/chat/stream/processor.ts:61](https://github.com/TanStack optional onToolCallStateChange: (messageId, toolCallId, state, args) => void; ``` -Defined in: [activities/chat/stream/processor.ts:75](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L75) +Defined in: [activities/chat/stream/processor.ts:78](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L78) #### Parameters diff --git a/docs/reference/interfaces/StreamProcessorOptions.md b/docs/reference/interfaces/StreamProcessorOptions.md index 3e2c0c40..f0b3a593 100644 --- a/docs/reference/interfaces/StreamProcessorOptions.md +++ b/docs/reference/interfaces/StreamProcessorOptions.md @@ -5,7 +5,7 @@ title: StreamProcessorOptions # Interface: StreamProcessorOptions -Defined in: [activities/chat/stream/processor.ts:87](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L87) +Defined in: 
[activities/chat/stream/processor.ts:90](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L90) Options for StreamProcessor @@ -17,7 +17,7 @@ Options for StreamProcessor optional chunkStrategy: ChunkStrategy; ``` -Defined in: [activities/chat/stream/processor.ts:88](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L88) +Defined in: [activities/chat/stream/processor.ts:91](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L91) *** @@ -27,7 +27,7 @@ Defined in: [activities/chat/stream/processor.ts:88](https://github.com/TanStack optional events: StreamProcessorEvents; ``` -Defined in: [activities/chat/stream/processor.ts:90](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L90) +Defined in: [activities/chat/stream/processor.ts:93](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L93) Event-driven handlers @@ -39,7 +39,7 @@ Event-driven handlers optional initialMessages: UIMessage[]; ``` -Defined in: [activities/chat/stream/processor.ts:97](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L97) +Defined in: [activities/chat/stream/processor.ts:100](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L100) Initial messages to populate the processor @@ -51,7 +51,7 @@ Initial messages to populate the processor optional jsonParser: object; ``` -Defined in: [activities/chat/stream/processor.ts:91](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L91) +Defined in: [activities/chat/stream/processor.ts:94](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L94) #### parse() @@ -77,6 +77,6 @@ parse: 
(jsonString) => any; optional recording: boolean; ``` -Defined in: [activities/chat/stream/processor.ts:95](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L95) +Defined in: [activities/chat/stream/processor.ts:98](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/activities/chat/stream/processor.ts#L98) Enable recording for replay testing diff --git a/docs/reference/interfaces/TextMessageContentEvent.md b/docs/reference/interfaces/TextMessageContentEvent.md index baf6bdd1..5fca6eca 100644 --- a/docs/reference/interfaces/TextMessageContentEvent.md +++ b/docs/reference/interfaces/TextMessageContentEvent.md @@ -23,19 +23,19 @@ optional content: string; Defined in: [types.ts:795](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L795) -Full accumulated content so far +Full accumulated content so far (optional, for debugging) *** -### delta? +### delta ```ts -optional delta: string; +delta: string; ``` Defined in: [types.ts:793](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L793) -The incremental content token (may be undefined if only content is provided) +The incremental content token *** From b50a67c7daf2dc89514f60ad3d072d7cba92a298 Mon Sep 17 00:00:00 2001 From: Dhamivibez Date: Mon, 9 Feb 2026 08:12:34 +0100 Subject: [PATCH 11/16] feat(ai-groq): add Groq adapter package Introduce a new Groq adapter to enable fast LLM inference via Groq's API. Includes TypeScript configuration and Vite build setup for consistent tooling across the AI packages. 
--- pnpm-lock.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index b10f0363..7b7f6108 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -5757,6 +5757,7 @@ packages: glob@10.5.0: resolution: {integrity: sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==} deprecated: Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me + deprecated: Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me hasBin: true glob@13.0.0: @@ -7925,7 +7926,7 @@ packages: tar@7.5.2: resolution: {integrity: sha512-7NyxrTE4Anh8km8iEy7o0QYPs+0JKBTj5ZaqHg6B39erLg0qYXN3BijtShwbsNSvQ+LN75+KV+C4QR/f6Gwnpg==} engines: {node: '>=18'} - deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exhorbitant rates) by contacting i@izs.me + deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. 
Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me term-size@2.2.1: resolution: {integrity: sha512-wK0Ri4fOGjv/XPy8SBHZChl8CM7uMc5VML7SqiQ0zG7+J5Vr+RMQDoHa2CNT6KHUnTGIXH34UDMkPzAUyapBZg==} @@ -8746,6 +8747,7 @@ packages: resolution: {integrity: sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ==} engines: {node: '>=18'} deprecated: Use @exodus/bytes instead for a more spec-conformant and faster implementation + deprecated: Use @exodus/bytes instead for a more spec-conformant and faster implementation whatwg-fetch@3.6.20: resolution: {integrity: sha512-EqhiFU6daOA8kpjOWTL0olhVOF3i7OrFzSYiGsEMB8GcXS+RrzauAERX65xMeNWVqxA6HXH2m69Z9LaKKdisfg==} From 1580c73cf644ebd5aefbc05961f967154bd1563e Mon Sep 17 00:00:00 2001 From: Dhamivibez Date: Wed, 11 Feb 2026 11:41:49 +0100 Subject: [PATCH 12/16] feat: opus 4.6 model & additional config for provider clients (#278) * feat: opus 4.6 model & additional config for provider clients * fix: isue with gemini adapter --- pnpm-lock.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 7b7f6108..aec26cf4 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -7926,7 +7926,7 @@ packages: tar@7.5.2: resolution: {integrity: sha512-7NyxrTE4Anh8km8iEy7o0QYPs+0JKBTj5ZaqHg6B39erLg0qYXN3BijtShwbsNSvQ+LN75+KV+C4QR/f6Gwnpg==} engines: {node: '>=18'} - deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me + deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. 
Support for old versions may be purchased (at exhorbitant rates) by contacting i@izs.me term-size@2.2.1: resolution: {integrity: sha512-wK0Ri4fOGjv/XPy8SBHZChl8CM7uMc5VML7SqiQ0zG7+J5Vr+RMQDoHa2CNT6KHUnTGIXH34UDMkPzAUyapBZg==} From decc53f3815a774e085b03519828c78e264e7617 Mon Sep 17 00:00:00 2001 From: Dhamivibez Date: Thu, 12 Feb 2026 18:17:29 +0100 Subject: [PATCH 13/16] chore: remove unwanted groq-sdk package dependencies in root --- package.json | 6 +- pnpm-lock.yaml | 359 +++++++++++++++++++------------------------------ 2 files changed, 143 insertions(+), 222 deletions(-) diff --git a/package.json b/package.json index 0f44a074..8e31c068 100644 --- a/package.json +++ b/package.json @@ -70,9 +70,5 @@ "typescript": "5.9.3", "vite": "^7.2.7", "vitest": "^4.0.14" - }, - "dependencies": { - "ai-groq": "link:../../.local/share/pnpm/global/5/node_modules/@tanstack/ai-groq", - "groq-sdk": "^0.37.0" } -} +} \ No newline at end of file diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index aec26cf4..cf0c0caf 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -10,10 +10,6 @@ overrides: importers: .: - dependencies: - groq-sdk: - specifier: ^0.37.0 - version: 0.37.0 devDependencies: '@changesets/cli': specifier: ^2.29.8 @@ -119,19 +115,19 @@ importers: version: 0.8.4(@types/react-dom@19.2.3(@types/react@19.2.7))(@types/react@19.2.7)(csstype@3.2.3)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(solid-js@1.9.10) '@tanstack/react-router': specifier: ^1.158.4 - version: 1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + version: 1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3) '@tanstack/react-router-devtools': specifier: ^1.158.4 - version: 1.158.4(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@tanstack/router-core@1.158.4)(csstype@3.2.3)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + version: 
1.159.5(@tanstack/react-router@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@tanstack/router-core@1.159.4)(csstype@3.2.3)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) '@tanstack/react-router-ssr-query': specifier: ^1.158.4 - version: 1.158.4(@tanstack/query-core@5.90.12)(@tanstack/react-query@5.90.12(react@19.2.3))(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@tanstack/router-core@1.158.4)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + version: 1.159.5(@tanstack/query-core@5.90.12)(@tanstack/react-query@5.90.12(react@19.2.3))(@tanstack/react-router@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@tanstack/router-core@1.159.4)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) '@tanstack/react-start': specifier: ^1.159.0 - version: 1.159.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/router-plugin': specifier: ^1.158.4 - version: 1.158.4(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 
1.159.5(@tanstack/react-router@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) capnweb: specifier: ^0.1.0 version: 0.1.0 @@ -234,22 +230,22 @@ importers: version: 0.8.4(@types/react-dom@19.2.3(@types/react@19.2.7))(@types/react@19.2.7)(csstype@3.2.3)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(solid-js@1.9.10) '@tanstack/react-router': specifier: ^1.158.4 - version: 1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + version: 1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3) '@tanstack/react-router-devtools': specifier: ^1.158.4 - version: 1.158.4(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@tanstack/router-core@1.158.4)(csstype@3.2.3)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + version: 1.159.5(@tanstack/react-router@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@tanstack/router-core@1.159.4)(csstype@3.2.3)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) '@tanstack/react-router-ssr-query': specifier: ^1.158.4 - version: 1.158.4(@tanstack/query-core@5.90.12)(@tanstack/react-query@5.90.12(react@19.2.3))(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@tanstack/router-core@1.158.4)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + version: 1.159.5(@tanstack/query-core@5.90.12)(@tanstack/react-query@5.90.12(react@19.2.3))(@tanstack/react-router@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@tanstack/router-core@1.159.4)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) '@tanstack/react-start': specifier: ^1.159.0 - version: 
1.159.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/react-store': specifier: ^0.8.0 version: 0.8.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) '@tanstack/router-plugin': specifier: ^1.158.4 - version: 1.158.4(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 1.159.5(@tanstack/react-router@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/store': specifier: ^0.8.0 version: 0.8.0 @@ -367,7 +363,7 @@ importers: version: 1.154.7(rolldown@1.0.0-beta.53)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/router-plugin': specifier: ^1.158.4 - version: 
1.158.4(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 1.159.5(@tanstack/react-router@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/solid-ai-devtools': specifier: workspace:* version: link:../../packages/typescript/solid-ai-devtools @@ -379,13 +375,13 @@ importers: version: 1.141.1(solid-js@1.9.10) '@tanstack/solid-router-devtools': specifier: ^1.139.10 - version: 1.141.1(@tanstack/router-core@1.158.4)(@tanstack/solid-router@1.141.1(solid-js@1.9.10))(csstype@3.2.3)(solid-js@1.9.10) + version: 1.141.1(@tanstack/router-core@1.159.4)(@tanstack/solid-router@1.141.1(solid-js@1.9.10))(csstype@3.2.3)(solid-js@1.9.10) '@tanstack/solid-router-ssr-query': specifier: ^1.139.10 - version: 1.141.1(@tanstack/query-core@5.90.12)(@tanstack/router-core@1.158.4)(@tanstack/solid-query@5.90.15(solid-js@1.9.10))(@tanstack/solid-router@1.141.1(solid-js@1.9.10))(eslint@9.39.2(jiti@2.6.1))(solid-js@1.9.10)(typescript@5.9.3) + version: 1.141.1(@tanstack/query-core@5.90.12)(@tanstack/router-core@1.159.4)(@tanstack/solid-query@5.90.15(solid-js@1.9.10))(@tanstack/solid-router@1.141.1(solid-js@1.9.10))(eslint@9.39.2(jiti@2.6.1))(solid-js@1.9.10)(typescript@5.9.3) '@tanstack/solid-start': specifier: ^1.139.10 - version: 
1.141.1(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(solid-js@1.9.10)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 1.141.1(@tanstack/react-router@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(solid-js@1.9.10)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/solid-store': specifier: ^0.8.0 version: 0.8.0(solid-js@1.9.10) @@ -1174,13 +1170,13 @@ importers: version: 1.154.7(rolldown@1.0.0-beta.53)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/react-router': specifier: ^1.158.4 - version: 1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + version: 1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3) '@tanstack/react-start': specifier: ^1.159.0 - version: 1.159.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/router-plugin': specifier: ^1.158.4 - version: 
1.158.4(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 1.159.5(@tanstack/react-router@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/tests-adapters': specifier: workspace:* version: link:../adapters @@ -1284,10 +1280,10 @@ importers: version: 1.154.7(rolldown@1.0.0-beta.53)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/react-router': specifier: ^1.158.4 - version: 1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + version: 1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3) '@tanstack/react-start': specifier: ^1.159.0 - version: 1.159.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/start': specifier: ^1.120.20 version: 
1.120.20(@types/node@24.10.3)(db0@0.3.4)(ioredis@5.8.2)(jiti@2.6.1)(lightningcss@1.30.2)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(rolldown@1.0.0-beta.53)(terser@5.44.1)(tsx@4.21.0)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))(yaml@2.8.2) @@ -3568,20 +3564,20 @@ packages: peerDependencies: react: ^18 || ^19 - '@tanstack/react-router-devtools@1.158.4': - resolution: {integrity: sha512-/EkrrJGTPC7MwLfcYYmZM71ANDMLbwcYvBtDA+48LqHUKal8mpWlaodiWdFFnVQ7ny/unbUxljgdrNV9YZiyFQ==} + '@tanstack/react-router-devtools@1.159.5': + resolution: {integrity: sha512-IIyomu+ypWTxyoYT32mxamVmdTs7ZCGcTbdj7HVvtD3xp1lvo/bwRXj9oERENmb+OAPOaWF2doRYC/pmKjK5vg==} engines: {node: '>=12'} peerDependencies: - '@tanstack/react-router': ^1.158.4 - '@tanstack/router-core': ^1.158.4 + '@tanstack/react-router': ^1.159.5 + '@tanstack/router-core': ^1.159.4 react: '>=18.0.0 || >=19.0.0' react-dom: '>=18.0.0 || >=19.0.0' peerDependenciesMeta: '@tanstack/router-core': optional: true - '@tanstack/react-router-ssr-query@1.158.4': - resolution: {integrity: sha512-f+XzxO06ILM2i5CGtWqcb3+yaAvp8XgT5hMykKmwwaBnf3Ctc6O8tN/05Ovj0ajXWuROk3HTjg67OcWD7JxI6Q==} + '@tanstack/react-router-ssr-query@1.159.5': + resolution: {integrity: sha512-r/Um9DASjmaTdiyXA3cpooGLrSD1BlcXUNsRz4zAlvD38m0gIFvaykOM2Naztqm9H+2CJkocRZp2/kR69Xcmyg==} engines: {node: '>=12'} peerDependencies: '@tanstack/query-core': '>=5.90.0' @@ -3597,8 +3593,8 @@ packages: react: '>=18.0.0 || >=19.0.0' react-dom: '>=18.0.0 || >=19.0.0' - '@tanstack/react-router@1.158.4': - resolution: {integrity: sha512-i15xXumgvpuM+4NSuIwgouGezuj9eHjZsgpTZSQ7E9pa8rYmhZbWnf8xU68qaLmaKIol/e75o/YzVH2QWHs3iQ==} + '@tanstack/react-router@1.159.5': + resolution: {integrity: 
sha512-rVb0MtKzP5c0BkWIoFgWBiRAJHYSU3bhsEHbT0cRdRLmlJiw21Awb6VEjgYq3hJiEhowcKKm6J8AdRD/8oZ5dQ==} engines: {node: '>=12'} peerDependencies: react: '>=18.0.0 || >=19.0.0' @@ -3611,8 +3607,8 @@ packages: react: '>=18.0.0 || >=19.0.0' react-dom: '>=18.0.0 || >=19.0.0' - '@tanstack/react-start-client@1.158.4': - resolution: {integrity: sha512-ctEBgpYAPZ3i4EPZlJ45XS/lXPO73MkELec+hXf8NfK0lDQDaUy7LfWu41NPaftdZFJPOncDCfutwpUXD98YlA==} + '@tanstack/react-start-client@1.159.5': + resolution: {integrity: sha512-Qynx7XWHI1rhVpSUx6P40zazcQVJRhl2fnAcSH0I6vaAxSuZm8lvI37YacXPRu8flvI/ZGlF095arxiyps+A0w==} engines: {node: '>=22.12.0'} peerDependencies: react: '>=18.0.0 || >=19.0.0' @@ -3636,15 +3632,15 @@ packages: react: '>=18.0.0 || >=19.0.0' react-dom: '>=18.0.0 || >=19.0.0' - '@tanstack/react-start-server@1.159.0': - resolution: {integrity: sha512-1nPj7TEOpoIlTW0lftaHuU9Ol1ZDQwRCUWr6UvaPUbapq9nWR8kwYFjyCLbopBjyakFFNgz88/stdbZObt5h2A==} + '@tanstack/react-start-server@1.159.5': + resolution: {integrity: sha512-X4/SunwDTEbGkYTfM0gR+79amfk6phAM+2+NlC2s7TX2cD51xE8bUz2a//RxfOh9xg8f0f2CRIO34xTEDHGTfQ==} engines: {node: '>=22.12.0'} peerDependencies: react: '>=18.0.0 || >=19.0.0' react-dom: '>=18.0.0 || >=19.0.0' - '@tanstack/react-start@1.159.0': - resolution: {integrity: sha512-/ky8Pbu0cmj5dAQfi8LXHpAd/eepyQqDo0eSI/OPYQ2wZ8u8UPwycFvou8t8mq5pkinu+l7JX45UD7mNvzvVNg==} + '@tanstack/react-start@1.159.5': + resolution: {integrity: sha512-vfnF7eYswAK54ru6Ay08nb0TXVzTBaVRsbbRW7hx2M0chgwtSx+YScYzoixqkccRARQBN8a/CeVq7vNFW8525w==} engines: {node: '>=22.12.0'} peerDependencies: react: '>=18.0.0 || >=19.0.0' @@ -3665,8 +3661,8 @@ packages: resolution: {integrity: sha512-fR1GGpp6v3dVKu4KIAjEh+Sd0qGLQd/wvCOVHeopSY6aFidXKCzwrS5cBOBqoPPWTKmn6CdW1a0CzFr5Furdog==} engines: {node: '>=12'} - '@tanstack/router-core@1.158.4': - resolution: {integrity: sha512-KikgYdyrEFqsjjgv9pMhDTMmASMAyFRvUiKFdQPQtXq3aD1qv/zck4CbA4bfzp9N9nYu/qvWwU1mlYU4u5JeXg==} + '@tanstack/router-core@1.159.4': + resolution: {integrity: 
sha512-MFzPH39ijNO83qJN3pe7x4iAlhZyqgao3sJIzv3SJ4Pnk12xMnzuDzIAQT/1WV6JolPQEcw0Wr4L5agF8yxoeg==} engines: {node: '>=12'} '@tanstack/router-devtools-core@1.141.1': @@ -3680,11 +3676,11 @@ packages: csstype: optional: true - '@tanstack/router-devtools-core@1.158.4': - resolution: {integrity: sha512-9MKzstYp/6sNRSwJY2b9ipVW8b8/x1iSFNfLhOJur2tnjB3RhwCDfy0u+to70BrRpBEWeq7jvJoVdP029gzUUg==} + '@tanstack/router-devtools-core@1.159.4': + resolution: {integrity: sha512-qMUeIv+6n1mZOcO2raCIbdOeDeMpJEmgm6oMs/nWEG61lYrzJYaCcpBTviAX0nRhSiQSUCX9cHiosUEA0e2HAw==} engines: {node: '>=12'} peerDependencies: - '@tanstack/router-core': ^1.158.4 + '@tanstack/router-core': ^1.159.4 csstype: ^3.0.10 peerDependenciesMeta: csstype: @@ -3698,8 +3694,8 @@ packages: resolution: {integrity: sha512-21RbVAoIDn7s/n/PKMN6U60d5hCeVADrBH/uN6B/poMT4MVYtJXqISVzkc2RAboVRw6eRdYFeF+YlwA3nF6y3Q==} engines: {node: '>=12'} - '@tanstack/router-generator@1.158.4': - resolution: {integrity: sha512-RQmqMTT0oV8dS/3Glcq9SPzDZqOPyKb/LVFUkNoTfMwW88WyGnQcYqZAkmVk/CGBWWDfwObOUZoGq5jTF7bG8w==} + '@tanstack/router-generator@1.159.4': + resolution: {integrity: sha512-O8tICQoSuvK6vs3mvBdI3zVLFmYfj/AYDCX0a5msSADP/2S0GsgDDTB5ah731TqYCtjeNriaWz9iqst38cjF/Q==} engines: {node: '>=12'} '@tanstack/router-plugin@1.131.50': @@ -3744,12 +3740,12 @@ packages: webpack: optional: true - '@tanstack/router-plugin@1.158.4': - resolution: {integrity: sha512-g2sytAhljw6Jd6Klu37OZ75+o+vhiGdbWtnBy/4rYLC4NN6hSnjgJQRI3+h1CI1KQ4EUgsZYZr/hgE1KHoiWYQ==} + '@tanstack/router-plugin@1.159.5': + resolution: {integrity: sha512-i2LR3WRaBOAZ1Uab5QBG9UxZIRJ3V56JVu890NysbuX15rgzRiL5yLAbfenOHdhaHy2+4joX35VICAHuVWy7Og==} engines: {node: '>=12'} peerDependencies: '@rsbuild/core': '>=1.0.2' - '@tanstack/react-router': ^1.158.4 + '@tanstack/react-router': ^1.159.5 vite: '>=5.0.0 || >=6.0.0 || >=7.0.0' vite-plugin-solid: ^2.11.10 webpack: '>=5.92.0' @@ -3772,8 +3768,8 @@ packages: '@tanstack/query-core': '>=5.90.0' '@tanstack/router-core': '>=1.127.0' - 
'@tanstack/router-ssr-query-core@1.158.4': - resolution: {integrity: sha512-gZRx0pGaRc7NPrwQSAfnn/DVWEsd01cf5TaW5yTyf3R5ZP/I++KNEW3lBXyRo1RyKedPC45R+Id6HpDeEaidyg==} + '@tanstack/router-ssr-query-core@1.159.4': + resolution: {integrity: sha512-zqgJaf8iTk2ugrJqvU+b8NnFf3MKcnFaurtlp5tP38lNHQGW4En9UaOBOou/6zYZMlKQBcvqxSGJiwaLIyTK2g==} engines: {node: '>=12'} peerDependencies: '@tanstack/query-core': '>=5.90.0' @@ -3872,8 +3868,8 @@ packages: resolution: {integrity: sha512-Rk/b0ekX7p0ZBKOg9WM5c632YPqu7GlvZSYnAjNi1GDp1/sET6g2Trp+GAjs1s8kakp2pMQ4sZUG/11grCMfJw==} engines: {node: '>=22.12.0'} - '@tanstack/start-client-core@1.158.4': - resolution: {integrity: sha512-qpUYwJMMCEKgJuMz2CJLt53XrObi1BSjV1gG5SgBWRRVOHL8zky55tu1fEqHEa26jTTA6mUcBnPzYE8vIjRpAw==} + '@tanstack/start-client-core@1.159.4': + resolution: {integrity: sha512-9j2i1PRTIGcYAD+509znve0ngK81ZUfbX4XCpoNFMaUUpRHoEPPK5I9+PzLFvL9sNOto67x+WULCUggzX+lEKQ==} engines: {node: '>=22.12.0'} '@tanstack/start-config@1.120.20': @@ -3900,8 +3896,8 @@ packages: peerDependencies: vite: '>=7.0.0' - '@tanstack/start-plugin-core@1.159.0': - resolution: {integrity: sha512-HGcji+Mhste9mDKUlKpRPfoIOaURr7UqQZ3AMb+6zpbXumc+apYW/CvlvWdF/hoZGBSVAniFpwXgV5L5IimnhA==} + '@tanstack/start-plugin-core@1.159.5': + resolution: {integrity: sha512-QGiBw+L3qu2sUY0Tg9KovxjDSi5kevoANEcq9RLX7iIhLkTjrILN6hnAlXZUzqk5Egaf0aN2yWhwI4HWucMprw==} engines: {node: '>=22.12.0'} peerDependencies: vite: '>=7.0.0' @@ -3914,8 +3910,8 @@ packages: resolution: {integrity: sha512-Qk/lZ/+iGUyNYeAAuj89bLR6GXLD/9BIpAR2CUwlS+xXGL0kQmOFcb1UvccWZ2QwtW+csxJW4NeQOeMuqsfyhA==} engines: {node: '>=22.12.0'} - '@tanstack/start-server-core@1.159.0': - resolution: {integrity: sha512-oE9UkWc7uIDvjAOsmzZ65Vz+JLb4S+bhMLGjx84lWY0G+GelJJvdr0rQiUFTWPIsbIxO2pdyIY995H55VUcowg==} + '@tanstack/start-server-core@1.159.4': + resolution: {integrity: sha512-sGpr+iil+pcY3Gglvbnxaj7fCEPTQJv4oF7YA24SVv8YvayLXtBXpF26miJLA+KR9P31dQdPYe1gTjv5zRyvHg==} engines: {node: '>=22.12.0'} 
'@tanstack/start-server-functions-client@1.131.50': @@ -3946,8 +3942,8 @@ packages: resolution: {integrity: sha512-UPOQd4qsytgmc+pHeeS3oIZQazhyGAmEaCS/IrZI42TzpuVh2ZbLVssKEoDziheNP1dH5KT2lsL1bU9asAw7tA==} engines: {node: '>=22.12.0'} - '@tanstack/start-storage-context@1.158.4': - resolution: {integrity: sha512-tz70q/6LTytstBIMRYt5GDRjPJPOHjnPNay85RJdq9ZlQKryeDThnshEttlBTDAxZP7wtwOv00lcAgFLFGP1hA==} + '@tanstack/start-storage-context@1.159.4': + resolution: {integrity: sha512-iGkmuCIq3PLI4GKOGwgUNHQKZ13YV8LGq62o2hVnyXE64Jm2SP7c5z6D1ndydpk4JwdRzQKlcOFT/1agvS6Nsg==} engines: {node: '>=22.12.0'} '@tanstack/start@1.120.20': @@ -4072,15 +4068,9 @@ packages: '@types/ms@2.1.0': resolution: {integrity: sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==} - '@types/node-fetch@2.6.13': - resolution: {integrity: sha512-QGpRVpzSaUs30JBSGPjOg4Uveu384erbHBoT1zeONvyCfwQxIkUshLAOqN/k9EjGviPRmWTTe6aH2qySWKTVSw==} - '@types/node@12.20.55': resolution: {integrity: sha512-J8xLz7q2OFulZ2cyGTLE1TbbZcjpno7FaN6zdJNrgAdrJ+DZzh/uFR6YrTb4C+nXakvud8Q4+rbhoIWlYQbUFQ==} - '@types/node@18.19.130': - resolution: {integrity: sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg==} - '@types/node@20.19.26': resolution: {integrity: sha512-0l6cjgF0XnihUpndDhk+nyD3exio3iKaYROSgvh/qSevPXax3L8p5DBRFjbvalnwatGgHEQn2R88y2fA3g4irg==} @@ -4519,10 +4509,6 @@ packages: resolution: {integrity: sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==} engines: {node: '>= 14'} - agentkeepalive@4.6.0: - resolution: {integrity: sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==} - engines: {node: '>= 8.0.0'} - ajv-draft-04@1.0.0: resolution: {integrity: sha512-mv00Te6nmYbRp5DCwclxtt7yV/joXJPGS7nM+97GdxvuttCOfgI3K4U25zboyeX0O+myI8ERluxQe5wljMmVIw==} peerDependencies: @@ -5643,9 +5629,6 @@ packages: resolution: {integrity: 
sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==} engines: {node: '>=14'} - form-data-encoder@1.7.2: - resolution: {integrity: sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==} - form-data@4.0.5: resolution: {integrity: sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==} engines: {node: '>= 6'} @@ -5655,10 +5638,6 @@ packages: engines: {node: '>=18.3.0'} hasBin: true - formdata-node@4.4.1: - resolution: {integrity: sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==} - engines: {node: '>= 12.20'} - formdata-polyfill@4.0.10: resolution: {integrity: sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==} engines: {node: '>=12.20.0'} @@ -5757,7 +5736,6 @@ packages: glob@10.5.0: resolution: {integrity: sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==} deprecated: Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me - deprecated: Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. 
Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me hasBin: true glob@13.0.0: @@ -5811,9 +5789,6 @@ packages: graceful-fs@4.2.11: resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} - groq-sdk@0.37.0: - resolution: {integrity: sha512-lT72pcT8b/X5XrzdKf+rWVzUGW1OQSKESmL8fFN5cTbsf02gq6oFam4SVeNtzELt9cYE2Pt3pdGgSImuTbHFDg==} - gtoken@8.0.0: resolution: {integrity: sha512-+CqsMbHPiSTdtSO14O51eMNlrp9N79gmeqmXeouJOhfucAedHw9noVe/n5uJk3tbKE6a+6ZCQg3RPhVhHByAIw==} engines: {node: '>=18'} @@ -5840,9 +5815,10 @@ packages: crossws: optional: true - h3@2.0.1-rc.11: - resolution: {integrity: sha512-2myzjCqy32c1As9TjZW9fNZXtLqNedjFSrdFy2AjFBQQ3LzrnGoDdFDYfC0tV2e4vcyfJ2Sfo/F6NQhO2Ly/Mw==} + h3@2.0.1-rc.14: + resolution: {integrity: sha512-163qbGmTr/9rqQRNuqMqtgXnOUAkE4KTdauiC9y0E5iG1I65kte9NyfWvZw5RTDMt6eY+DtyoNzrQ9wA2BfvGQ==} engines: {node: '>=20.11.1'} + hasBin: true peerDependencies: crossws: ^0.4.1 peerDependenciesMeta: @@ -5978,9 +5954,6 @@ packages: resolution: {integrity: sha512-AXcZb6vzzrFAUE61HnN4mpLqd/cSIwNQjtNWR0euPm6y0iqx3G4gOXaIDdtdDwZmhwe82LA6+zinmW4UBWVePQ==} engines: {node: '>=16.17.0'} - humanize-ms@1.2.1: - resolution: {integrity: sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==} - iconv-lite@0.6.3: resolution: {integrity: sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==} engines: {node: '>=0.10.0'} @@ -7769,8 +7742,8 @@ packages: sprintf-js@1.0.3: resolution: {integrity: sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==} - srvx@0.10.1: - resolution: {integrity: sha512-A//xtfak4eESMWWydSRFUVvCTQbSwivnGCEf8YGPe2eHU0+Z6znfUTCPF0a7oV3sObSOcrXHlL6Bs9vVctfXdg==} + srvx@0.11.4: + resolution: {integrity: sha512-m/2p87bqWZ94xpRN06qNBwh0xq/D0dXajnvPDSHFqrTogxuTWYNP1UHz6Cf+oY7D+NPLY35TJAp4ESIKn0WArQ==} engines: {node: '>=20.16.0'} 
hasBin: true @@ -8142,9 +8115,6 @@ packages: unctx@2.5.0: resolution: {integrity: sha512-p+Rz9x0R7X+CYDkT+Xg8/GhpcShTlU8n+cf9OtOEf7zEQsNcCZO1dPKNRDqvUTaq+P32PMMkxWHwfrxkqfqAYg==} - undici-types@5.26.5: - resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==} - undici-types@5.28.4: resolution: {integrity: sha512-3OeMF5Lyowe8VW0skf5qaIE7Or3yS9LS7fvMUI0gg4YxpIBVg0L8BxCmROw2CcYhSkpR68Epz7CGc8MPj94Uww==} @@ -8726,10 +8696,6 @@ packages: resolution: {integrity: sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==} engines: {node: '>= 8'} - web-streams-polyfill@4.0.0-beta.3: - resolution: {integrity: sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==} - engines: {node: '>= 14'} - web-vitals@5.1.0: resolution: {integrity: sha512-ArI3kx5jI0atlTtmV0fWU3fjpLmq/nD3Zr1iFFlJLaqa5wLBkUSzINwBPySCX/8jRyjlmy1Volw1kz1g9XE4Jg==} @@ -8747,7 +8713,6 @@ packages: resolution: {integrity: sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ==} engines: {node: '>=18'} deprecated: Use @exodus/bytes instead for a more spec-conformant and faster implementation - deprecated: Use @exodus/bytes instead for a more spec-conformant and faster implementation whatwg-fetch@3.6.20: resolution: {integrity: sha512-EqhiFU6daOA8kpjOWTL0olhVOF3i7OrFzSYiGsEMB8GcXS+RrzauAERX65xMeNWVqxA6HXH2m69Z9LaKKdisfg==} @@ -10948,23 +10913,23 @@ snapshots: '@tanstack/query-core': 5.90.12 react: 19.2.3 - '@tanstack/react-router-devtools@1.158.4(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@tanstack/router-core@1.158.4)(csstype@3.2.3)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + '@tanstack/react-router-devtools@1.159.5(@tanstack/react-router@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@tanstack/router-core@1.159.4)(csstype@3.2.3)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': 
dependencies: - '@tanstack/react-router': 1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - '@tanstack/router-devtools-core': 1.158.4(@tanstack/router-core@1.158.4)(csstype@3.2.3) + '@tanstack/react-router': 1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@tanstack/router-devtools-core': 1.159.4(@tanstack/router-core@1.159.4)(csstype@3.2.3) react: 19.2.3 react-dom: 19.2.3(react@19.2.3) optionalDependencies: - '@tanstack/router-core': 1.158.4 + '@tanstack/router-core': 1.159.4 transitivePeerDependencies: - csstype - '@tanstack/react-router-ssr-query@1.158.4(@tanstack/query-core@5.90.12)(@tanstack/react-query@5.90.12(react@19.2.3))(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@tanstack/router-core@1.158.4)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + '@tanstack/react-router-ssr-query@1.159.5(@tanstack/query-core@5.90.12)(@tanstack/react-query@5.90.12(react@19.2.3))(@tanstack/react-router@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@tanstack/router-core@1.159.4)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': dependencies: '@tanstack/query-core': 5.90.12 '@tanstack/react-query': 5.90.12(react@19.2.3) - '@tanstack/react-router': 1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - '@tanstack/router-ssr-query-core': 1.158.4(@tanstack/query-core@5.90.12)(@tanstack/router-core@1.158.4) + '@tanstack/react-router': 1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@tanstack/router-ssr-query-core': 1.159.4(@tanstack/query-core@5.90.12)(@tanstack/router-core@1.159.4) react: 19.2.3 react-dom: 19.2.3(react@19.2.3) transitivePeerDependencies: @@ -10981,11 +10946,11 @@ snapshots: tiny-invariant: 1.3.3 tiny-warning: 1.0.3 - '@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + '@tanstack/react-router@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': dependencies: '@tanstack/history': 1.154.14 '@tanstack/react-store': 0.8.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - 
'@tanstack/router-core': 1.158.4 + '@tanstack/router-core': 1.159.4 isbot: 5.1.32 react: 19.2.3 react-dom: 19.2.3(react@19.2.3) @@ -11002,19 +10967,19 @@ snapshots: tiny-invariant: 1.3.3 tiny-warning: 1.0.3 - '@tanstack/react-start-client@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + '@tanstack/react-start-client@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': dependencies: - '@tanstack/react-router': 1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - '@tanstack/router-core': 1.158.4 - '@tanstack/start-client-core': 1.158.4 + '@tanstack/react-router': 1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@tanstack/router-core': 1.159.4 + '@tanstack/start-client-core': 1.159.4 react: 19.2.3 react-dom: 19.2.3(react@19.2.3) tiny-invariant: 1.3.3 tiny-warning: 1.0.3 - '@tanstack/react-start-plugin@1.131.50(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@vitejs/plugin-react@4.7.0(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(rolldown@1.0.0-beta.53)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + '@tanstack/react-start-plugin@1.131.50(@tanstack/react-router@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@vitejs/plugin-react@4.7.0(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(rolldown@1.0.0-beta.53)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: - '@tanstack/start-plugin-core': 
1.131.50(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(rolldown@1.0.0-beta.53)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + '@tanstack/start-plugin-core': 1.131.50(@tanstack/react-router@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(rolldown@1.0.0-beta.53)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@vitejs/plugin-react': 4.7.0(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) pathe: 2.0.3 vite: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -11116,27 +11081,27 @@ snapshots: transitivePeerDependencies: - crossws - '@tanstack/react-start-server@1.159.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + '@tanstack/react-start-server@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': dependencies: '@tanstack/history': 1.154.14 - '@tanstack/react-router': 1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - '@tanstack/router-core': 1.158.4 - '@tanstack/start-client-core': 1.158.4 - '@tanstack/start-server-core': 1.159.0 + '@tanstack/react-router': 1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@tanstack/router-core': 1.159.4 + '@tanstack/start-client-core': 1.159.4 + '@tanstack/start-server-core': 1.159.4 react: 19.2.3 react-dom: 19.2.3(react@19.2.3) transitivePeerDependencies: - crossws - 
'@tanstack/react-start@1.159.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + '@tanstack/react-start@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: - '@tanstack/react-router': 1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - '@tanstack/react-start-client': 1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - '@tanstack/react-start-server': 1.159.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@tanstack/react-router': 1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@tanstack/react-start-client': 1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@tanstack/react-start-server': 1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3) '@tanstack/router-utils': 1.158.0 - '@tanstack/start-client-core': 1.158.4 - '@tanstack/start-plugin-core': 1.159.0(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) - '@tanstack/start-server-core': 1.159.0 + '@tanstack/start-client-core': 1.159.4 + '@tanstack/start-plugin-core': 
1.159.5(@tanstack/react-router@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + '@tanstack/start-server-core': 1.159.4 pathe: 2.0.3 react: 19.2.3 react-dom: 19.2.3(react@19.2.3) @@ -11175,7 +11140,7 @@ snapshots: tiny-invariant: 1.3.3 tiny-warning: 1.0.3 - '@tanstack/router-core@1.158.4': + '@tanstack/router-core@1.159.4': dependencies: '@tanstack/history': 1.154.14 '@tanstack/store': 0.8.0 @@ -11185,9 +11150,9 @@ snapshots: tiny-invariant: 1.3.3 tiny-warning: 1.0.3 - '@tanstack/router-devtools-core@1.141.1(@tanstack/router-core@1.158.4)(csstype@3.2.3)(solid-js@1.9.10)': + '@tanstack/router-devtools-core@1.141.1(@tanstack/router-core@1.159.4)(csstype@3.2.3)(solid-js@1.9.10)': dependencies: - '@tanstack/router-core': 1.158.4 + '@tanstack/router-core': 1.159.4 clsx: 2.1.1 goober: 2.1.18(csstype@3.2.3) solid-js: 1.9.10 @@ -11195,9 +11160,9 @@ snapshots: optionalDependencies: csstype: 3.2.3 - '@tanstack/router-devtools-core@1.158.4(@tanstack/router-core@1.158.4)(csstype@3.2.3)': + '@tanstack/router-devtools-core@1.159.4(@tanstack/router-core@1.159.4)(csstype@3.2.3)': dependencies: - '@tanstack/router-core': 1.158.4 + '@tanstack/router-core': 1.159.4 clsx: 2.1.1 goober: 2.1.18(csstype@3.2.3) tiny-invariant: 1.3.3 @@ -11230,9 +11195,9 @@ snapshots: transitivePeerDependencies: - supports-color - '@tanstack/router-generator@1.158.4': + '@tanstack/router-generator@1.159.4': dependencies: - '@tanstack/router-core': 1.158.4 + '@tanstack/router-core': 1.159.4 '@tanstack/router-utils': 1.158.0 '@tanstack/virtual-file-routes': 1.154.7 prettier: 3.7.4 @@ -11243,7 +11208,7 @@ snapshots: transitivePeerDependencies: - supports-color - 
'@tanstack/router-plugin@1.131.50(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + '@tanstack/router-plugin@1.131.50(@tanstack/react-router@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@babel/core': 7.28.5 '@babel/plugin-syntax-jsx': 7.27.1(@babel/core@7.28.5) @@ -11260,13 +11225,13 @@ snapshots: unplugin: 2.3.11 zod: 3.25.76 optionalDependencies: - '@tanstack/react-router': 1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@tanstack/react-router': 1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3) vite: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) vite-plugin-solid: 2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) transitivePeerDependencies: - supports-color - '@tanstack/router-plugin@1.141.1(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + 
'@tanstack/router-plugin@1.141.1(@tanstack/react-router@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@babel/core': 7.28.5 '@babel/plugin-syntax-jsx': 7.27.1(@babel/core@7.28.5) @@ -11283,13 +11248,13 @@ snapshots: unplugin: 2.3.11 zod: 3.25.76 optionalDependencies: - '@tanstack/react-router': 1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@tanstack/react-router': 1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3) vite: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) vite-plugin-solid: 2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) transitivePeerDependencies: - supports-color - '@tanstack/router-plugin@1.158.4(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + '@tanstack/router-plugin@1.159.5(@tanstack/react-router@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@babel/core': 7.28.5 '@babel/plugin-syntax-jsx': 7.27.1(@babel/core@7.28.5) @@ -11297,29 +11262,29 @@ snapshots: '@babel/template': 7.27.2 '@babel/traverse': 7.28.5 '@babel/types': 7.28.5 - '@tanstack/router-core': 1.158.4 - 
'@tanstack/router-generator': 1.158.4 + '@tanstack/router-core': 1.159.4 + '@tanstack/router-generator': 1.159.4 '@tanstack/router-utils': 1.158.0 '@tanstack/virtual-file-routes': 1.154.7 chokidar: 3.6.0 unplugin: 2.3.11 zod: 3.25.76 optionalDependencies: - '@tanstack/react-router': 1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@tanstack/react-router': 1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3) vite: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) vite-plugin-solid: 2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) transitivePeerDependencies: - supports-color - '@tanstack/router-ssr-query-core@1.141.1(@tanstack/query-core@5.90.12)(@tanstack/router-core@1.158.4)': + '@tanstack/router-ssr-query-core@1.141.1(@tanstack/query-core@5.90.12)(@tanstack/router-core@1.159.4)': dependencies: '@tanstack/query-core': 5.90.12 - '@tanstack/router-core': 1.158.4 + '@tanstack/router-core': 1.159.4 - '@tanstack/router-ssr-query-core@1.158.4(@tanstack/query-core@5.90.12)(@tanstack/router-core@1.158.4)': + '@tanstack/router-ssr-query-core@1.159.4(@tanstack/query-core@5.90.12)(@tanstack/router-core@1.159.4)': dependencies: '@tanstack/query-core': 5.90.12 - '@tanstack/router-core': 1.158.4 + '@tanstack/router-core': 1.159.4 '@tanstack/router-utils@1.131.2': dependencies: @@ -11405,20 +11370,20 @@ snapshots: '@tanstack/query-core': 5.90.12 solid-js: 1.9.10 - '@tanstack/solid-router-devtools@1.141.1(@tanstack/router-core@1.158.4)(@tanstack/solid-router@1.141.1(solid-js@1.9.10))(csstype@3.2.3)(solid-js@1.9.10)': + '@tanstack/solid-router-devtools@1.141.1(@tanstack/router-core@1.159.4)(@tanstack/solid-router@1.141.1(solid-js@1.9.10))(csstype@3.2.3)(solid-js@1.9.10)': dependencies: - '@tanstack/router-devtools-core': 1.141.1(@tanstack/router-core@1.158.4)(csstype@3.2.3)(solid-js@1.9.10) + '@tanstack/router-devtools-core': 
1.141.1(@tanstack/router-core@1.159.4)(csstype@3.2.3)(solid-js@1.9.10) '@tanstack/solid-router': 1.141.1(solid-js@1.9.10) solid-js: 1.9.10 optionalDependencies: - '@tanstack/router-core': 1.158.4 + '@tanstack/router-core': 1.159.4 transitivePeerDependencies: - csstype - '@tanstack/solid-router-ssr-query@1.141.1(@tanstack/query-core@5.90.12)(@tanstack/router-core@1.158.4)(@tanstack/solid-query@5.90.15(solid-js@1.9.10))(@tanstack/solid-router@1.141.1(solid-js@1.9.10))(eslint@9.39.2(jiti@2.6.1))(solid-js@1.9.10)(typescript@5.9.3)': + '@tanstack/solid-router-ssr-query@1.141.1(@tanstack/query-core@5.90.12)(@tanstack/router-core@1.159.4)(@tanstack/solid-query@5.90.15(solid-js@1.9.10))(@tanstack/solid-router@1.141.1(solid-js@1.9.10))(eslint@9.39.2(jiti@2.6.1))(solid-js@1.9.10)(typescript@5.9.3)': dependencies: '@tanstack/query-core': 5.90.12 - '@tanstack/router-ssr-query-core': 1.141.1(@tanstack/query-core@5.90.12)(@tanstack/router-core@1.158.4) + '@tanstack/router-ssr-query-core': 1.141.1(@tanstack/query-core@5.90.12)(@tanstack/router-core@1.159.4) '@tanstack/solid-query': 5.90.15(solid-js@1.9.10) '@tanstack/solid-router': 1.141.1(solid-js@1.9.10) eslint-plugin-solid: 0.14.5(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3) @@ -11463,13 +11428,13 @@ snapshots: transitivePeerDependencies: - crossws - '@tanstack/solid-start@1.141.1(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(solid-js@1.9.10)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + 
'@tanstack/solid-start@1.141.1(@tanstack/react-router@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(solid-js@1.9.10)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@tanstack/solid-router': 1.141.1(solid-js@1.9.10) '@tanstack/solid-start-client': 1.141.1(solid-js@1.9.10) '@tanstack/solid-start-server': 1.141.1(solid-js@1.9.10) '@tanstack/start-client-core': 1.141.1 - '@tanstack/start-plugin-core': 1.141.1(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + '@tanstack/start-plugin-core': 1.141.1(@tanstack/react-router@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/start-server-core': 1.141.1 pathe: 2.0.3 solid-js: 1.9.10 @@ -11554,21 +11519,21 @@ snapshots: tiny-invariant: 1.3.3 tiny-warning: 1.0.3 - '@tanstack/start-client-core@1.158.4': + '@tanstack/start-client-core@1.159.4': dependencies: - '@tanstack/router-core': 1.158.4 + '@tanstack/router-core': 1.159.4 '@tanstack/start-fn-stubs': 1.154.7 - '@tanstack/start-storage-context': 1.158.4 + '@tanstack/start-storage-context': 1.159.4 seroval: 1.5.0 tiny-invariant: 1.3.3 tiny-warning: 1.0.3 
'@tanstack/start-config@1.120.20(@types/node@24.10.3)(db0@0.3.4)(ioredis@5.8.2)(jiti@2.6.1)(lightningcss@1.30.2)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(rolldown@1.0.0-beta.53)(terser@5.44.1)(tsx@4.21.0)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))(yaml@2.8.2)': dependencies: - '@tanstack/react-router': 1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - '@tanstack/react-start-plugin': 1.131.50(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@vitejs/plugin-react@4.7.0(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(rolldown@1.0.0-beta.53)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + '@tanstack/react-router': 1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@tanstack/react-start-plugin': 1.131.50(@tanstack/react-router@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@vitejs/plugin-react@4.7.0(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(rolldown@1.0.0-beta.53)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/router-generator': 1.141.1 - '@tanstack/router-plugin': 
1.158.4(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + '@tanstack/router-plugin': 1.159.5(@tanstack/react-router@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/server-functions-plugin': 1.141.0(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/start-server-functions-handler': 1.120.19 '@vitejs/plugin-react': 4.7.0(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) @@ -11631,14 +11596,14 @@ snapshots: '@tanstack/start-fn-stubs@1.154.7': {} - '@tanstack/start-plugin-core@1.131.50(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(rolldown@1.0.0-beta.53)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + '@tanstack/start-plugin-core@1.131.50(@tanstack/react-router@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(rolldown@1.0.0-beta.53)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@babel/code-frame': 7.26.2 '@babel/core': 7.28.5 '@babel/types': 
7.28.5 '@tanstack/router-core': 1.131.50 '@tanstack/router-generator': 1.131.50 - '@tanstack/router-plugin': 1.131.50(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + '@tanstack/router-plugin': 1.131.50(@tanstack/react-router@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/router-utils': 1.131.2 '@tanstack/server-functions-plugin': 1.131.2(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/start-server-core': 1.131.50 @@ -11649,7 +11614,7 @@ snapshots: h3: 1.13.0 nitropack: 2.13.1(rolldown@1.0.0-beta.53) pathe: 2.0.3 - ufo: 1.6.3 + ufo: 1.6.1 vite: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) vitefu: 1.1.1(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) xmlbuilder2: 3.1.1 @@ -11689,7 +11654,7 @@ snapshots: - webpack - xml2js - '@tanstack/start-plugin-core@1.141.1(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + 
'@tanstack/start-plugin-core@1.141.1(@tanstack/react-router@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@babel/code-frame': 7.26.2 '@babel/core': 7.28.5 @@ -11697,7 +11662,7 @@ snapshots: '@rolldown/pluginutils': 1.0.0-beta.40 '@tanstack/router-core': 1.141.1 '@tanstack/router-generator': 1.141.1 - '@tanstack/router-plugin': 1.141.1(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + '@tanstack/router-plugin': 1.141.1(@tanstack/react-router@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/router-utils': 1.141.0 '@tanstack/server-functions-plugin': 1.141.0(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/start-client-core': 1.141.1 @@ -11721,22 +11686,22 @@ snapshots: - vite-plugin-solid - webpack - '@tanstack/start-plugin-core@1.159.0(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + 
'@tanstack/start-plugin-core@1.159.5(@tanstack/react-router@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@babel/code-frame': 7.27.1 '@babel/core': 7.28.5 '@babel/types': 7.28.5 '@rolldown/pluginutils': 1.0.0-beta.40 - '@tanstack/router-core': 1.158.4 - '@tanstack/router-generator': 1.158.4 - '@tanstack/router-plugin': 1.158.4(@tanstack/react-router@1.158.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + '@tanstack/router-core': 1.159.4 + '@tanstack/router-generator': 1.159.4 + '@tanstack/router-plugin': 1.159.5(@tanstack/react-router@1.159.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) '@tanstack/router-utils': 1.158.0 - '@tanstack/start-client-core': 1.158.4 - '@tanstack/start-server-core': 1.159.0 + '@tanstack/start-client-core': 1.159.4 + '@tanstack/start-server-core': 1.159.4 cheerio: 1.1.2 exsolve: 1.0.8 pathe: 2.0.3 - srvx: 0.10.1 + srvx: 0.11.4 tinyglobby: 0.2.15 ufo: 1.6.3 vite: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -11775,13 +11740,13 @@ snapshots: transitivePeerDependencies: - crossws - '@tanstack/start-server-core@1.159.0': + '@tanstack/start-server-core@1.159.4': dependencies: '@tanstack/history': 1.154.14 - 
'@tanstack/router-core': 1.158.4 - '@tanstack/start-client-core': 1.158.4 - '@tanstack/start-storage-context': 1.158.4 - h3-v2: h3@2.0.1-rc.11 + '@tanstack/router-core': 1.159.4 + '@tanstack/start-client-core': 1.159.4 + '@tanstack/start-storage-context': 1.159.4 + h3-v2: h3@2.0.1-rc.14 seroval: 1.5.0 tiny-invariant: 1.3.3 transitivePeerDependencies: @@ -11837,9 +11802,9 @@ snapshots: dependencies: '@tanstack/router-core': 1.141.1 - '@tanstack/start-storage-context@1.158.4': + '@tanstack/start-storage-context@1.159.4': dependencies: - '@tanstack/router-core': 1.158.4 + '@tanstack/router-core': 1.159.4 '@tanstack/start@1.120.20(@types/node@24.10.3)(db0@0.3.4)(ioredis@5.8.2)(jiti@2.6.1)(lightningcss@1.30.2)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(rolldown@1.0.0-beta.53)(terser@5.44.1)(tsx@4.21.0)(vite-plugin-solid@2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)))(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))(yaml@2.8.2)': dependencies: @@ -12045,17 +12010,8 @@ snapshots: '@types/ms@2.1.0': {} - '@types/node-fetch@2.6.13': - dependencies: - '@types/node': 24.10.3 - form-data: 4.0.5 - '@types/node@12.20.55': {} - '@types/node@18.19.130': - dependencies: - undici-types: 5.26.5 - '@types/node@20.19.26': dependencies: undici-types: 6.21.0 @@ -12306,7 +12262,7 @@ snapshots: node-forge: 1.3.3 pathe: 1.1.2 std-env: 3.10.0 - ufo: 1.6.3 + ufo: 1.6.1 untun: 0.1.3 uqr: 0.1.2 @@ -12681,10 +12637,6 @@ snapshots: agent-base@7.1.4: {} - agentkeepalive@4.6.0: - dependencies: - humanize-ms: 1.2.1 - ajv-draft-04@1.0.0(ajv@8.13.0): optionalDependencies: ajv: 8.13.0 @@ -13970,8 +13922,6 @@ snapshots: cross-spawn: 7.0.6 signal-exit: 4.1.0 - form-data-encoder@1.7.2: {} - form-data@4.0.5: dependencies: asynckit: 0.4.0 @@ -13984,11 +13934,6 @@ snapshots: dependencies: fd-package-json: 2.0.0 - formdata-node@4.4.1: - dependencies: - 
node-domexception: 1.0.0 - web-streams-polyfill: 4.0.0-beta.3 - formdata-polyfill@4.0.10: dependencies: fetch-blob: 3.2.0 @@ -14165,18 +14110,6 @@ snapshots: graceful-fs@4.2.11: {} - groq-sdk@0.37.0: - dependencies: - '@types/node': 18.19.130 - '@types/node-fetch': 2.6.13 - abort-controller: 3.0.0 - agentkeepalive: 4.6.0 - form-data-encoder: 1.7.2 - formdata-node: 4.4.1 - node-fetch: 2.7.0 - transitivePeerDependencies: - - encoding - gtoken@8.0.0: dependencies: gaxios: 7.1.3 @@ -14197,7 +14130,7 @@ snapshots: iron-webcrypto: 1.2.1 ohash: 1.1.6 radix3: 1.1.2 - ufo: 1.6.3 + ufo: 1.6.1 uncrypto: 0.1.3 unenv: 1.10.0 @@ -14232,10 +14165,10 @@ snapshots: rou3: 0.7.12 srvx: 0.8.16 - h3@2.0.1-rc.11: + h3@2.0.1-rc.14: dependencies: rou3: 0.7.12 - srvx: 0.10.1 + srvx: 0.11.4 happy-dom@20.0.11: dependencies: @@ -14428,10 +14361,6 @@ snapshots: human-signals@5.0.0: {} - humanize-ms@1.2.1: - dependencies: - ms: 2.1.3 - iconv-lite@0.6.3: dependencies: safer-buffer: 2.1.2 @@ -16735,7 +16664,7 @@ snapshots: sprintf-js@1.0.3: {} - srvx@0.10.1: {} + srvx@0.11.4: {} srvx@0.8.16: {} @@ -17105,8 +17034,6 @@ snapshots: magic-string: 0.30.21 unplugin: 2.3.11 - undici-types@5.26.5: {} - undici-types@5.28.4: {} undici-types@6.21.0: {} @@ -17379,7 +17306,7 @@ snapshots: hookable: 5.5.3 http-proxy: 1.18.1 micromatch: 4.0.8 - nitropack: 2.13.1(rolldown@1.0.0-beta.53) + nitropack: 2.12.9(rolldown@1.0.0-beta.53) node-fetch-native: 1.6.7 path-to-regexp: 6.3.0 pathe: 1.1.2 @@ -17798,8 +17725,6 @@ snapshots: web-streams-polyfill@3.3.3: {} - web-streams-polyfill@4.0.0-beta.3: {} - web-vitals@5.1.0: {} webidl-conversions@3.0.1: {} From 55db3b141ad819c8c0defeb79e87f22efbaba839 Mon Sep 17 00:00:00 2001 From: Dhamivibez Date: Thu, 12 Feb 2026 18:44:15 +0100 Subject: [PATCH 14/16] feat(ai-groq): add Groq adapter package Introduce a new Groq adapter to enable fast LLM inference via Groq's API. Includes TypeScript configuration and Vite build setup for consistent tooling across the AI packages. 
--- pnpm-lock.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index cf0c0caf..14713b56 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -731,7 +731,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -7899,7 +7899,7 @@ packages: tar@7.5.2: resolution: {integrity: sha512-7NyxrTE4Anh8km8iEy7o0QYPs+0JKBTj5ZaqHg6B39erLg0qYXN3BijtShwbsNSvQ+LN75+KV+C4QR/f6Gwnpg==} engines: {node: '>=18'} - deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exhorbitant rates) by contacting i@izs.me + deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. 
Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me term-size@2.2.1: resolution: {integrity: sha512-wK0Ri4fOGjv/XPy8SBHZChl8CM7uMc5VML7SqiQ0zG7+J5Vr+RMQDoHa2CNT6KHUnTGIXH34UDMkPzAUyapBZg==} From 5c56dff51f17c305bb9a5152d9cbd8ce99ecd5ae Mon Sep 17 00:00:00 2001 From: Dhamivibez Date: Wed, 11 Feb 2026 11:41:49 +0100 Subject: [PATCH 15/16] feat: opus 4.6 model & additional config for provider clients (#278) * feat: opus 4.6 model & additional config for provider clients * fix: isue with gemini adapter --- pnpm-lock.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 14713b56..cf0c0caf 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -731,7 +731,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -7899,7 +7899,7 @@ packages: tar@7.5.2: resolution: {integrity: sha512-7NyxrTE4Anh8km8iEy7o0QYPs+0JKBTj5ZaqHg6B39erLg0qYXN3BijtShwbsNSvQ+LN75+KV+C4QR/f6Gwnpg==} engines: {node: '>=18'} - deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me + deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. 
Support for old versions may be purchased (at exhorbitant rates) by contacting i@izs.me term-size@2.2.1: resolution: {integrity: sha512-wK0Ri4fOGjv/XPy8SBHZChl8CM7uMc5VML7SqiQ0zG7+J5Vr+RMQDoHa2CNT6KHUnTGIXH34UDMkPzAUyapBZg==} From 2a6a1a3dc6cb5fe4e60ce16849522f8dbe239795 Mon Sep 17 00:00:00 2001 From: Dhamivibez Date: Thu, 12 Feb 2026 18:58:05 +0100 Subject: [PATCH 16/16] Add groq-sdk dependency to ai-groq package --- packages/typescript/ai-groq/package.json | 5 +- pnpm-lock.yaml | 75 +++++++++++++++++++++++- 2 files changed, 78 insertions(+), 2 deletions(-) diff --git a/packages/typescript/ai-groq/package.json b/packages/typescript/ai-groq/package.json index c3004834..157cea01 100644 --- a/packages/typescript/ai-groq/package.json +++ b/packages/typescript/ai-groq/package.json @@ -46,5 +46,8 @@ "@tanstack/ai": "workspace:^", "zod": "^4.0.0" }, - "packageManager": "pnpm@10.17.0" + "packageManager": "pnpm@10.17.0", + "dependencies": { + "groq-sdk": "^0.37.0" + } } diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index cf0c0caf..db8c1053 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -725,6 +725,9 @@ importers: '@tanstack/ai': specifier: workspace:^ version: link:../ai + groq-sdk: + specifier: ^0.37.0 + version: 0.37.0 zod: specifier: ^4.0.0 version: 4.2.1 @@ -4068,9 +4071,15 @@ packages: '@types/ms@2.1.0': resolution: {integrity: sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==} + '@types/node-fetch@2.6.13': + resolution: {integrity: sha512-QGpRVpzSaUs30JBSGPjOg4Uveu384erbHBoT1zeONvyCfwQxIkUshLAOqN/k9EjGviPRmWTTe6aH2qySWKTVSw==} + '@types/node@12.20.55': resolution: {integrity: sha512-J8xLz7q2OFulZ2cyGTLE1TbbZcjpno7FaN6zdJNrgAdrJ+DZzh/uFR6YrTb4C+nXakvud8Q4+rbhoIWlYQbUFQ==} + '@types/node@18.19.130': + resolution: {integrity: sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg==} + '@types/node@20.19.26': resolution: {integrity: 
sha512-0l6cjgF0XnihUpndDhk+nyD3exio3iKaYROSgvh/qSevPXax3L8p5DBRFjbvalnwatGgHEQn2R88y2fA3g4irg==} @@ -4509,6 +4518,10 @@ packages: resolution: {integrity: sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==} engines: {node: '>= 14'} + agentkeepalive@4.6.0: + resolution: {integrity: sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==} + engines: {node: '>= 8.0.0'} + ajv-draft-04@1.0.0: resolution: {integrity: sha512-mv00Te6nmYbRp5DCwclxtt7yV/joXJPGS7nM+97GdxvuttCOfgI3K4U25zboyeX0O+myI8ERluxQe5wljMmVIw==} peerDependencies: @@ -5629,6 +5642,9 @@ packages: resolution: {integrity: sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==} engines: {node: '>=14'} + form-data-encoder@1.7.2: + resolution: {integrity: sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==} + form-data@4.0.5: resolution: {integrity: sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==} engines: {node: '>= 6'} @@ -5638,6 +5654,10 @@ packages: engines: {node: '>=18.3.0'} hasBin: true + formdata-node@4.4.1: + resolution: {integrity: sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==} + engines: {node: '>= 12.20'} + formdata-polyfill@4.0.10: resolution: {integrity: sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==} engines: {node: '>=12.20.0'} @@ -5789,6 +5809,9 @@ packages: graceful-fs@4.2.11: resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} + groq-sdk@0.37.0: + resolution: {integrity: sha512-lT72pcT8b/X5XrzdKf+rWVzUGW1OQSKESmL8fFN5cTbsf02gq6oFam4SVeNtzELt9cYE2Pt3pdGgSImuTbHFDg==} + gtoken@8.0.0: resolution: {integrity: sha512-+CqsMbHPiSTdtSO14O51eMNlrp9N79gmeqmXeouJOhfucAedHw9noVe/n5uJk3tbKE6a+6ZCQg3RPhVhHByAIw==} engines: 
{node: '>=18'} @@ -5954,6 +5977,9 @@ packages: resolution: {integrity: sha512-AXcZb6vzzrFAUE61HnN4mpLqd/cSIwNQjtNWR0euPm6y0iqx3G4gOXaIDdtdDwZmhwe82LA6+zinmW4UBWVePQ==} engines: {node: '>=16.17.0'} + humanize-ms@1.2.1: + resolution: {integrity: sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==} + iconv-lite@0.6.3: resolution: {integrity: sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==} engines: {node: '>=0.10.0'} @@ -7899,7 +7925,7 @@ packages: tar@7.5.2: resolution: {integrity: sha512-7NyxrTE4Anh8km8iEy7o0QYPs+0JKBTj5ZaqHg6B39erLg0qYXN3BijtShwbsNSvQ+LN75+KV+C4QR/f6Gwnpg==} engines: {node: '>=18'} - deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exhorbitant rates) by contacting i@izs.me + deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. 
Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me term-size@2.2.1: resolution: {integrity: sha512-wK0Ri4fOGjv/XPy8SBHZChl8CM7uMc5VML7SqiQ0zG7+J5Vr+RMQDoHa2CNT6KHUnTGIXH34UDMkPzAUyapBZg==} @@ -8115,6 +8141,9 @@ packages: unctx@2.5.0: resolution: {integrity: sha512-p+Rz9x0R7X+CYDkT+Xg8/GhpcShTlU8n+cf9OtOEf7zEQsNcCZO1dPKNRDqvUTaq+P32PMMkxWHwfrxkqfqAYg==} + undici-types@5.26.5: + resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==} + undici-types@5.28.4: resolution: {integrity: sha512-3OeMF5Lyowe8VW0skf5qaIE7Or3yS9LS7fvMUI0gg4YxpIBVg0L8BxCmROw2CcYhSkpR68Epz7CGc8MPj94Uww==} @@ -8696,6 +8725,10 @@ packages: resolution: {integrity: sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==} engines: {node: '>= 8'} + web-streams-polyfill@4.0.0-beta.3: + resolution: {integrity: sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==} + engines: {node: '>= 14'} + web-vitals@5.1.0: resolution: {integrity: sha512-ArI3kx5jI0atlTtmV0fWU3fjpLmq/nD3Zr1iFFlJLaqa5wLBkUSzINwBPySCX/8jRyjlmy1Volw1kz1g9XE4Jg==} @@ -12010,8 +12043,17 @@ snapshots: '@types/ms@2.1.0': {} + '@types/node-fetch@2.6.13': + dependencies: + '@types/node': 24.10.3 + form-data: 4.0.5 + '@types/node@12.20.55': {} + '@types/node@18.19.130': + dependencies: + undici-types: 5.26.5 + '@types/node@20.19.26': dependencies: undici-types: 6.21.0 @@ -12637,6 +12679,10 @@ snapshots: agent-base@7.1.4: {} + agentkeepalive@4.6.0: + dependencies: + humanize-ms: 1.2.1 + ajv-draft-04@1.0.0(ajv@8.13.0): optionalDependencies: ajv: 8.13.0 @@ -13922,6 +13968,8 @@ snapshots: cross-spawn: 7.0.6 signal-exit: 4.1.0 + form-data-encoder@1.7.2: {} + form-data@4.0.5: dependencies: asynckit: 0.4.0 @@ -13934,6 +13982,11 @@ snapshots: dependencies: fd-package-json: 2.0.0 + formdata-node@4.4.1: + dependencies: + node-domexception: 1.0.0 + web-streams-polyfill: 
4.0.0-beta.3 + formdata-polyfill@4.0.10: dependencies: fetch-blob: 3.2.0 @@ -14110,6 +14163,18 @@ snapshots: graceful-fs@4.2.11: {} + groq-sdk@0.37.0: + dependencies: + '@types/node': 18.19.130 + '@types/node-fetch': 2.6.13 + abort-controller: 3.0.0 + agentkeepalive: 4.6.0 + form-data-encoder: 1.7.2 + formdata-node: 4.4.1 + node-fetch: 2.7.0 + transitivePeerDependencies: + - encoding + gtoken@8.0.0: dependencies: gaxios: 7.1.3 @@ -14361,6 +14426,10 @@ snapshots: human-signals@5.0.0: {} + humanize-ms@1.2.1: + dependencies: + ms: 2.1.3 + iconv-lite@0.6.3: dependencies: safer-buffer: 2.1.2 @@ -17034,6 +17103,8 @@ snapshots: magic-string: 0.30.21 unplugin: 2.3.11 + undici-types@5.26.5: {} + undici-types@5.28.4: {} undici-types@6.21.0: {} @@ -17725,6 +17796,8 @@ snapshots: web-streams-polyfill@3.3.3: {} + web-streams-polyfill@4.0.0-beta.3: {} + web-vitals@5.1.0: {} webidl-conversions@3.0.1: {}