diff --git a/knip.json b/knip.json index b777d8c2a..036404ee4 100644 --- a/knip.json +++ b/knip.json @@ -7,7 +7,6 @@ "docs" ], "workspaces": { - "packages/appkit": {}, "packages/appkit-ui": { "ignoreDependencies": ["tailwindcss", "tw-animate-css"] } @@ -17,6 +16,11 @@ "**/*.example.tsx", "**/*.css", "packages/appkit/src/plugins/vector-search/**", + "packages/appkit/src/plugin/index.ts", + "packages/appkit/src/plugins/agents/index.ts", + "packages/appkit/src/plugins/agents/tools/index.ts", + "packages/appkit/src/plugins/agents/from-plugin.ts", + "packages/appkit/src/plugins/agents/load-agents.ts", "template/**", "tools/**", "docs/**" diff --git a/packages/appkit/package.json b/packages/appkit/package.json index 146be5a90..38dc3946c 100644 --- a/packages/appkit/package.json +++ b/packages/appkit/package.json @@ -29,6 +29,10 @@ "development": "./src/index.ts", "default": "./dist/index.js" }, + "./agents/databricks": { + "development": "./src/agents/databricks.ts", + "default": "./dist/agents/databricks.js" + }, "./type-generator": { "types": "./dist/type-generator/index.d.ts", "development": "./src/type-generator/index.ts", @@ -93,6 +97,7 @@ "publishConfig": { "exports": { ".": "./dist/index.js", + "./agents/databricks": "./dist/agents/databricks.js", "./dist/shared/src/plugin": "./dist/shared/src/plugin.d.ts", "./type-generator": "./dist/type-generator/index.js", "./package.json": "./package.json" diff --git a/packages/appkit/src/agents/databricks.ts b/packages/appkit/src/agents/databricks.ts new file mode 100644 index 000000000..b74053eef --- /dev/null +++ b/packages/appkit/src/agents/databricks.ts @@ -0,0 +1,609 @@ +import type { + AgentAdapter, + AgentEvent, + AgentInput, + AgentRunContext, + AgentToolDefinition, +} from "shared"; +import { stream as servingStream } from "../connectors/serving/client"; + +/** + * Transport shim: given an OpenAI-compatible request body, returns the raw + * SSE byte stream from the serving endpoint. 
Injected at construction time so
+ * callers can swap in the workspace SDK (factory paths), a bare `fetch`
+ * (the raw constructor), or a test fake.
+ */
+type StreamBody = (
+  body: Record<string, unknown>,
+  signal?: AbortSignal,
+) => Promise<ReadableStream<Uint8Array>>;
+
+/**
+ * Escape-hatch options: provide an `endpointUrl` + `authenticate()` and the
+ * adapter uses a bare `fetch()` to call it. Useful for tests and for pointing
+ * the adapter at non-workspace endpoints (reverse proxies, mocks).
+ */
+interface RawFetchAdapterOptions {
+  endpointUrl: string;
+  authenticate: () => Promise<Record<string, string>>;
+  maxSteps?: number;
+  maxTokens?: number;
+}
+
+/**
+ * Preferred options: caller provides the transport function directly.
+ * The `fromServingEndpoint` / `fromModelServing` factories use this to route
+ * through `connectors/serving/stream`, which centralises URL encoding, auth
+ * via the SDK's `apiClient.request`, and any future retries/telemetry.
+ */
+interface StreamBodyAdapterOptions {
+  streamBody: StreamBody;
+  maxSteps?: number;
+  maxTokens?: number;
+}
+
+type DatabricksAdapterOptions =
+  | RawFetchAdapterOptions
+  | StreamBodyAdapterOptions;
+
+function isStreamBodyOptions(
+  o: DatabricksAdapterOptions,
+): o is StreamBodyAdapterOptions {
+  return "streamBody" in o;
+}
+
+/**
+ * Duck-typed subset of the Databricks SDK `WorkspaceClient`. Callers of
+ * `fromServingEndpoint` and `fromModelServing` pass a real `WorkspaceClient`,
+ * but we only need the `apiClient.request` surface — so we declare the minimal
+ * interface rather than importing the SDK type directly. This keeps the adapter
+ * free of a hard compile-time dependency on `@databricks/sdk-experimental`.
+ */
+interface WorkspaceClientLike {
+  apiClient: {
+    request(options: Record<string, unknown>): Promise<unknown>;
+  };
+}
+
+interface ServingEndpointOptions {
+  workspaceClient: WorkspaceClientLike;
+  endpointName: string;
+  maxSteps?: number;
+  maxTokens?: number;
+}
+
+interface ModelServingOptions {
+  maxSteps?: number;
+  maxTokens?: number;
+  workspaceClient?: WorkspaceClientLike;
+}
+
+interface OpenAIMessage {
+  role: "system" | "user" | "assistant" | "tool";
+  content: string | null;
+  tool_calls?: OpenAIToolCall[];
+  tool_call_id?: string;
+}
+
+interface OpenAIToolCall {
+  id: string;
+  type: "function";
+  function: { name: string; arguments: string };
+}
+
+interface OpenAITool {
+  type: "function";
+  function: {
+    name: string;
+    description: string;
+    parameters: unknown;
+  };
+}
+
+interface DeltaToolCall {
+  index: number;
+  id?: string;
+  type?: string;
+  function?: { name?: string; arguments?: string };
+}
+
+/**
+ * Adapter that talks directly to Databricks Model Serving `/invocations` endpoint.
+ *
+ * No dependency on the Vercel AI SDK or LangChain. Uses raw `fetch()` to POST
+ * OpenAI-compatible payloads and parses the SSE stream itself. Calls
+ * `authenticate()` per-request so tokens are always fresh.
+ *
+ * Handles both structured `tool_calls` responses and text-based tool call
+ * fallback parsing for models that output tool calls as text.
+ * + * @example Using the factory (recommended) + * ```ts + * import { createApp, createAgent, agents } from "@databricks/appkit"; + * import { DatabricksAdapter } from "@databricks/appkit/agents/databricks"; + * import { WorkspaceClient } from "@databricks/sdk-experimental"; + * + * const adapter = DatabricksAdapter.fromServingEndpoint({ + * workspaceClient: new WorkspaceClient({}), + * endpointName: "my-endpoint", + * }); + * + * await createApp({ + * plugins: [ + * agents({ + * agents: { + * assistant: createAgent({ + * instructions: "You are a helpful assistant.", + * model: adapter, + * }), + * }, + * }), + * ], + * }); + * ``` + * + * @example Using the raw constructor + * ```ts + * const adapter = new DatabricksAdapter({ + * endpointUrl: "https://host/serving-endpoints/my-endpoint/invocations", + * authenticate: async () => ({ Authorization: `Bearer ${token}` }), + * }); + * ``` + */ +export class DatabricksAdapter implements AgentAdapter { + private streamBody: StreamBody; + private maxSteps: number; + private maxTokens: number; + + constructor(options: DatabricksAdapterOptions) { + this.maxSteps = options.maxSteps ?? 10; + this.maxTokens = options.maxTokens ?? 4096; + + if (isStreamBodyOptions(options)) { + this.streamBody = options.streamBody; + } else { + const { endpointUrl, authenticate } = options; + this.streamBody = async (body, signal) => { + const authHeaders = await authenticate(); + const response = await fetch(endpointUrl, { + method: "POST", + headers: { + "Content-Type": "application/json", + ...authHeaders, + }, + body: JSON.stringify(body), + signal, + }); + if (!response.ok) { + const errorText = await response.text().catch(() => "Unknown error"); + throw new Error( + `Databricks API error (${response.status}): ${errorText}`, + ); + } + if (!response.body) throw new Error("No response body"); + return response.body; + }; + } + } + + /** + * Creates a DatabricksAdapter for a Databricks Model Serving endpoint. 
+   *
+   * Routes through the shared `connectors/serving/stream` helper, which
+   * delegates to the SDK's `apiClient.request({ raw: true })`. That gives the
+   * adapter centralised URL encoding + authentication with the rest of the
+   * serving surface — no bespoke `fetch()` + `authenticate()` plumbing.
+   */
+  static async fromServingEndpoint(
+    options: ServingEndpointOptions,
+  ): Promise<DatabricksAdapter> {
+    const { workspaceClient, endpointName, maxSteps, maxTokens } = options;
+    return new DatabricksAdapter({
+      streamBody: (body) =>
+        // Cast through the structural shape: the connector types
+        // `workspaceClient` as the SDK's concrete `WorkspaceClient`, but we
+        // only need `apiClient.request`.
+        servingStream(
+          workspaceClient as unknown as Parameters<typeof servingStream>[0],
+          endpointName,
+          body,
+        ),
+      maxSteps,
+      maxTokens,
+    });
+  }
+
+  /**
+   * Creates a DatabricksAdapter from a Model Serving endpoint name.
+   * Auto-creates a WorkspaceClient internally. Reads the endpoint name
+   * from the argument or the `DATABRICKS_AGENT_ENDPOINT` env var.
+   *
+   * @example
+   * ```ts
+   * // Reads endpoint from DATABRICKS_AGENT_ENDPOINT env var
+   * const adapter = await DatabricksAdapter.fromModelServing();
+   *
+   * // Explicit endpoint
+   * const adapter = await DatabricksAdapter.fromModelServing("my-endpoint");
+   *
+   * // With options
+   * const adapter = await DatabricksAdapter.fromModelServing("my-endpoint", {
+   *   maxSteps: 5,
+   *   maxTokens: 2048,
+   * });
+   * ```
+   */
+  static async fromModelServing(
+    endpointName?: string,
+    options?: ModelServingOptions,
+  ): Promise<DatabricksAdapter> {
+    const resolvedEndpoint =
+      endpointName ?? process.env.DATABRICKS_AGENT_ENDPOINT;
+
+    if (!resolvedEndpoint) {
+      throw new Error(
+        "No endpoint name provided and DATABRICKS_AGENT_ENDPOINT env var is not set. 
" +
+          "Pass an endpoint name or set the environment variable.",
+      );
+    }
+
+    let workspaceClient: WorkspaceClientLike | undefined =
+      options?.workspaceClient;
+    if (!workspaceClient) {
+      const sdk = await import("@databricks/sdk-experimental");
+      workspaceClient = new sdk.WorkspaceClient(
+        {},
+      ) as unknown as WorkspaceClientLike;
+    }
+
+    return DatabricksAdapter.fromServingEndpoint({
+      workspaceClient,
+      endpointName: resolvedEndpoint,
+      maxSteps: options?.maxSteps,
+      maxTokens: options?.maxTokens,
+    });
+  }
+
+  async *run(
+    input: AgentInput,
+    context: AgentRunContext,
+  ): AsyncGenerator<AgentEvent> {
+    // Databricks API requires tool names to match [a-zA-Z0-9_-].
+    // Our tool names use dots (e.g. "analytics.query"), so we swap dots
+    // for double-underscores in the wire format and map back on receipt.
+    const nameToWire = new Map<string, string>();
+    const wireToName = new Map<string, string>();
+    for (const tool of input.tools) {
+      const wire = tool.name.replace(/\./g, "__");
+      nameToWire.set(tool.name, wire);
+      wireToName.set(wire, tool.name);
+    }
+
+    const tools = this.buildTools(input.tools, nameToWire);
+    const messages = this.buildMessages(input.messages);
+
+    yield { type: "status", status: "running" };
+
+    for (let step = 0; step < this.maxSteps; step++) {
+      if (context.signal?.aborted) break;
+
+      const { text, toolCalls } = yield* this.streamCompletion(
+        messages,
+        tools,
+        context,
+      );
+
+      if (toolCalls.length === 0) {
+        const parsed = parseTextToolCalls(text);
+        if (parsed.length > 0) {
+          yield* this.executeToolCalls(parsed, messages, context);
+          continue;
+        }
+        break;
+      }
+
+      messages.push({
+        role: "assistant",
+        content: text || null,
+        tool_calls: toolCalls,
+      });
+
+      for (const tc of toolCalls) {
+        const wireName = tc.function.name;
+        const originalName = wireToName.get(wireName) ??
wireName;
+        let args: unknown;
+        try {
+          args = JSON.parse(tc.function.arguments);
+        } catch {
+          args = {};
+        }
+
+        yield { type: "tool_call", callId: tc.id, name: originalName, args };
+
+        try {
+          const result = await context.executeTool(originalName, args);
+          const resultStr =
+            typeof result === "string" ? result : JSON.stringify(result);
+
+          yield { type: "tool_result", callId: tc.id, result };
+
+          messages.push({
+            role: "tool",
+            content: resultStr,
+            tool_call_id: tc.id,
+          });
+        } catch (error) {
+          const errMsg =
+            error instanceof Error ? error.message : "Tool execution failed";
+
+          yield {
+            type: "tool_result",
+            callId: tc.id,
+            result: null,
+            error: errMsg,
+          };
+
+          messages.push({
+            role: "tool",
+            content: JSON.stringify({ error: errMsg }),
+            tool_call_id: tc.id,
+          });
+        }
+      }
+    }
+  }
+
+  private async *streamCompletion(
+    messages: OpenAIMessage[],
+    tools: OpenAITool[],
+    context: AgentRunContext,
+  ): AsyncGenerator<
+    AgentEvent,
+    { text: string; toolCalls: OpenAIToolCall[] },
+    unknown
+  > {
+    const body: Record<string, unknown> = {
+      messages,
+      stream: true,
+      max_tokens: this.maxTokens,
+    };
+
+    if (tools.length > 0) {
+      body.tools = tools;
+    }
+
+    const responseBody = await this.streamBody(body, context.signal);
+    const reader = responseBody.getReader();
+
+    const decoder = new TextDecoder();
+    let buffer = "";
+    let fullText = "";
+    const toolCallAccumulator = new Map<
+      number,
+      { id: string; name: string; arguments: string }
+    >();
+
+    try {
+      while (true) {
+        if (context.signal?.aborted) break;
+
+        const { done, value } = await reader.read();
+        if (done) break;
+
+        buffer += decoder.decode(value, { stream: true });
+        const lines = buffer.split("\n");
+        buffer = lines.pop() ??
"";
+
+        for (const line of lines) {
+          const trimmed = line.trim();
+          if (!trimmed.startsWith("data: ")) continue;
+          const data = trimmed.slice(6);
+          if (data === "[DONE]") continue;
+
+          let parsed: any;
+          try {
+            parsed = JSON.parse(data);
+          } catch {
+            continue;
+          }
+
+          const delta = parsed.choices?.[0]?.delta;
+          if (!delta) continue;
+
+          if (delta.content) {
+            fullText += delta.content;
+            yield { type: "message_delta" as const, content: delta.content };
+          }
+
+          if (delta.tool_calls) {
+            for (const tc of delta.tool_calls as DeltaToolCall[]) {
+              const existing = toolCallAccumulator.get(tc.index);
+              if (existing) {
+                if (tc.function?.arguments) {
+                  existing.arguments += tc.function.arguments;
+                }
+              } else {
+                toolCallAccumulator.set(tc.index, {
+                  id: tc.id ?? `call_${tc.index}`,
+                  name: tc.function?.name ?? "",
+                  arguments: tc.function?.arguments ?? "",
+                });
+              }
+            }
+          }
+        }
+      }
+    } finally {
+      reader.releaseLock();
+    }
+
+    const toolCalls: OpenAIToolCall[] = Array.from(
+      toolCallAccumulator.values(),
+    ).map((tc) => ({
+      id: tc.id,
+      type: "function" as const,
+      function: { name: tc.name, arguments: tc.arguments || "{}" },
+    }));
+
+    return { text: fullText, toolCalls };
+  }
+
+  private async *executeToolCalls(
+    calls: Array<{ name: string; args: unknown }>,
+    messages: OpenAIMessage[],
+    context: AgentRunContext,
+  ): AsyncGenerator<AgentEvent> {
+    const toolCallObjs: OpenAIToolCall[] = calls.map((c, i) => ({
+      id: `text_call_${i}`,
+      type: "function" as const,
+      function: {
+        name: c.name,
+        arguments: JSON.stringify(c.args),
+      },
+    }));
+
+    messages.push({
+      role: "assistant",
+      content: null,
+      tool_calls: toolCallObjs,
+    });
+
+    for (const tc of toolCallObjs) {
+      const name = tc.function.name;
+      let args: unknown;
+      try {
+        args = JSON.parse(tc.function.arguments);
+      } catch {
+        args = {};
+      }
+
+      yield { type: "tool_call", callId: tc.id, name, args };
+
+      try {
+        const result = await context.executeTool(name, args);
+        const resultStr =
+          typeof result === "string" ?
result : JSON.stringify(result);
+
+        yield { type: "tool_result", callId: tc.id, result };
+
+        messages.push({
+          role: "tool",
+          content: resultStr,
+          tool_call_id: tc.id,
+        });
+      } catch (error) {
+        const errMsg =
+          error instanceof Error ? error.message : "Tool execution failed";
+
+        yield {
+          type: "tool_result",
+          callId: tc.id,
+          result: null,
+          error: errMsg,
+        };
+
+        messages.push({
+          role: "tool",
+          content: JSON.stringify({ error: errMsg }),
+          tool_call_id: tc.id,
+        });
+      }
+    }
+  }
+
+  private buildMessages(messages: AgentInput["messages"]): OpenAIMessage[] {
+    return messages.map((m) => ({
+      role: m.role as OpenAIMessage["role"],
+      content: m.content,
+    }));
+  }
+
+  private buildTools(
+    definitions: AgentToolDefinition[],
+    nameToWire: Map<string, string>,
+  ): OpenAITool[] {
+    return definitions.map((def) => ({
+      type: "function" as const,
+      function: {
+        name: nameToWire.get(def.name) ?? def.name,
+        description: def.description,
+        parameters: def.parameters,
+      },
+    }));
+  }
+}
+
+// ---------------------------------------------------------------------------
+// Text-based tool call parsing (fallback)
+// ---------------------------------------------------------------------------
+
+/**
+ * Parses text-based tool calls from model output.
+ *
+ * Handles two formats:
+ * 1. Llama native: `[{"name": "tool_name", "parameters": {"arg": "val"}}]`
+ * 2.
Python-style: `[tool_name(arg1='val1', arg2='val2')]`
+ */
+export function parseTextToolCalls(
+  text: string,
+): Array<{ name: string; args: unknown }> {
+  const trimmed = text.trim();
+
+  const jsonResult = tryParseLlamaJsonToolCalls(trimmed);
+  if (jsonResult.length > 0) return jsonResult;
+
+  const pyResult = tryParsePythonStyleToolCalls(trimmed);
+  if (pyResult.length > 0) return pyResult;
+
+  return [];
+}
+
+function tryParseLlamaJsonToolCalls(
+  text: string,
+): Array<{ name: string; args: unknown }> {
+  const match = text.match(/\[\s*\{[\s\S]*\}\s*\]/);
+  if (!match) return [];
+
+  try {
+    const parsed = JSON.parse(match[0]);
+    if (!Array.isArray(parsed)) return [];
+
+    return parsed
+      .filter(
+        (item: any) =>
+          typeof item === "object" &&
+          item !== null &&
+          typeof item.name === "string",
+      )
+      .map((item: any) => ({
+        name: item.name,
+        args: item.parameters ?? item.arguments ?? item.args ?? {},
+      }));
+  } catch {
+    return [];
+  }
+}
+
+function tryParsePythonStyleToolCalls(
+  text: string,
+): Array<{ name: string; args: unknown }> {
+  const pattern = /\[?([a-zA-Z_][\w.]*)\(([^)]*)\)\]?/g;
+  const results: Array<{ name: string; args: unknown }> = [];
+
+  for (const match of text.matchAll(pattern)) {
+    const name = match[1];
+    const argsStr = match[2];
+
+    const args: Record<string, string> = {};
+    const argPattern = /(\w+)\s*=\s*(?:'([^']*)'|"([^"]*)"|(\S+))/g;
+    for (const argMatch of argsStr.matchAll(argPattern)) {
+      const key = argMatch[1];
+      const value = argMatch[2] ?? argMatch[3] ??
argMatch[4]; + args[key] = value; + } + + results.push({ name, args }); + } + + return results; +} diff --git a/packages/appkit/src/agents/tests/databricks.test.ts b/packages/appkit/src/agents/tests/databricks.test.ts new file mode 100644 index 000000000..8a835094e --- /dev/null +++ b/packages/appkit/src/agents/tests/databricks.test.ts @@ -0,0 +1,486 @@ +import type { AgentEvent, AgentToolDefinition, Message } from "shared"; +import { afterEach, beforeEach, describe, expect, test, vi } from "vitest"; +import { DatabricksAdapter, parseTextToolCalls } from "../databricks"; + +const mockAuthenticate = vi + .fn() + .mockResolvedValue({ Authorization: "Bearer test-token" }); + +function sseChunk(data: string): string { + return `data: ${data}\n\n`; +} + +function textDelta(content: string): string { + return sseChunk( + JSON.stringify({ + choices: [{ delta: { content } }], + }), + ); +} + +function toolCallDelta( + index: number, + id: string | undefined, + name: string | undefined, + args: string, +): string { + return sseChunk( + JSON.stringify({ + choices: [ + { + delta: { + tool_calls: [ + { + index, + ...(id && { id }), + ...(name && { type: "function" }), + function: { + ...(name && { name }), + arguments: args, + }, + }, + ], + }, + }, + ], + }), + ); +} + +function createReadableStream(chunks: string[]): ReadableStream { + const encoder = new TextEncoder(); + let i = 0; + return new ReadableStream({ + pull(controller) { + if (i < chunks.length) { + controller.enqueue(encoder.encode(chunks[i])); + i++; + } else { + controller.close(); + } + }, + }); +} + +function mockFetch(chunks: string[]): typeof globalThis.fetch { + return vi.fn().mockResolvedValue({ + ok: true, + body: createReadableStream(chunks), + text: () => Promise.resolve(""), + }); +} + +function createTestMessages(): Message[] { + return [{ id: "1", role: "user", content: "Hello", createdAt: new Date() }]; +} + +function createTestTools(): AgentToolDefinition[] { + return [ + { + name: 
"analytics.query", + description: "Run SQL", + parameters: { + type: "object", + properties: { query: { type: "string" } }, + required: ["query"], + }, + }, + ]; +} + +function createAdapter(overrides?: { + endpointUrl?: string; + authenticate?: () => Promise>; + maxSteps?: number; + maxTokens?: number; +}) { + return new DatabricksAdapter({ + endpointUrl: + "https://test.databricks.com/serving-endpoints/my-endpoint/invocations", + authenticate: mockAuthenticate, + ...overrides, + }); +} + +describe("DatabricksAdapter", () => { + const originalFetch = globalThis.fetch; + + afterEach(() => { + globalThis.fetch = originalFetch; + mockAuthenticate.mockClear(); + }); + + test("streams text deltas from the model", async () => { + globalThis.fetch = mockFetch([ + textDelta("Hello"), + textDelta(" world"), + sseChunk("[DONE]"), + ]); + + const adapter = createAdapter(); + const events: AgentEvent[] = []; + + for await (const event of adapter.run( + { messages: createTestMessages(), tools: [], threadId: "t1" }, + { executeTool: vi.fn() }, + )) { + events.push(event); + } + + expect(events[0]).toEqual({ type: "status", status: "running" }); + expect(events[1]).toEqual({ type: "message_delta", content: "Hello" }); + expect(events[2]).toEqual({ type: "message_delta", content: " world" }); + }); + + test("calls authenticate() per request for fresh headers", async () => { + globalThis.fetch = mockFetch([textDelta("Hi"), sseChunk("[DONE]")]); + + const adapter = createAdapter(); + + for await (const _ of adapter.run( + { messages: createTestMessages(), tools: [], threadId: "t1" }, + { executeTool: vi.fn() }, + )) { + // drain + } + + expect(mockAuthenticate).toHaveBeenCalledTimes(1); + + const [, init] = (globalThis.fetch as any).mock.calls[0]; + expect(init.headers.Authorization).toBe("Bearer test-token"); + }); + + test("handles structured tool calls and executes them", async () => { + const executeTool = vi.fn().mockResolvedValue([{ trip_id: 1 }]); + + let callCount = 0; + 
globalThis.fetch = vi.fn().mockImplementation(() => { + callCount++; + if (callCount === 1) { + return Promise.resolve({ + ok: true, + body: createReadableStream([ + toolCallDelta(0, "call_1", "analytics__query", ""), + toolCallDelta(0, undefined, undefined, '{"query":'), + toolCallDelta(0, undefined, undefined, '"SELECT 1"}'), + sseChunk("[DONE]"), + ]), + }); + } + return Promise.resolve({ + ok: true, + body: createReadableStream([ + textDelta("Here are the results"), + sseChunk("[DONE]"), + ]), + }); + }); + + const adapter = createAdapter(); + const events: AgentEvent[] = []; + + for await (const event of adapter.run( + { + messages: createTestMessages(), + tools: createTestTools(), + threadId: "t1", + }, + { executeTool }, + )) { + events.push(event); + } + + expect(events).toContainEqual({ + type: "tool_call", + callId: "call_1", + name: "analytics.query", + args: { query: "SELECT 1" }, + }); + + expect(executeTool).toHaveBeenCalledWith("analytics.query", { + query: "SELECT 1", + }); + + expect(events).toContainEqual( + expect.objectContaining({ + type: "tool_result", + callId: "call_1", + result: [{ trip_id: 1 }], + }), + ); + + expect(events).toContainEqual({ + type: "message_delta", + content: "Here are the results", + }); + + // authenticate() called once per streamCompletion + expect(mockAuthenticate).toHaveBeenCalledTimes(2); + }); + + test("respects maxSteps limit", async () => { + globalThis.fetch = vi.fn().mockImplementation(() => + Promise.resolve({ + ok: true, + body: createReadableStream([ + toolCallDelta( + 0, + "call_loop", + "analytics__query", + '{"query":"SELECT 1"}', + ), + sseChunk("[DONE]"), + ]), + }), + ); + + const adapter = createAdapter({ maxSteps: 2 }); + const events: AgentEvent[] = []; + + for await (const event of adapter.run( + { + messages: createTestMessages(), + tools: createTestTools(), + threadId: "t1", + }, + { executeTool: vi.fn().mockResolvedValue("ok") }, + )) { + events.push(event); + } + + 
expect(globalThis.fetch).toHaveBeenCalledTimes(2); + }); + + test("sends correct request to endpoint URL", async () => { + globalThis.fetch = mockFetch([textDelta("Hi"), sseChunk("[DONE]")]); + + const adapter = createAdapter(); + + for await (const _ of adapter.run( + { + messages: createTestMessages(), + tools: createTestTools(), + threadId: "t1", + }, + { executeTool: vi.fn() }, + )) { + // drain + } + + const [url, init] = (globalThis.fetch as any).mock.calls[0]; + expect(url).toBe( + "https://test.databricks.com/serving-endpoints/my-endpoint/invocations", + ); + + const body = JSON.parse(init.body); + expect(body.stream).toBe(true); + expect(body.tools).toHaveLength(1); + expect(body.tools[0].function.name).toBe("analytics__query"); + expect(body.messages[0]).toEqual({ + role: "user", + content: "Hello", + }); + }); + + test("throws on non-ok response", async () => { + globalThis.fetch = vi.fn().mockResolvedValue({ + ok: false, + status: 401, + text: () => Promise.resolve("Unauthorized"), + }); + + const adapter = createAdapter(); + + await expect(async () => { + for await (const _ of adapter.run( + { messages: createTestMessages(), tools: [], threadId: "t1" }, + { executeTool: vi.fn() }, + )) { + // drain + } + }).rejects.toThrow("Databricks API error (401): Unauthorized"); + }); +}); + +describe("DatabricksAdapter.fromServingEndpoint", () => { + test("routes tool-free chat through apiClient.request with a streaming payload", async () => { + const apiClient = { + request: vi.fn().mockResolvedValue({ + contents: createReadableStream([textDelta("Hi"), sseChunk("[DONE]")]), + }), + }; + + const adapter = await DatabricksAdapter.fromServingEndpoint({ + workspaceClient: { apiClient }, + endpointName: "my-model", + }); + + for await (const _ of adapter.run( + { messages: createTestMessages(), tools: [], threadId: "t1" }, + { executeTool: vi.fn() }, + )) { + // drain + } + + expect(apiClient.request).toHaveBeenCalledTimes(1); + const [requestArgs] = 
apiClient.request.mock.calls[0]; + expect(requestArgs.path).toBe("/serving-endpoints/my-model/invocations"); + expect(requestArgs.method).toBe("POST"); + expect(requestArgs.raw).toBe(true); + expect(requestArgs.payload.stream).toBe(true); + // Auth + url encoding are the connector's (and the SDK's) concerns — the + // adapter no longer reaches into the workspace config. + }); + + test("URL-encodes endpoint names with special characters", async () => { + const apiClient = { + request: vi.fn().mockResolvedValue({ + contents: createReadableStream([textDelta("Hi"), sseChunk("[DONE]")]), + }), + }; + + const adapter = await DatabricksAdapter.fromServingEndpoint({ + workspaceClient: { apiClient }, + endpointName: "my model/with spaces", + }); + + for await (const _ of adapter.run( + { messages: createTestMessages(), tools: [], threadId: "t1" }, + { executeTool: vi.fn() }, + )) { + // drain + } + + const [requestArgs] = apiClient.request.mock.calls[0]; + expect(requestArgs.path).toBe( + "/serving-endpoints/my%20model%2Fwith%20spaces/invocations", + ); + }); +}); + +describe("DatabricksAdapter.fromModelServing", () => { + const originalEnv = process.env; + + beforeEach(() => { + process.env = { ...originalEnv }; + }); + + afterEach(() => { + process.env = originalEnv; + }); + + test("reads endpoint from DATABRICKS_AGENT_ENDPOINT env var", async () => { + process.env.DATABRICKS_AGENT_ENDPOINT = "my-model"; + + vi.mock("@databricks/sdk-experimental", () => ({ + WorkspaceClient: vi.fn().mockImplementation(() => ({ + apiClient: { request: vi.fn() }, + })), + })); + + const adapter = await DatabricksAdapter.fromModelServing(); + expect(adapter).toBeInstanceOf(DatabricksAdapter); + }); + + test("throws when no endpoint name and no env var", async () => { + delete process.env.DATABRICKS_AGENT_ENDPOINT; + + await expect(DatabricksAdapter.fromModelServing()).rejects.toThrow( + "No endpoint name provided", + ); + }); + + test("explicit endpoint name takes precedence over env var", 
async () => { + process.env.DATABRICKS_AGENT_ENDPOINT = "env-model"; + + const apiClient = { + request: vi.fn().mockResolvedValue({ + contents: createReadableStream([textDelta("Hi"), sseChunk("[DONE]")]), + }), + }; + + const adapter = await DatabricksAdapter.fromModelServing("explicit-model", { + workspaceClient: { apiClient }, + }); + + expect(adapter).toBeInstanceOf(DatabricksAdapter); + + for await (const _ of adapter.run( + { messages: createTestMessages(), tools: [], threadId: "t1" }, + { executeTool: vi.fn() }, + )) { + // drain + } + + const [requestArgs] = apiClient.request.mock.calls[0]; + expect(requestArgs.path).toBe( + "/serving-endpoints/explicit-model/invocations", + ); + }); +}); + +describe("parseTextToolCalls", () => { + test("parses Llama JSON format", () => { + const text = + '[{"name": "analytics.query", "parameters": {"query": "SELECT 1"}}]'; + const result = parseTextToolCalls(text); + + expect(result).toEqual([ + { name: "analytics.query", args: { query: "SELECT 1" } }, + ]); + }); + + test("parses multiple Llama JSON tool calls", () => { + const text = + '[{"name": "analytics.query", "parameters": {"query": "SELECT 1"}}, {"name": "files.uploads.list", "parameters": {}}]'; + const result = parseTextToolCalls(text); + + expect(result).toHaveLength(2); + expect(result[0].name).toBe("analytics.query"); + expect(result[1].name).toBe("files.uploads.list"); + }); + + test("parses Python-style tool calls", () => { + const text = + "[analytics.query(query='SELECT * FROM trips ORDER BY date DESC LIMIT 10')]"; + const result = parseTextToolCalls(text); + + expect(result).toEqual([ + { + name: "analytics.query", + args: { + query: "SELECT * FROM trips ORDER BY date DESC LIMIT 10", + }, + }, + ]); + }); + + test("parses Python-style with multiple args", () => { + const text = + "[files.uploads.read(path='/data/file.csv', encoding='utf-8')]"; + const result = parseTextToolCalls(text); + + expect(result).toEqual([ + { + name: "files.uploads.read", + args: 
{ path: "/data/file.csv", encoding: "utf-8" }, + }, + ]); + }); + + test("returns empty array for plain text", () => { + expect(parseTextToolCalls("Hello, how can I help?")).toEqual([]); + expect(parseTextToolCalls("")).toEqual([]); + expect(parseTextToolCalls("The answer is 42")).toEqual([]); + }); + + test("handles Llama format with 'arguments' key", () => { + const text = + '[{"name": "lakebase.query", "arguments": {"text": "SELECT 1"}}]'; + const result = parseTextToolCalls(text); + + expect(result).toEqual([ + { name: "lakebase.query", args: { text: "SELECT 1" } }, + ]); + }); +}); diff --git a/packages/appkit/tsdown.config.ts b/packages/appkit/tsdown.config.ts index 976987142..c1b875174 100644 --- a/packages/appkit/tsdown.config.ts +++ b/packages/appkit/tsdown.config.ts @@ -4,7 +4,7 @@ export default defineConfig([ { publint: true, name: "@databricks/appkit", - entry: "src/index.ts", + entry: ["src/index.ts", "src/agents/databricks.ts"], outDir: "dist", hash: false, format: "esm", diff --git a/packages/shared/src/agent.ts b/packages/shared/src/agent.ts new file mode 100644 index 000000000..c4f76b294 --- /dev/null +++ b/packages/shared/src/agent.ts @@ -0,0 +1,212 @@ +import type { JSONSchema7 } from "json-schema"; + +// --------------------------------------------------------------------------- +// Tool definitions +// --------------------------------------------------------------------------- + +export interface ToolAnnotations { + readOnly?: boolean; + destructive?: boolean; + idempotent?: boolean; + requiresUserContext?: boolean; +} + +export interface AgentToolDefinition { + name: string; + description: string; + parameters: JSONSchema7; + annotations?: ToolAnnotations; +} + +export interface ToolProvider { + getAgentTools(): AgentToolDefinition[]; + executeAgentTool( + name: string, + args: unknown, + signal?: AbortSignal, + ): Promise; +} + +// --------------------------------------------------------------------------- +// Messages & threads +// 
--------------------------------------------------------------------------- + +export interface Message { + id: string; + role: "user" | "assistant" | "system" | "tool"; + content: string; + toolCallId?: string; + toolCalls?: ToolCall[]; + createdAt: Date; +} + +export interface ToolCall { + id: string; + name: string; + args: unknown; +} + +export interface Thread { + id: string; + userId: string; + messages: Message[]; + createdAt: Date; + updatedAt: Date; +} + +// --------------------------------------------------------------------------- +// Thread store +// --------------------------------------------------------------------------- + +export interface ThreadStore { + create(userId: string): Promise; + get(threadId: string, userId: string): Promise; + list(userId: string): Promise; + addMessage(threadId: string, userId: string, message: Message): Promise; + delete(threadId: string, userId: string): Promise; +} + +// --------------------------------------------------------------------------- +// Agent events (SSE protocol) +// --------------------------------------------------------------------------- + +export type AgentEvent = + | { type: "message_delta"; content: string } + | { type: "message"; content: string } + | { type: "tool_call"; callId: string; name: string; args: unknown } + | { + type: "tool_result"; + callId: string; + result: unknown; + error?: string; + } + | { type: "thinking"; content: string } + | { + type: "status"; + status: "running" | "waiting" | "complete" | "error"; + error?: string; + } + | { type: "metadata"; data: Record }; + +// --------------------------------------------------------------------------- +// Responses API types (OpenAI-compatible wire format for HTTP boundary) +// Self-contained — no openai package dependency. 
+// --------------------------------------------------------------------------- + +export interface OutputTextContent { + type: "output_text"; + text: string; +} + +export interface ResponseOutputMessage { + type: "message"; + id: string; + status: "in_progress" | "completed"; + role: "assistant"; + content: OutputTextContent[]; +} + +export interface ResponseFunctionToolCall { + type: "function_call"; + id: string; + call_id: string; + name: string; + arguments: string; +} + +export interface ResponseFunctionCallOutput { + type: "function_call_output"; + id: string; + call_id: string; + output: string; +} + +export type ResponseOutputItem = + | ResponseOutputMessage + | ResponseFunctionToolCall + | ResponseFunctionCallOutput; + +export interface ResponseOutputItemAddedEvent { + type: "response.output_item.added"; + output_index: number; + item: ResponseOutputItem; + sequence_number: number; +} + +export interface ResponseOutputItemDoneEvent { + type: "response.output_item.done"; + output_index: number; + item: ResponseOutputItem; + sequence_number: number; +} + +export interface ResponseTextDeltaEvent { + type: "response.output_text.delta"; + item_id: string; + output_index: number; + content_index: number; + delta: string; + sequence_number: number; +} + +export interface ResponseCompletedEvent { + type: "response.completed"; + sequence_number: number; + response: Record; +} + +export interface ResponseErrorEvent { + type: "error"; + error: string; + sequence_number: number; +} + +export interface ResponseFailedEvent { + type: "response.failed"; + sequence_number: number; +} + +export interface AppKitThinkingEvent { + type: "appkit.thinking"; + content: string; + sequence_number: number; +} + +export interface AppKitMetadataEvent { + type: "appkit.metadata"; + data: Record; + sequence_number: number; +} + +export type ResponseStreamEvent = + | ResponseOutputItemAddedEvent + | ResponseOutputItemDoneEvent + | ResponseTextDeltaEvent + | ResponseCompletedEvent + | 
ResponseErrorEvent + | ResponseFailedEvent + | AppKitThinkingEvent + | AppKitMetadataEvent; + +// --------------------------------------------------------------------------- +// Adapter contract +// --------------------------------------------------------------------------- + +export interface AgentInput { + messages: Message[]; + tools: AgentToolDefinition[]; + threadId: string; + signal?: AbortSignal; +} + +export interface AgentRunContext { + executeTool: (name: string, args: unknown) => Promise; + signal?: AbortSignal; +} + +export interface AgentAdapter { + run( + input: AgentInput, + context: AgentRunContext, + ): AsyncGenerator; +} diff --git a/packages/shared/src/index.ts b/packages/shared/src/index.ts index 627d70d6c..9829729a7 100644 --- a/packages/shared/src/index.ts +++ b/packages/shared/src/index.ts @@ -1,3 +1,4 @@ +export * from "./agent"; export * from "./cache"; export * from "./execute"; export * from "./genie"; diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 9ca11b818..16079b1d6 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -326,7 +326,16 @@ importers: ws: specifier: 8.18.3 version: 8.18.3(bufferutil@4.0.9) + zod: + specifier: ^4.0.0 + version: 4.1.13 devDependencies: + '@ai-sdk/openai': + specifier: 4.0.0-beta.27 + version: 4.0.0-beta.27(zod@4.1.13) + '@langchain/core': + specifier: ^1.1.39 + version: 1.1.39(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.208.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.6.0(@opentelemetry/api@1.9.0))(ws@8.18.3(bufferutil@4.0.9)) '@types/express': specifier: 4.17.25 version: 4.17.25 @@ -342,6 +351,9 @@ importers: '@vitejs/plugin-react': specifier: 5.1.1 version: 5.1.1(rolldown-vite@7.1.14(@types/node@25.2.3)(esbuild@0.25.10)(jiti@2.6.1)(terser@5.44.1)(tsx@4.20.6)(yaml@2.8.2)) + ai: + specifier: 7.0.0-beta.76 + version: 7.0.0-beta.76(zod@4.1.13) packages/appkit-ui: dependencies: @@ -561,16 +573,38 @@ packages: peerDependencies: zod: ^3.25.76 || ^4.1.8 + 
'@ai-sdk/gateway@4.0.0-beta.43': + resolution: {integrity: sha512-EGQe4If6jt1ZhENmwZn8UAeHbEc7DRiK7ff7dwgfNthwso2hdzLbgXzuTO+W/op+oDFQK1pKiAz5RrPsVQWiew==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + + '@ai-sdk/openai@4.0.0-beta.27': + resolution: {integrity: sha512-7DpXCE4pcc4pVzuEc0whMrQN6Whi14Qsqjx97mLPGjpS6Lff48Zcn2322IFpWuhVJ10hIM1kEZNxUYvXt1O/yg==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + '@ai-sdk/provider-utils@3.0.19': resolution: {integrity: sha512-W41Wc9/jbUVXVwCN/7bWa4IKe8MtxO3EyA0Hfhx6grnmiYlCvpI8neSYWFE0zScXJkgA/YK3BRybzgyiXuu6JA==} engines: {node: '>=18'} peerDependencies: zod: ^3.25.76 || ^4.1.8 + '@ai-sdk/provider-utils@5.0.0-beta.16': + resolution: {integrity: sha512-CyMV5go6libw5WaZ4m7nO0uRLTENxbIODiDrTXJNwxYIBR8p5aCGaxt9oj3prbvNkTt0Srh/Gyw+n2pR9hQ5Pg==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + '@ai-sdk/provider@2.0.0': resolution: {integrity: sha512-6o7Y2SeO9vFKB8lArHXehNuusnpddKPk7xqL7T2/b+OvXMRIXUO1rR4wcv1hAFUAT9avGZshty3Wlua/XA7TvA==} engines: {node: '>=18'} + '@ai-sdk/provider@4.0.0-beta.10': + resolution: {integrity: sha512-E2O/LCWjqOxAUfpykQR4xLmcGXySIu6L+wYJjav2xiHu38otPq0qIexgH9ZKulBvBWkrtJ3fxz0kzHDlCBkwng==} + engines: {node: '>=18'} + '@ai-sdk/react@2.0.115': resolution: {integrity: sha512-Etu7gWSEi2dmXss1PoR5CAZGwGShXsF9+Pon1eRO6EmatjYaBMhq1CfHPyYhGzWrint8jJIK2VaAhiMef29qZw==} engines: {node: '>=18'} @@ -1520,6 +1554,9 @@ packages: resolution: {integrity: sha512-hAs5PPKPCQ3/Nha+1fo4A4/gL85fIfxZwHPehsjCJ+BhQH2/yw6/xReuaPA/RfNQr6iz1PcD7BZcE3ctyyl3EA==} cpu: [x64] + '@cfworker/json-schema@4.1.1': + resolution: {integrity: sha512-gAmrUZSGtKc3AiBL71iNWxDsyUC5uMaKKGdvzYsBoTW/xi42JQHl7eKV2OYzCUqvc+D2RCcf7EXY2iCyFIk6og==} + '@chevrotain/cst-dts-gen@11.0.3': resolution: {integrity: sha512-BvIKpRLeS/8UbfxXxgC33xOumsacaeCKAjAeLyOn7Pcp95HiRbrpl14S+9vaZLolnbssPIUuiUd8IvgkRyt6NQ==} @@ -2646,6 +2683,10 @@ packages: '@kwsites/file-exists@1.1.1': resolution: 
{integrity: sha512-m9/5YGR18lIwxSFDwfE3oA7bWuq9kdau6ugN4H2rJeyhFQZcG9AgSHkQtSD15a8WvTgfz9aikZMrKPHvbpqFiw==} + '@langchain/core@1.1.39': + resolution: {integrity: sha512-DP9c7TREy6iA7HnywstmUAsNyJNYTFpRg2yBfQ+6H0l1HnvQzei9GsQ36GeOLxgRaD3vm9K8urCcawSC7yQpCw==} + engines: {node: '>=20'} + '@leichtgewicht/ip-codec@2.0.5': resolution: {integrity: sha512-Vo+PSpZG2/fmgmiNzYK9qWRh8h/CHrwD0mo1h1DzL4yzHNSfWYujGTYsWGreD000gcgmZ7K4Ys6Tx9TxtsKdDw==} @@ -5166,6 +5207,10 @@ packages: resolution: {integrity: sha512-fnYhv671l+eTTp48gB4zEsTW/YtRgRPnkI2nT7x6qw5rkI1Lq2hTmQIpHPgyThI0znLK+vX2n9XxKdXZ7BUbbw==} engines: {node: '>= 20'} + '@vercel/oidc@3.2.0': + resolution: {integrity: sha512-UycprH3T6n3jH0k44NHMa7pnFHGu/N05MjojYr+Mc6I7obkoLIJujSWwin1pCvdy/eOxrI/l3uDLQsmcrOb4ug==} + engines: {node: '>= 20'} + '@vitejs/plugin-react@5.0.4': resolution: {integrity: sha512-La0KD0vGkVkSk6K+piWDKRUyg8Rl5iAIKRMH0vMJI0Eg47bq1eOxmoObAaQG37WMW9MSyk7Cs8EIWwJC1PtzKA==} engines: {node: ^20.19.0 || >=22.12.0} @@ -5321,6 +5366,12 @@ packages: peerDependencies: zod: ^3.25.76 || ^4.1.8 + ai@7.0.0-beta.76: + resolution: {integrity: sha512-yJMCqsnfUi8jnFOvxmXhjMZd0YVSCLk1E5PZpqmGWynvo3uADt1XADYYYRcj0I9Q2wsL4HbCLAKe01I8aswzJg==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + ajv-formats@2.1.1: resolution: {integrity: sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==} peerDependencies: @@ -6453,6 +6504,10 @@ packages: supports-color: optional: true + decamelize@1.2.0: + resolution: {integrity: sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==} + engines: {node: '>=0.10.0'} + decimal.js-light@2.5.1: resolution: {integrity: sha512-qIMFpTMZmny+MMIitAB6D7iVPEorVw6YQRWkvarTkT4tBeSLLiHzcwj6q0MmYSFCiVpiqPJTJEYIrpcPzVEIvg==} @@ -8109,6 +8164,9 @@ packages: joi@17.13.3: resolution: {integrity: sha512-otDA4ldcIx+ZXsKHWmp0YizCweVRZG96J10b0FevjfuncLO1oX59THoAmHkNubYJ+9gWsYsp5k8v4ib6oDv1fA==} + 
js-tiktoken@1.0.21: + resolution: {integrity: sha512-biOj/6M5qdgx5TKjDnFT1ymSpM5tbd3ylwDtrQvFQSu0Z7bBYko2dF+W/aUkXUPuk6IVpRxk/3Q2sHOzGlS36g==} + js-tokens@4.0.0: resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==} @@ -8238,6 +8296,26 @@ packages: resolution: {integrity: sha512-QJv/h939gDpvT+9SiLVlY7tZC3xB2qK57v0J04Sh9wpMb6MP1q8gB21L3WIo8T5P1MSMg3Ep14L7KkDCFG3y4w==} engines: {node: '>=16.0.0'} + langsmith@0.5.18: + resolution: {integrity: sha512-3zuZUWffTHQ+73EAwnodADtf534VNEZUpXr9jC12qyG8/IQuJET7PRsCpTb9wX2lmBspakwLUpqpj3tNm/0bVA==} + peerDependencies: + '@opentelemetry/api': '*' + '@opentelemetry/exporter-trace-otlp-proto': '*' + '@opentelemetry/sdk-trace-base': '*' + openai: '*' + ws: '>=7' + peerDependenciesMeta: + '@opentelemetry/api': + optional: true + '@opentelemetry/exporter-trace-otlp-proto': + optional: true + '@opentelemetry/sdk-trace-base': + optional: true + openai: + optional: true + ws: + optional: true + latest-version@7.0.0: resolution: {integrity: sha512-KvNT4XqAMzdcL6ka6Tl3i2lYeFDgXNCuIX+xNx6ZMVR1dFq+idXd9FLKNMOIx0t9mJ9/HudyX4oZWXZQ0UJHeg==} engines: {node: '>=14.16'} @@ -8910,6 +8988,10 @@ packages: resolution: {integrity: sha512-2eznPJP8z2BFLX50tf0LuODrpINqP1RVIm/CObbTcBRITQgmC/TjcREF1NeTBzIcR5XO/ukWo+YHOjBbFwIupg==} hasBin: true + mustache@4.2.0: + resolution: {integrity: sha512-71ippSywq5Yb7/tVYyGbkBggbU8H3u5Rz56fH60jGFgr8uHwxs+aSKeqmluIVzM0m0kB7xQjKS6qPfd0b2ZoqQ==} + hasBin: true + mute-stream@2.0.0: resolution: {integrity: sha512-WWdIxpyjEn+FhQJQQv9aQAYlHoNVdzIzUySNV1gHUPDSdZJ3yZn7pAAbQcV7B56Mvu881q9FZV+0Vx2xC44VWA==} engines: {node: ^18.17.0 || >=20.5.0} @@ -11463,6 +11545,10 @@ packages: resolution: {integrity: sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==} engines: {node: '>= 0.4.0'} + uuid@10.0.0: + resolution: {integrity: sha512-8XkAphELsDnEGrDxUOHB3RGvXz6TeuYSGEZBOjtTtPm2lwhGBjLgOzLHB63IUWfBpNucQjND6d3AOudO+H3RWQ==} + 
hasBin: true + uuid@11.1.0: resolution: {integrity: sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==} hasBin: true @@ -11937,6 +12023,19 @@ snapshots: '@vercel/oidc': 3.0.5 zod: 4.1.13 + '@ai-sdk/gateway@4.0.0-beta.43(zod@4.1.13)': + dependencies: + '@ai-sdk/provider': 4.0.0-beta.10 + '@ai-sdk/provider-utils': 5.0.0-beta.16(zod@4.1.13) + '@vercel/oidc': 3.2.0 + zod: 4.1.13 + + '@ai-sdk/openai@4.0.0-beta.27(zod@4.1.13)': + dependencies: + '@ai-sdk/provider': 4.0.0-beta.10 + '@ai-sdk/provider-utils': 5.0.0-beta.16(zod@4.1.13) + zod: 4.1.13 + '@ai-sdk/provider-utils@3.0.19(zod@4.1.13)': dependencies: '@ai-sdk/provider': 2.0.0 @@ -11944,10 +12043,21 @@ snapshots: eventsource-parser: 3.0.6 zod: 4.1.13 + '@ai-sdk/provider-utils@5.0.0-beta.16(zod@4.1.13)': + dependencies: + '@ai-sdk/provider': 4.0.0-beta.10 + '@standard-schema/spec': 1.1.0 + eventsource-parser: 3.0.6 + zod: 4.1.13 + '@ai-sdk/provider@2.0.0': dependencies: json-schema: 0.4.0 + '@ai-sdk/provider@4.0.0-beta.10': + dependencies: + json-schema: 0.4.0 + '@ai-sdk/react@2.0.115(react@19.2.0)(zod@4.1.13)': dependencies: '@ai-sdk/provider-utils': 3.0.19(zod@4.1.13) @@ -13078,6 +13188,8 @@ snapshots: '@cdxgen/cdxgen-plugins-bin@2.0.2': optional: true + '@cfworker/json-schema@4.1.1': {} + '@chevrotain/cst-dts-gen@11.0.3': dependencies: '@chevrotain/gast': 11.0.3 @@ -14858,6 +14970,26 @@ snapshots: transitivePeerDependencies: - supports-color + '@langchain/core@1.1.39(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.208.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.6.0(@opentelemetry/api@1.9.0))(ws@8.18.3(bufferutil@4.0.9))': + dependencies: + '@cfworker/json-schema': 4.1.1 + '@standard-schema/spec': 1.1.0 + ansi-styles: 5.2.0 + camelcase: 6.3.0 + decamelize: 1.2.0 + js-tiktoken: 1.0.21 + langsmith: 
0.5.18(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.208.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.6.0(@opentelemetry/api@1.9.0))(ws@8.18.3(bufferutil@4.0.9)) + mustache: 4.2.0 + p-queue: 6.6.2 + uuid: 11.1.0 + zod: 4.1.13 + transitivePeerDependencies: + - '@opentelemetry/api' + - '@opentelemetry/exporter-trace-otlp-proto' + - '@opentelemetry/sdk-trace-base' + - openai + - ws + '@leichtgewicht/ip-codec@2.0.5': {} '@mdx-js/mdx@3.1.1': @@ -17555,6 +17687,8 @@ snapshots: '@vercel/oidc@3.0.5': {} + '@vercel/oidc@3.2.0': {} + '@vitejs/plugin-react@5.0.4(vite@7.2.4(@types/node@24.7.2)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.20.6)(yaml@2.8.2))': dependencies: '@babel/core': 7.28.5 @@ -17779,6 +17913,13 @@ snapshots: '@opentelemetry/api': 1.9.0 zod: 4.1.13 + ai@7.0.0-beta.76(zod@4.1.13): + dependencies: + '@ai-sdk/gateway': 4.0.0-beta.43(zod@4.1.13) + '@ai-sdk/provider': 4.0.0-beta.10 + '@ai-sdk/provider-utils': 5.0.0-beta.16(zod@4.1.13) + zod: 4.1.13 + ajv-formats@2.1.1(ajv@8.17.1): optionalDependencies: ajv: 8.17.1 @@ -19053,6 +19194,8 @@ snapshots: dependencies: ms: 2.1.3 + decamelize@1.2.0: {} + decimal.js-light@2.5.1: {} decimal.js@10.6.0: {} @@ -20873,6 +21016,10 @@ snapshots: '@sideway/formula': 3.0.1 '@sideway/pinpoint': 2.0.0 + js-tiktoken@1.0.21: + dependencies: + base64-js: 1.5.1 + js-tokens@4.0.0: {} js-tokens@9.0.1: {} @@ -21027,6 +21174,16 @@ snapshots: vscode-languageserver-textdocument: 1.0.12 vscode-uri: 3.0.8 + langsmith@0.5.18(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.208.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.6.0(@opentelemetry/api@1.9.0))(ws@8.18.3(bufferutil@4.0.9)): + dependencies: + p-queue: 6.6.2 + uuid: 10.0.0 + optionalDependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/exporter-trace-otlp-proto': 0.208.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.6.0(@opentelemetry/api@1.9.0) + ws: 8.18.3(bufferutil@4.0.9) + 
latest-version@7.0.0: dependencies: package-json: 8.1.1 @@ -21964,6 +22121,8 @@ snapshots: dns-packet: 5.6.1 thunky: 1.1.0 + mustache@4.2.0: {} + mute-stream@2.0.0: {} nanoid@3.3.11: {} @@ -24753,6 +24912,8 @@ snapshots: utils-merge@1.0.1: {} + uuid@10.0.0: {} + uuid@11.1.0: {} uuid@13.0.0: {}