diff --git a/src/app-bridge.ts b/src/app-bridge.ts index 6e1c02172..e17550c9f 100644 --- a/src/app-bridge.ts +++ b/src/app-bridge.ts @@ -79,6 +79,9 @@ import { McpUiRequestDisplayModeResult, McpUiResourcePermissions, McpUiToolMeta, + McpUiSamplingCreateMessageRequest, + McpUiSamplingCreateMessageRequestSchema, + McpUiSamplingCreateMessageResult, } from "./types"; export * from "./types"; export { RESOURCE_URI_META_KEY, RESOURCE_MIME_TYPE } from "./app"; @@ -737,6 +740,64 @@ export class AppBridge extends Protocol< ); } + /** + * Register a handler for sampling/createMessage requests from the view. + * + * The view sends `sampling/createMessage` requests when it needs an LLM + * completion. The host fulfills the request using its configured LLM provider. + * This handler is only called if the host advertises `sampling` in its + * capabilities during initialization. + * + * The handler receives the conversation messages, an optional system prompt, + * and an optional max token limit, and should return the LLM's response. 
+ * + * **Security considerations:** + * - Hosts SHOULD implement rate limiting to prevent abuse + * - Hosts SHOULD apply content filtering/moderation policies + * - Hosts SHOULD log sampling requests for audit purposes + * - Hosts MAY enforce cost management controls (e.g., per-session token budgets) + * + * @param callback - Handler that receives sampling params and returns a result + * - `params.messages` - Conversation messages providing context + * - `params.systemPrompt` - Optional system prompt to guide LLM behavior + * - `params.maxTokens` - Optional maximum number of tokens to generate + * - `extra` - Request metadata (abort signal, session info) + * - Returns: `Promise<McpUiSamplingCreateMessageResult>` with model, role, content, and stopReason + * + * @example + * ```typescript + * bridge.oncreatesamplingmessage = async ({ messages, systemPrompt, maxTokens }, extra) => { + * const response = await llmProvider.createCompletion({ + * messages, + * systemPrompt, + * maxTokens: maxTokens ?? 1024, + * }); + * return { + * model: response.model, + * stopReason: response.stopReason, + * role: "assistant", + * content: { type: "text", text: response.text }, + * }; + * }; + * ``` + * + * @see {@link McpUiSamplingCreateMessageRequest `McpUiSamplingCreateMessageRequest`} for the request type + * @see {@link McpUiSamplingCreateMessageResult `McpUiSamplingCreateMessageResult`} for the result type + */ + set oncreatesamplingmessage( + callback: ( + params: McpUiSamplingCreateMessageRequest["params"], + extra: RequestHandlerExtra, + ) => Promise<McpUiSamplingCreateMessageResult>, + ) { + this.setRequestHandler( + McpUiSamplingCreateMessageRequestSchema, + async (request, extra) => { + return callback(request.params, extra); + }, + ); + } + /** * Register a handler for tool call requests from the view. 
* diff --git a/src/app.ts b/src/app.ts index 16f8da67f..344ba6ad0 100644 --- a/src/app.ts +++ b/src/app.ts @@ -47,6 +47,8 @@ import { McpUiToolResultNotificationSchema, McpUiRequestDisplayModeRequest, McpUiRequestDisplayModeResultSchema, + McpUiSamplingCreateMessageRequest, + McpUiSamplingCreateMessageResultSchema, } from "./types"; import { Transport } from "@modelcontextprotocol/sdk/shared/transport.js"; @@ -970,6 +972,75 @@ export class App extends Protocol { ); } + /** + * Request an LLM completion from the host. + * + * Sends a `sampling/createMessage` request to the host, which fulfills it + * using its configured LLM provider. This enables Views to leverage the + * host's language model capabilities without needing direct API access. + * + * The host must advertise `sampling` in its capabilities during initialization + * for this method to succeed. Check capabilities before calling: + * + * ```typescript + * if (app.getHostCapabilities()?.sampling) { + * const result = await app.createSamplingMessage({ ... }); + * } + * ``` + * + * @param params - Sampling request parameters + * - `messages` - Conversation messages providing context for the completion + * - `systemPrompt` - Optional system prompt to guide LLM behavior + * - `maxTokens` - Optional maximum number of tokens to generate + * @param options - Request options (timeout, abort signal, etc.) 
+ * @returns Result containing the LLM's response with model info, role, content, and stop reason + * + * @throws {McpError} With `MethodNotFound` code if the host does not support sampling + * @throws {McpError} With `InvalidRequest` code if the request is malformed + * @throws {Error} If the request times out or the connection is lost + * + * @example Basic text completion + * ```typescript + * const result = await app.createSamplingMessage({ + * messages: [ + * { role: "user", content: { type: "text", text: "Summarize this data" } }, + * ], + * maxTokens: 512, + * }); + * console.log(result.content.text); + * ``` + * + * @example Multi-turn conversation with system prompt + * ```typescript + * const result = await app.createSamplingMessage({ + * messages: [ + * { role: "user", content: { type: "text", text: "What is 2+2?" } }, + * { role: "assistant", content: { type: "text", text: "4" } }, + * { role: "user", content: { type: "text", text: "And 3+3?" } }, + * ], + * systemPrompt: "You are a helpful math tutor.", + * maxTokens: 256, + * }); + * ``` + * + * @see {@link McpUiSamplingCreateMessageRequest `McpUiSamplingCreateMessageRequest`} for the request type + * @see {@link McpUiSamplingCreateMessageResult `McpUiSamplingCreateMessageResult`} for the result type + * @see {@link McpUiHostCapabilities `McpUiHostCapabilities`} for checking sampling support + */ + createSamplingMessage( + params: McpUiSamplingCreateMessageRequest["params"], + options?: RequestOptions, + ) { + return this.request( + { + method: "sampling/createMessage", + params, + }, + McpUiSamplingCreateMessageResultSchema, + options, + ); + } + /** * Notify the host of UI size changes. 
* diff --git a/src/generated/schema.json b/src/generated/schema.json index d9e4b582c..f3f69fae8 100644 --- a/src/generated/schema.json +++ b/src/generated/schema.json @@ -283,6 +283,29 @@ } }, "additionalProperties": false + }, + "sampling": { + "description": "Host can fulfill sampling requests (sampling/createMessage) from the View.", + "type": "object", + "properties": { + "supportedModalities": { + "description": "Supported content modalities for sampling messages (default: [\"text\"]).", + "type": "array", + "items": { + "anyOf": [ + { + "type": "string", + "const": "text" + }, + { + "type": "string", + "const": "image" + } + ] + } + } + }, + "additionalProperties": false } }, "additionalProperties": false @@ -2629,6 +2652,29 @@ } }, "additionalProperties": false + }, + "sampling": { + "description": "Host can fulfill sampling requests (sampling/createMessage) from the View.", + "type": "object", + "properties": { + "supportedModalities": { + "description": "Supported content modalities for sampling messages (default: [\"text\"]).", + "type": "array", + "items": { + "anyOf": [ + { + "type": "string", + "const": "text" + }, + { + "type": "string", + "const": "image" + } + ] + } + } + }, + "additionalProperties": false } }, "additionalProperties": false, @@ -4041,6 +4087,186 @@ }, "additionalProperties": {} }, + "McpUiSamplingCreateMessageRequest": { + "$schema": "https://json-schema.org/draft/2020-12/schema", + "type": "object", + "properties": { + "method": { + "type": "string", + "const": "sampling/createMessage" + }, + "params": { + "type": "object", + "properties": { + "messages": { + "type": "array", + "items": { + "type": "object", + "properties": { + "role": { + "anyOf": [ + { + "type": "string", + "const": "user" + }, + { + "type": "string", + "const": "assistant" + } + ], + "description": "The role of the message sender." 
+ }, + "content": { + "anyOf": [ + { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "text" + }, + "text": { + "type": "string" + } + }, + "required": ["type", "text"], + "additionalProperties": false + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "image" + }, + "data": { + "type": "string" + }, + "mimeType": { + "type": "string" + } + }, + "required": ["type", "data", "mimeType"], + "additionalProperties": false + } + ], + "description": "The content of the message." + } + }, + "required": ["role", "content"], + "additionalProperties": false + }, + "description": "Conversation messages providing context for the completion." + }, + "systemPrompt": { + "description": "Optional system prompt to guide the LLM's behavior.", + "type": "string" + }, + "maxTokens": { + "description": "Optional maximum number of tokens to generate.", + "type": "number" + } + }, + "required": ["messages"], + "additionalProperties": false + } + }, + "required": ["method", "params"], + "additionalProperties": false + }, + "McpUiSamplingCreateMessageResult": { + "$schema": "https://json-schema.org/draft/2020-12/schema", + "type": "object", + "properties": { + "model": { + "type": "string", + "description": "The model that generated the completion." + }, + "stopReason": { + "type": "string", + "description": "The reason the model stopped generating (e.g., \"endTurn\", \"maxTokens\")." + }, + "role": { + "type": "string", + "const": "assistant", + "description": "The role of the generated message (always \"assistant\")." + }, + "content": { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "text" + }, + "text": { + "type": "string" + } + }, + "required": ["type", "text"], + "additionalProperties": false, + "description": "The generated content." 
+ } + }, + "required": ["model", "stopReason", "role", "content"], + "additionalProperties": {} + }, + "McpUiSamplingMessage": { + "$schema": "https://json-schema.org/draft/2020-12/schema", + "type": "object", + "properties": { + "role": { + "anyOf": [ + { + "type": "string", + "const": "user" + }, + { + "type": "string", + "const": "assistant" + } + ], + "description": "The role of the message sender." + }, + "content": { + "anyOf": [ + { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "text" + }, + "text": { + "type": "string" + } + }, + "required": ["type", "text"], + "additionalProperties": false + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "image" + }, + "data": { + "type": "string" + }, + "mimeType": { + "type": "string" + } + }, + "required": ["type", "data", "mimeType"], + "additionalProperties": false + } + ], + "description": "The content of the message." + } + }, + "required": ["role", "content"], + "additionalProperties": false + }, "McpUiSandboxProxyReadyNotification": { "$schema": "https://json-schema.org/draft/2020-12/schema", "type": "object", diff --git a/src/generated/schema.test.ts b/src/generated/schema.test.ts index 95ec2f216..2c8c0d46b 100644 --- a/src/generated/schema.test.ts +++ b/src/generated/schema.test.ts @@ -103,6 +103,18 @@ export type McpUiResourceMetaSchemaInferredType = z.infer< typeof generated.McpUiResourceMetaSchema >; +export type McpUiSamplingMessageSchemaInferredType = z.infer< + typeof generated.McpUiSamplingMessageSchema +>; + +export type McpUiSamplingCreateMessageRequestSchemaInferredType = z.infer< + typeof generated.McpUiSamplingCreateMessageRequestSchema +>; + +export type McpUiSamplingCreateMessageResultSchemaInferredType = z.infer< + typeof generated.McpUiSamplingCreateMessageResultSchema +>; + export type McpUiRequestDisplayModeRequestSchemaInferredType = z.infer< typeof generated.McpUiRequestDisplayModeRequestSchema >; @@ -261,6 +273,24 @@ 
expectType( ); expectType({} as McpUiResourceMetaSchemaInferredType); expectType({} as spec.McpUiResourceMeta); +expectType( + {} as McpUiSamplingMessageSchemaInferredType, +); +expectType( + {} as spec.McpUiSamplingMessage, +); +expectType( + {} as McpUiSamplingCreateMessageRequestSchemaInferredType, +); +expectType( + {} as spec.McpUiSamplingCreateMessageRequest, +); +expectType( + {} as McpUiSamplingCreateMessageResultSchemaInferredType, +); +expectType( + {} as spec.McpUiSamplingCreateMessageResult, +); expectType( {} as McpUiRequestDisplayModeRequestSchemaInferredType, ); diff --git a/src/generated/schema.ts b/src/generated/schema.ts index 9c75c3632..115a5b408 100644 --- a/src/generated/schema.ts +++ b/src/generated/schema.ts @@ -523,6 +523,21 @@ export const McpUiHostCapabilitiesSchema = z.object({ message: McpUiSupportedContentBlockModalitiesSchema.optional().describe( "Host supports receiving content messages (ui/message) from the view.", ), + /** @description Host can fulfill sampling requests (sampling/createMessage) from the View. */ + sampling: z + .object({ + /** @description Supported content modalities for sampling messages (default: ["text"]). */ + supportedModalities: z + .array(z.union([z.literal("text"), z.literal("image")])) + .optional() + .describe( + 'Supported content modalities for sampling messages (default: ["text"]).', + ), + }) + .optional() + .describe( + "Host can fulfill sampling requests (sampling/createMessage) from the View.", + ), }); /** @@ -618,6 +633,92 @@ export const McpUiResourceMetaSchema = z.object({ ), }); +/** + * @description A message in a sampling conversation. + * + * Used as input to `sampling/createMessage` requests. Each message has a role + * (user or assistant) and content that can be text or image. + */ +export const McpUiSamplingMessageSchema = z.object({ + /** @description The role of the message sender. 
*/ + role: z + .union([z.literal("user"), z.literal("assistant")]) + .describe("The role of the message sender."), + /** @description The content of the message. */ + content: z + .union([ + z.object({ + type: z.literal("text"), + text: z.string(), + }), + z.object({ + type: z.literal("image"), + data: z.string(), + mimeType: z.string(), + }), + ]) + .describe("The content of the message."), +}); + +/** + * @description Request to create a sampling message (LLM completion) from the host. + * + * The View sends this request when it needs an LLM completion. The host + * fulfills the request using its configured LLM provider. The host MAY + * enforce rate limits, content filtering, and cost controls. + * + * Requires the host to advertise `sampling` in its capabilities. + * + * @see {@link app!App.createSamplingMessage `App.createSamplingMessage`} for the method that sends this request + */ +export const McpUiSamplingCreateMessageRequestSchema = z.object({ + method: z.literal("sampling/createMessage"), + params: z.object({ + /** @description Conversation messages providing context for the completion. */ + messages: z + .array(McpUiSamplingMessageSchema) + .describe("Conversation messages providing context for the completion."), + /** @description Optional system prompt to guide the LLM's behavior. */ + systemPrompt: z + .string() + .optional() + .describe("Optional system prompt to guide the LLM's behavior."), + /** @description Optional maximum number of tokens to generate. */ + maxTokens: z + .number() + .optional() + .describe("Optional maximum number of tokens to generate."), + }), +}); + +/** + * @description Result from a sampling/createMessage request. + * @see {@link McpUiSamplingCreateMessageRequest `McpUiSamplingCreateMessageRequest`} + */ +export const McpUiSamplingCreateMessageResultSchema = z + .object({ + /** @description The model that generated the completion. 
*/ + model: z.string().describe("The model that generated the completion."), + /** @description The reason the model stopped generating (e.g., "endTurn", "maxTokens"). */ + stopReason: z + .string() + .describe( + 'The reason the model stopped generating (e.g., "endTurn", "maxTokens").', + ), + /** @description The role of the generated message (always "assistant"). */ + role: z + .literal("assistant") + .describe('The role of the generated message (always "assistant").'), + /** @description The generated content. */ + content: z + .object({ + type: z.literal("text"), + text: z.string(), + }) + .describe("The generated content."), + }) + .passthrough(); + /** * @description Request to change the display mode of the UI. * The host will respond with the actual display mode that was set, diff --git a/src/spec.types.ts b/src/spec.types.ts index 469ca1908..7c61e4c7f 100644 --- a/src/spec.types.ts +++ b/src/spec.types.ts @@ -473,6 +473,11 @@ export interface McpUiHostCapabilities { updateModelContext?: McpUiSupportedContentBlockModalities; /** @description Host supports receiving content messages (ui/message) from the view. */ message?: McpUiSupportedContentBlockModalities; + /** @description Host can fulfill sampling requests (sampling/createMessage) from the View. */ + sampling?: { + /** @description Supported content modalities for sampling messages (default: ["text"]). */ + supportedModalities?: Array<"text" | "image">; + }; } /** @@ -674,6 +679,64 @@ export interface McpUiResourceMeta { prefersBorder?: boolean; } +/** + * @description A message in a sampling conversation. + * + * Used as input to `sampling/createMessage` requests. Each message has a role + * (user or assistant) and content that can be text or image. + */ +export interface McpUiSamplingMessage { + /** @description The role of the message sender. */ + role: "user" | "assistant"; + /** @description The content of the message. 
*/ + content: + | { type: "text"; text: string } + | { type: "image"; data: string; mimeType: string }; +} + +/** + * @description Request to create a sampling message (LLM completion) from the host. + * + * The View sends this request when it needs an LLM completion. The host + * fulfills the request using its configured LLM provider. The host MAY + * enforce rate limits, content filtering, and cost controls. + * + * Requires the host to advertise `sampling` in its capabilities. + * + * @see {@link app!App.createSamplingMessage `App.createSamplingMessage`} for the method that sends this request + */ +export interface McpUiSamplingCreateMessageRequest { + method: "sampling/createMessage"; + params: { + /** @description Conversation messages providing context for the completion. */ + messages: McpUiSamplingMessage[]; + /** @description Optional system prompt to guide the LLM's behavior. */ + systemPrompt?: string; + /** @description Optional maximum number of tokens to generate. */ + maxTokens?: number; + }; +} + +/** + * @description Result from a sampling/createMessage request. + * @see {@link McpUiSamplingCreateMessageRequest `McpUiSamplingCreateMessageRequest`} + */ +export interface McpUiSamplingCreateMessageResult { + /** @description The model that generated the completion. */ + model: string; + /** @description The reason the model stopped generating (e.g., "endTurn", "maxTokens"). */ + stopReason: string; + /** @description The role of the generated message (always "assistant"). */ + role: "assistant"; + /** @description The generated content. */ + content: { type: "text"; text: string }; + /** + * Index signature required for MCP SDK `Protocol` class compatibility. + * Note: The generated schema uses passthrough() to allow additional properties. + */ + [key: string]: unknown; +} + /** * @description Request to change the display mode of the UI. 
* The host will respond with the actual display mode that was set, @@ -771,6 +834,8 @@ export const INITIALIZED_METHOD: McpUiInitializedNotification["method"] = "ui/notifications/initialized"; export const REQUEST_DISPLAY_MODE_METHOD: McpUiRequestDisplayModeRequest["method"] = "ui/request-display-mode"; +export const SAMPLING_CREATE_MESSAGE_METHOD: McpUiSamplingCreateMessageRequest["method"] = + "sampling/createMessage"; /** * @description MCP Apps capability settings advertised by clients to servers. diff --git a/src/types.ts b/src/types.ts index a4770fdaf..46b36e724 100644 --- a/src/types.ts +++ b/src/types.ts @@ -26,6 +26,7 @@ export { INITIALIZE_METHOD, INITIALIZED_METHOD, REQUEST_DISPLAY_MODE_METHOD, + SAMPLING_CREATE_MESSAGE_METHOD, type McpUiTheme, type McpUiDisplayMode, type McpUiStyleVariableKey, @@ -62,6 +63,9 @@ export { type McpUiToolVisibility, type McpUiToolMeta, type McpUiClientCapabilities, + type McpUiSamplingMessage, + type McpUiSamplingCreateMessageRequest, + type McpUiSamplingCreateMessageResult, } from "./spec.types.js"; // Import types needed for protocol type unions (not re-exported, just used internally) @@ -72,6 +76,8 @@ import type { McpUiUpdateModelContextRequest, McpUiResourceTeardownRequest, McpUiRequestDisplayModeRequest, + McpUiSamplingCreateMessageRequest, + McpUiSamplingCreateMessageResult, McpUiHostContextChangedNotification, McpUiToolInputNotification, McpUiToolInputPartialNotification, @@ -123,6 +129,9 @@ export { McpUiRequestDisplayModeResultSchema, McpUiToolVisibilitySchema, McpUiToolMetaSchema, + McpUiSamplingMessageSchema, + McpUiSamplingCreateMessageRequestSchema, + McpUiSamplingCreateMessageResultSchema, } from "./generated/schema.js"; // Re-export SDK types used in protocol type unions @@ -163,6 +172,7 @@ export type AppRequest = | McpUiUpdateModelContextRequest | McpUiResourceTeardownRequest | McpUiRequestDisplayModeRequest + | McpUiSamplingCreateMessageRequest | CallToolRequest | ListToolsRequest | ListResourcesRequest 
@@ -210,6 +220,7 @@ export type AppResult = | McpUiMessageResult | McpUiResourceTeardownResult | McpUiRequestDisplayModeResult + | McpUiSamplingCreateMessageResult | CallToolResult | ListToolsResult | ListResourcesResult