61 changes: 61 additions & 0 deletions src/app-bridge.ts
@@ -79,6 +79,9 @@ import {
McpUiRequestDisplayModeResult,
McpUiResourcePermissions,
McpUiToolMeta,
McpUiSamplingCreateMessageRequest,
McpUiSamplingCreateMessageRequestSchema,
McpUiSamplingCreateMessageResult,
} from "./types";
export * from "./types";
export { RESOURCE_URI_META_KEY, RESOURCE_MIME_TYPE } from "./app";
@@ -737,6 +740,64 @@ export class AppBridge extends Protocol<
);
}

/**
* Register a handler for sampling/createMessage requests from the view.
*
* The view sends `sampling/createMessage` requests when it needs an LLM
* completion. The host fulfills the request using its configured LLM provider.
* This handler is only called if the host advertises `sampling` in its
* capabilities during initialization.
*
* The handler receives the conversation messages, an optional system prompt,
* and an optional max token limit, and should return the LLM's response.
*
* **Security considerations:**
* - Hosts SHOULD implement rate limiting to prevent abuse
* - Hosts SHOULD apply content filtering/moderation policies
* - Hosts SHOULD log sampling requests for audit purposes
* - Hosts MAY enforce cost management controls (e.g., per-session token budgets)
*
* @param callback - Handler that receives sampling params and returns a result
* - `params.messages` - Conversation messages providing context
* - `params.systemPrompt` - Optional system prompt to guide LLM behavior
* - `params.maxTokens` - Optional maximum number of tokens to generate
* - `extra` - Request metadata (abort signal, session info)
* - Returns: `Promise<McpUiSamplingCreateMessageResult>` with model, role, content, and stopReason
*
* @example
* ```typescript
* bridge.oncreatesamplingmessage = async ({ messages, systemPrompt, maxTokens }, extra) => {
* const response = await llmProvider.createCompletion({
* messages,
* systemPrompt,
* maxTokens: maxTokens ?? 1024,
* });
* return {
* model: response.model,
* stopReason: response.stopReason,
* role: "assistant",
* content: { type: "text", text: response.text },
* };
* };
* ```
*
* @see {@link McpUiSamplingCreateMessageRequest `McpUiSamplingCreateMessageRequest`} for the request type
* @see {@link McpUiSamplingCreateMessageResult `McpUiSamplingCreateMessageResult`} for the result type
*/
set oncreatesamplingmessage(
callback: (
params: McpUiSamplingCreateMessageRequest["params"],
extra: RequestHandlerExtra,
) => Promise<McpUiSamplingCreateMessageResult>,
) {
this.setRequestHandler(
McpUiSamplingCreateMessageRequestSchema,
async (request, extra) => {
return callback(request.params, extra);
},
);
}

/**
* Register a handler for tool call requests from the view.
*
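The security considerations listed in the `oncreatesamplingmessage` JSDoc (rate limiting, audit logging, cost controls) are left to the host. The sketch below shows one way a host might layer them onto the handler; `bridge` and `llmProvider` reuse the hypothetical names from the JSDoc example, and `SimpleRateLimiter` plus the `console.info` audit line are illustrative stand-ins, not part of this SDK.

```typescript
// Minimal in-memory sliding-window limiter (illustrative only).
class SimpleRateLimiter {
  private timestamps: number[] = [];
  constructor(
    private maxRequests: number,
    private windowMs: number,
  ) {}

  allow(): boolean {
    const now = Date.now();
    this.timestamps = this.timestamps.filter((t) => now - t < this.windowMs);
    if (this.timestamps.length >= this.maxRequests) return false;
    this.timestamps.push(now);
    return true;
  }
}

const limiter = new SimpleRateLimiter(10, 60_000); // at most 10 requests per minute

bridge.oncreatesamplingmessage = async ({ messages, systemPrompt, maxTokens }, extra) => {
  if (!limiter.allow()) {
    throw new Error("Sampling rate limit exceeded");
  }

  // Audit trail (stand-in: a real host would persist this, not just log it).
  console.info("sampling/createMessage", { messageCount: messages.length });

  const response = await llmProvider.createCompletion({
    messages,
    systemPrompt,
    maxTokens: Math.min(maxTokens ?? 1024, 4096), // cap the per-request token budget
    signal: extra.signal, // propagate cancellation from the view
  });

  return {
    model: response.model,
    stopReason: response.stopReason,
    role: "assistant",
    content: { type: "text", text: response.text },
  };
};
```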
71 changes: 71 additions & 0 deletions src/app.ts
@@ -47,6 +47,8 @@ import {
McpUiToolResultNotificationSchema,
McpUiRequestDisplayModeRequest,
McpUiRequestDisplayModeResultSchema,
McpUiSamplingCreateMessageRequest,
McpUiSamplingCreateMessageResultSchema,
} from "./types";
import { Transport } from "@modelcontextprotocol/sdk/shared/transport.js";

@@ -970,6 +972,75 @@ export class App extends Protocol<AppRequest, AppNotification, AppResult> {
);
}

/**
* Request an LLM completion from the host.
*
* Sends a `sampling/createMessage` request to the host, which fulfills it
* using its configured LLM provider. This enables Views to leverage the
* host's language model capabilities without needing direct API access.
*
* The host must advertise `sampling` in its capabilities during initialization
* for this method to succeed. Check capabilities before calling:
*
* ```typescript
* if (app.getHostCapabilities()?.sampling) {
* const result = await app.createSamplingMessage({ ... });
* }
* ```
*
* @param params - Sampling request parameters
* - `messages` - Conversation messages providing context for the completion
* - `systemPrompt` - Optional system prompt to guide LLM behavior
* - `maxTokens` - Optional maximum number of tokens to generate
* @param options - Request options (timeout, abort signal, etc.)
* @returns Result containing the LLM's response with model info, role, content, and stop reason
*
* @throws {McpError} With `MethodNotFound` code if the host does not support sampling
* @throws {McpError} With `InvalidRequest` code if the request is malformed
* @throws {Error} If the request times out or the connection is lost
*
* @example Basic text completion
* ```typescript
* const result = await app.createSamplingMessage({
* messages: [
* { role: "user", content: { type: "text", text: "Summarize this data" } },
* ],
* maxTokens: 512,
* });
* console.log(result.content.text);
* ```
*
* @example Multi-turn conversation with system prompt
* ```typescript
* const result = await app.createSamplingMessage({
* messages: [
* { role: "user", content: { type: "text", text: "What is 2+2?" } },
* { role: "assistant", content: { type: "text", text: "4" } },
* { role: "user", content: { type: "text", text: "And 3+3?" } },
* ],
* systemPrompt: "You are a helpful math tutor.",
* maxTokens: 256,
* });
* ```
*
* @see {@link McpUiSamplingCreateMessageRequest `McpUiSamplingCreateMessageRequest`} for the request type
* @see {@link McpUiSamplingCreateMessageResult `McpUiSamplingCreateMessageResult`} for the result type
* @see {@link McpUiHostCapabilities `McpUiHostCapabilities`} for checking sampling support
*/
createSamplingMessage(
params: McpUiSamplingCreateMessageRequest["params"],
options?: RequestOptions,
) {
return this.request(
<McpUiSamplingCreateMessageRequest>{
method: "sampling/createMessage",
params,
},
McpUiSamplingCreateMessageResultSchema,
options,
);
}

/**
* Notify the host of UI size changes.
*
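On the view side, the capabilities check and the `@throws` behavior documented for `createSamplingMessage` combine naturally. A minimal sketch, assuming `app` is an already-connected `App` instance and that `McpError`/`ErrorCode` come from the MCP SDK this package builds on:

```typescript
import { McpError, ErrorCode } from "@modelcontextprotocol/sdk/types.js";

// Only attempt sampling if the host advertised support during initialization.
if (app.getHostCapabilities()?.sampling) {
  try {
    const result = await app.createSamplingMessage(
      {
        messages: [
          { role: "user", content: { type: "text", text: "Summarize this data" } },
        ],
        maxTokens: 256,
      },
      { timeout: 30_000 }, // per-request timeout via RequestOptions
    );
    console.log(result.content.text);
  } catch (error) {
    if (error instanceof McpError && error.code === ErrorCode.MethodNotFound) {
      // Host rejected the request despite the capability check; degrade gracefully.
      console.warn("Host does not support sampling");
    } else {
      throw error;
    }
  }
}
```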